From d90a702a8f003ba0644eb11533638bc6e0f7bd46 Mon Sep 17 00:00:00 2001 From: eric846 <56563761+eric846@users.noreply.github.com> Date: Fri, 11 Sep 2020 12:34:24 -0400 Subject: [PATCH 01/63] Adaptive load session spec proto helpers (#508) - `SetSessionSpecDefaults()`: Returns a copy of the `AdaptiveLoadSessionSpec` with default values added. - `CheckSessionSpec()`: Checks an `AdaptiveLoadSessionSpec` for illegal values, invalid plugin references, and invalid plugin configs. Part 6 of splitting PR #483. Signed-off-by: eric846 <56563761+eric846@users.noreply.github.com> --- include/nighthawk/adaptive_load/BUILD | 13 + .../adaptive_load/session_spec_proto_helper.h | 43 +++ source/adaptive_load/BUILD | 22 ++ .../session_spec_proto_helper_impl.cc | 119 ++++++++ .../session_spec_proto_helper_impl.h | 14 + test/adaptive_load/BUILD | 11 + .../fake_metrics_plugin.cc | 8 + .../fake_metrics_plugin/fake_metrics_plugin.h | 12 + .../fake_metrics_plugin_test.cc | 19 ++ .../session_spec_proto_helper_test.cc | 265 ++++++++++++++++++ 10 files changed, 526 insertions(+) create mode 100644 include/nighthawk/adaptive_load/session_spec_proto_helper.h create mode 100644 source/adaptive_load/session_spec_proto_helper_impl.cc create mode 100644 source/adaptive_load/session_spec_proto_helper_impl.h create mode 100644 test/adaptive_load/session_spec_proto_helper_test.cc diff --git a/include/nighthawk/adaptive_load/BUILD b/include/nighthawk/adaptive_load/BUILD index 32ed22151..1e0728795 100644 --- a/include/nighthawk/adaptive_load/BUILD +++ b/include/nighthawk/adaptive_load/BUILD @@ -96,6 +96,19 @@ envoy_basic_cc_library( ], ) +envoy_basic_cc_library( + name = "session_spec_proto_helper", + hdrs = [ + "session_spec_proto_helper.h", + ], + include_prefix = "nighthawk/adaptive_load", + deps = [ + "//api/adaptive_load:adaptive_load_proto_cc_proto", + "@com_google_absl//absl/status", + "@envoy//include/envoy/common:base_includes", + ], +) + envoy_basic_cc_library( name = "step_controller", hdrs = 
[ diff --git a/include/nighthawk/adaptive_load/session_spec_proto_helper.h b/include/nighthawk/adaptive_load/session_spec_proto_helper.h new file mode 100644 index 000000000..06e77ded7 --- /dev/null +++ b/include/nighthawk/adaptive_load/session_spec_proto_helper.h @@ -0,0 +1,43 @@ +#pragma once + +#include "envoy/common/pure.h" + +#include "api/adaptive_load/adaptive_load.pb.h" + +#include "absl/status/status.h" + +namespace Nighthawk { + +/** + * Utilities for setting default values and validating user settings in the main + * AdaptiveLoadSessionSpec proto. + */ +class AdaptiveLoadSessionSpecProtoHelper { +public: + virtual ~AdaptiveLoadSessionSpecProtoHelper() = default; + + /** + * Returns a copy of the input spec with default values inserted. Avoids overriding pre-set values + * in the original spec. + * + * @param spec Valid adaptive load session spec. + * + * @return Adaptive load session spec with default values inserted. + */ + virtual nighthawk::adaptive_load::AdaptiveLoadSessionSpec + SetSessionSpecDefaults(nighthawk::adaptive_load::AdaptiveLoadSessionSpec spec) PURE; + + /** + * Checks whether a session spec is valid: No forbidden fields in Nighthawk traffic spec; no bad + * plugin references or bad plugin configurations (step controller, metric, scoring function); no + * nonexistent metric names. Reports all errors in one pass. + * + * @param spec A potentially invalid adaptive load session spec. + * + * @return Status OK if no problems were found, or InvalidArgument with all errors. 
+ */ + virtual absl::Status + CheckSessionSpec(const nighthawk::adaptive_load::AdaptiveLoadSessionSpec& spec) PURE; +}; + +} // namespace Nighthawk \ No newline at end of file diff --git a/source/adaptive_load/BUILD b/source/adaptive_load/BUILD index c6e0fcf7e..3bc034c93 100644 --- a/source/adaptive_load/BUILD +++ b/source/adaptive_load/BUILD @@ -119,6 +119,28 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "session_spec_proto_helper_impl", + srcs = [ + "session_spec_proto_helper_impl.cc", + ], + hdrs = [ + "session_spec_proto_helper_impl.h", + ], + repository = "@envoy", + visibility = ["//visibility:public"], + deps = [ + ":metrics_plugin_impl", + ":plugin_loader", + "//include/nighthawk/adaptive_load:metrics_plugin", + "//include/nighthawk/adaptive_load:scoring_function", + "//include/nighthawk/adaptive_load:session_spec_proto_helper", + "//include/nighthawk/adaptive_load:step_controller", + "@envoy//source/common/config:utility_lib_with_external_headers", + "@envoy//source/common/protobuf:protobuf_with_external_headers", + ], +) + envoy_cc_library( name = "step_controller_impl", srcs = [ diff --git a/source/adaptive_load/session_spec_proto_helper_impl.cc b/source/adaptive_load/session_spec_proto_helper_impl.cc new file mode 100644 index 000000000..3c45d3d86 --- /dev/null +++ b/source/adaptive_load/session_spec_proto_helper_impl.cc @@ -0,0 +1,119 @@ +#include "adaptive_load/session_spec_proto_helper_impl.h" + +#include "nighthawk/adaptive_load/metrics_plugin.h" +#include "nighthawk/adaptive_load/step_controller.h" + +#include "api/adaptive_load/adaptive_load.pb.h" +#include "api/adaptive_load/metric_spec.pb.h" + +#include "absl/container/flat_hash_map.h" +#include "absl/strings/str_join.h" +#include "absl/strings/string_view.h" +#include "adaptive_load/metrics_plugin_impl.h" +#include "adaptive_load/plugin_loader.h" + +namespace Nighthawk { + +nighthawk::adaptive_load::AdaptiveLoadSessionSpec +AdaptiveLoadSessionSpecProtoHelperImpl::SetSessionSpecDefaults( 
 nighthawk::adaptive_load::AdaptiveLoadSessionSpec spec) { + if (!spec.nighthawk_traffic_template().has_open_loop()) { + spec.mutable_nighthawk_traffic_template()->mutable_open_loop()->set_value(true); + } + if (!spec.has_measuring_period()) { + spec.mutable_measuring_period()->set_seconds(10); + } + if (!spec.has_convergence_deadline()) { + spec.mutable_convergence_deadline()->set_seconds(300); + } + if (!spec.has_testing_stage_duration()) { + spec.mutable_testing_stage_duration()->set_seconds(30); + } + for (nighthawk::adaptive_load::MetricSpecWithThreshold& threshold : + *spec.mutable_metric_thresholds()) { + if (threshold.metric_spec().metrics_plugin_name().empty()) { + threshold.mutable_metric_spec()->set_metrics_plugin_name("nighthawk.builtin"); + } + if (!threshold.threshold_spec().has_weight()) { + threshold.mutable_threshold_spec()->mutable_weight()->set_value(1.0); + } + } + for (nighthawk::adaptive_load::MetricSpec& metric_spec : + *spec.mutable_informational_metric_specs()) { + if (metric_spec.metrics_plugin_name().empty()) { + metric_spec.set_metrics_plugin_name("nighthawk.builtin"); + } + } + return spec; + } + + absl::Status AdaptiveLoadSessionSpecProtoHelperImpl::CheckSessionSpec( + const nighthawk::adaptive_load::AdaptiveLoadSessionSpec& spec) { + std::vector<std::string> errors; + if (spec.nighthawk_traffic_template().has_duration()) { + errors.emplace_back( + "nighthawk_traffic_template should not have |duration| set. 
Set |measuring_period| " + "and |testing_stage_duration| in the AdaptiveLoadSessionSpec proto instead."); + } + absl::flat_hash_map<std::string, MetricsPluginPtr> plugin_from_name; + std::vector<std::string> plugin_names = {"nighthawk.builtin"}; + plugin_from_name["nighthawk.builtin"] = + std::make_unique<NighthawkStatsEmulatedMetricsPlugin>(nighthawk::client::Output()); + for (const envoy::config::core::v3::TypedExtensionConfig& config : + spec.metrics_plugin_configs()) { + plugin_names.push_back(config.name()); + absl::StatusOr<MetricsPluginPtr> metrics_plugin_or = LoadMetricsPlugin(config); + if (!metrics_plugin_or.ok()) { + errors.emplace_back( + absl::StrCat("Failed to load MetricsPlugin: ", metrics_plugin_or.status().message())); + continue; + } + plugin_from_name[config.name()] = std::move(metrics_plugin_or.value()); + } + absl::StatusOr<StepControllerPtr> step_controller_or = + LoadStepControllerPlugin(spec.step_controller_config(), spec.nighthawk_traffic_template()); + if (!step_controller_or.ok()) { + errors.emplace_back(absl::StrCat("Failed to load StepController plugin: ", + step_controller_or.status().message())); + } + std::vector<nighthawk::adaptive_load::MetricSpec> all_metric_specs; + for (const nighthawk::adaptive_load::MetricSpecWithThreshold& metric_threshold : + spec.metric_thresholds()) { + all_metric_specs.push_back(metric_threshold.metric_spec()); + absl::StatusOr<ScoringFunctionPtr> scoring_function_or = + LoadScoringFunctionPlugin(metric_threshold.threshold_spec().scoring_function()); + if (!scoring_function_or.ok()) { + errors.emplace_back(absl::StrCat("Failed to load ScoringFunction plugin: ", + scoring_function_or.status().message())); + } + } + for (const nighthawk::adaptive_load::MetricSpec& metric_spec : + spec.informational_metric_specs()) { + all_metric_specs.push_back(metric_spec); + } + for (const nighthawk::adaptive_load::MetricSpec& metric_spec : all_metric_specs) { + if (plugin_from_name.contains(metric_spec.metrics_plugin_name())) { + std::vector<std::string> supported_metrics = + plugin_from_name[metric_spec.metrics_plugin_name()]->GetAllSupportedMetricNames(); + if (std::find(supported_metrics.begin(), 
supported_metrics.end(), + metric_spec.metric_name()) == supported_metrics.end()) { + errors.emplace_back( + absl::StrCat("Metric named '", metric_spec.metric_name(), + "' not implemented by plugin '", metric_spec.metrics_plugin_name(), + "'. Metrics implemented: ", absl::StrJoin(supported_metrics, ", "), ".")); + } + } else { + errors.emplace_back(absl::StrCat( + "MetricSpec referred to nonexistent metrics_plugin_name '", + metric_spec.metrics_plugin_name(), + "'. You must declare the plugin in metrics_plugin_configs or use plugin ", + "'nighthawk.builtin'. Available plugins: ", absl::StrJoin(plugin_names, ", "), ".")); + } + } + if (errors.size() > 0) { + return absl::InvalidArgumentError(absl::StrJoin(errors, "\n")); + } + return absl::OkStatus(); +} + +} // namespace Nighthawk diff --git a/source/adaptive_load/session_spec_proto_helper_impl.h b/source/adaptive_load/session_spec_proto_helper_impl.h new file mode 100644 index 000000000..d47e3a680 --- /dev/null +++ b/source/adaptive_load/session_spec_proto_helper_impl.h @@ -0,0 +1,14 @@ +#include "nighthawk/adaptive_load/session_spec_proto_helper.h" + +namespace Nighthawk { + +class AdaptiveLoadSessionSpecProtoHelperImpl : public AdaptiveLoadSessionSpecProtoHelper { +public: + nighthawk::adaptive_load::AdaptiveLoadSessionSpec + SetSessionSpecDefaults(nighthawk::adaptive_load::AdaptiveLoadSessionSpec spec) override; + + absl::Status + CheckSessionSpec(const nighthawk::adaptive_load::AdaptiveLoadSessionSpec& spec) override; +}; + +} // namespace Nighthawk diff --git a/test/adaptive_load/BUILD b/test/adaptive_load/BUILD index a8639f34e..78518f6cb 100644 --- a/test/adaptive_load/BUILD +++ b/test/adaptive_load/BUILD @@ -63,6 +63,17 @@ envoy_cc_test( ], ) +envoy_cc_test( + name = "session_spec_proto_helper_test", + srcs = ["session_spec_proto_helper_test.cc"], + repository = "@envoy", + deps = [ + "//source/adaptive_load:session_spec_proto_helper_impl", + "//test/adaptive_load/fake_plugins/fake_metrics_plugin", + 
"//test/adaptive_load/fake_plugins/fake_step_controller", + ], +) + envoy_cc_test( name = "scoring_function_test", srcs = ["scoring_function_test.cc"], diff --git a/test/adaptive_load/fake_plugins/fake_metrics_plugin/fake_metrics_plugin.cc b/test/adaptive_load/fake_plugins/fake_metrics_plugin/fake_metrics_plugin.cc index 41909cf4e..1f1c6e694 100644 --- a/test/adaptive_load/fake_plugins/fake_metrics_plugin/fake_metrics_plugin.cc +++ b/test/adaptive_load/fake_plugins/fake_metrics_plugin/fake_metrics_plugin.cc @@ -81,4 +81,12 @@ FakeMetricsPluginConfigFactory::ValidateConfig(const Envoy::Protobuf::Message& m REGISTER_FACTORY(FakeMetricsPluginConfigFactory, MetricsPluginConfigFactory); +envoy::config::core::v3::TypedExtensionConfig MakeFakeMetricsPluginTypedExtensionConfig( + const nighthawk::adaptive_load::FakeMetricsPluginConfig& config) { + envoy::config::core::v3::TypedExtensionConfig outer_config; + outer_config.set_name("nighthawk.fake_metrics_plugin"); + outer_config.mutable_typed_config()->PackFrom(config); + return outer_config; +} + } // namespace Nighthawk diff --git a/test/adaptive_load/fake_plugins/fake_metrics_plugin/fake_metrics_plugin.h b/test/adaptive_load/fake_plugins/fake_metrics_plugin/fake_metrics_plugin.h index 1bb6b9fd2..96c2e4446 100644 --- a/test/adaptive_load/fake_plugins/fake_metrics_plugin/fake_metrics_plugin.h +++ b/test/adaptive_load/fake_plugins/fake_metrics_plugin/fake_metrics_plugin.h @@ -50,4 +50,16 @@ class FakeMetricsPluginConfigFactory : public MetricsPluginConfigFactory { // This factory is activated through LoadMetricsPlugin in plugin_util.h. DECLARE_FACTORY(FakeMetricsPluginConfigFactory); +/** + * Creates a TypedExtensionConfig that activates a FakeMetricsPlugin by name with the given config + * proto. + * + * @param config The plugin-specific config proto to be packed into the typed_config Any. + * + * @return TypedExtensionConfig A proto that activates a FakeMetricsPlugin by name with a bundled + * config proto. 
+ */ +envoy::config::core::v3::TypedExtensionConfig MakeFakeMetricsPluginTypedExtensionConfig( + const nighthawk::adaptive_load::FakeMetricsPluginConfig& config); + } // namespace Nighthawk diff --git a/test/adaptive_load/fake_plugins/fake_metrics_plugin/fake_metrics_plugin_test.cc b/test/adaptive_load/fake_plugins/fake_metrics_plugin/fake_metrics_plugin_test.cc index 547f89ba9..bfbc39ad8 100644 --- a/test/adaptive_load/fake_plugins/fake_metrics_plugin/fake_metrics_plugin_test.cc +++ b/test/adaptive_load/fake_plugins/fake_metrics_plugin/fake_metrics_plugin_test.cc @@ -6,6 +6,7 @@ #include "api/client/options.pb.h" #include "test/adaptive_load/fake_plugins/fake_metrics_plugin/fake_metrics_plugin.h" +#include "test/adaptive_load/fake_plugins/fake_metrics_plugin/fake_metrics_plugin.pb.h" #include "adaptive_load/plugin_loader.h" #include "gmock/gmock.h" @@ -114,5 +115,23 @@ TEST(FakeMetricsPlugin, GetAllSupportedMetricNamesReturnsCorrectValues) { ::testing::UnorderedElementsAre("metric1", "metric2")); } +TEST(MakeFakeMetricsPluginTypedExtensionConfig, SetsCorrectPluginName) { + envoy::config::core::v3::TypedExtensionConfig activator = + MakeFakeMetricsPluginTypedExtensionConfig( + nighthawk::adaptive_load::FakeMetricsPluginConfig()); + EXPECT_EQ(activator.name(), "nighthawk.fake_metrics_plugin"); +} + +TEST(MakeFakeMetricsPluginTypedExtensionConfig, PacksGivenConfigProto) { + nighthawk::adaptive_load::FakeMetricsPluginConfig expected_config; + expected_config.mutable_fake_metrics()->Add()->set_name("a"); + envoy::config::core::v3::TypedExtensionConfig activator = + MakeFakeMetricsPluginTypedExtensionConfig(expected_config); + nighthawk::adaptive_load::FakeMetricsPluginConfig actual_config; + Envoy::MessageUtil::unpackTo(activator.typed_config(), actual_config); + EXPECT_EQ(expected_config.DebugString(), actual_config.DebugString()); + EXPECT_TRUE(MessageDifferencer::Equivalent(expected_config, actual_config)); +} + } // namespace } // namespace Nighthawk diff --git 
a/test/adaptive_load/session_spec_proto_helper_test.cc b/test/adaptive_load/session_spec_proto_helper_test.cc new file mode 100644 index 000000000..565475994 --- /dev/null +++ b/test/adaptive_load/session_spec_proto_helper_test.cc @@ -0,0 +1,265 @@ +#include "envoy/registry/registry.h" + +#include "external/envoy/source/common/config/utility.h" + +#include "api/adaptive_load/adaptive_load.pb.h" +#include "api/adaptive_load/metric_spec.pb.h" +#include "api/client/options.pb.h" + +#include "test/adaptive_load/fake_plugins/fake_metrics_plugin/fake_metrics_plugin.h" + +#include "adaptive_load/plugin_loader.h" +#include "adaptive_load/session_spec_proto_helper_impl.h" +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Nighthawk { + +namespace { + +using ::nighthawk::adaptive_load::AdaptiveLoadSessionSpec; +using ::testing::HasSubstr; + +TEST(SetSessionSpecDefaults, SetsDefaultValueIfOpenLoopUnset) { + AdaptiveLoadSessionSpec original_spec; + AdaptiveLoadSessionSpecProtoHelperImpl helper; + AdaptiveLoadSessionSpec spec = helper.SetSessionSpecDefaults(original_spec); + EXPECT_TRUE(spec.nighthawk_traffic_template().open_loop().value()); +} + +TEST(SetSessionSpecDefaults, PreservesExplicitOpenLoopSetting) { + AdaptiveLoadSessionSpec original_spec; + original_spec.mutable_nighthawk_traffic_template()->mutable_open_loop()->set_value(false); + AdaptiveLoadSessionSpecProtoHelperImpl helper; + AdaptiveLoadSessionSpec spec = helper.SetSessionSpecDefaults(original_spec); + EXPECT_FALSE(spec.nighthawk_traffic_template().open_loop().value()); +} + +TEST(SetSessionSpecDefaults, SetsDefaultMeasuringPeriodIfUnset) { + AdaptiveLoadSessionSpec original_spec; + AdaptiveLoadSessionSpecProtoHelperImpl helper; + AdaptiveLoadSessionSpec spec = helper.SetSessionSpecDefaults(original_spec); + EXPECT_EQ(spec.measuring_period().seconds(), 10); +} + +TEST(SetSessionSpecDefaults, PreservesExplicitMeasuringPeriod) { + const int kExpectedMeasuringPeriodSeconds = 123; + 
AdaptiveLoadSessionSpec original_spec; + original_spec.mutable_measuring_period()->set_seconds(kExpectedMeasuringPeriodSeconds); + AdaptiveLoadSessionSpecProtoHelperImpl helper; + AdaptiveLoadSessionSpec spec = helper.SetSessionSpecDefaults(original_spec); + EXPECT_EQ(spec.measuring_period().seconds(), kExpectedMeasuringPeriodSeconds); +} + +TEST(SetSessionSpecDefaults, SetsDefaultConvergenceDeadlineIfUnset) { + AdaptiveLoadSessionSpec original_spec; + AdaptiveLoadSessionSpecProtoHelperImpl helper; + AdaptiveLoadSessionSpec spec = helper.SetSessionSpecDefaults(original_spec); + EXPECT_EQ(spec.convergence_deadline().seconds(), 300); +} + +TEST(SetSessionSpecDefaults, PreservesExplicitConvergenceDeadline) { + const int kExpectedConvergenceDeadlineSeconds = 123; + AdaptiveLoadSessionSpec original_spec; + original_spec.mutable_convergence_deadline()->set_seconds(kExpectedConvergenceDeadlineSeconds); + AdaptiveLoadSessionSpecProtoHelperImpl helper; + AdaptiveLoadSessionSpec spec = helper.SetSessionSpecDefaults(original_spec); + EXPECT_EQ(spec.convergence_deadline().seconds(), kExpectedConvergenceDeadlineSeconds); +} + +TEST(SetSessionSpecDefaults, SetsDefaultTestingStageDurationIfUnset) { + AdaptiveLoadSessionSpec original_spec; + AdaptiveLoadSessionSpecProtoHelperImpl helper; + AdaptiveLoadSessionSpec spec = helper.SetSessionSpecDefaults(original_spec); + EXPECT_EQ(spec.testing_stage_duration().seconds(), 30); +} + +TEST(SetSessionSpecDefaults, PreservesExplicitTestingStageDuration) { + const int kExpectedTestingStageDurationSeconds = 123; + AdaptiveLoadSessionSpec original_spec; + original_spec.mutable_testing_stage_duration()->set_seconds(kExpectedTestingStageDurationSeconds); + AdaptiveLoadSessionSpecProtoHelperImpl helper; + AdaptiveLoadSessionSpec spec = helper.SetSessionSpecDefaults(original_spec); + EXPECT_EQ(spec.testing_stage_duration().seconds(), kExpectedTestingStageDurationSeconds); +} + +TEST(SetSessionSpecDefaults, 
SetsDefaultScoredMetricPluginNameIfUnset) { + AdaptiveLoadSessionSpec original_spec; + (void)original_spec.mutable_metric_thresholds()->Add(); + AdaptiveLoadSessionSpecProtoHelperImpl helper; + AdaptiveLoadSessionSpec spec = helper.SetSessionSpecDefaults(original_spec); + ASSERT_GT(spec.metric_thresholds_size(), 0); + EXPECT_EQ(spec.metric_thresholds(0).metric_spec().metrics_plugin_name(), "nighthawk.builtin"); +} + +TEST(SetSessionSpecDefaults, PreservesExplicitScoredMetricPluginName) { + const std::string kExpectedMetricsPluginName = "a"; + AdaptiveLoadSessionSpec original_spec; + nighthawk::adaptive_load::MetricSpecWithThreshold* spec_threshold = + original_spec.mutable_metric_thresholds()->Add(); + spec_threshold->mutable_metric_spec()->set_metrics_plugin_name(kExpectedMetricsPluginName); + AdaptiveLoadSessionSpecProtoHelperImpl helper; + AdaptiveLoadSessionSpec spec = helper.SetSessionSpecDefaults(original_spec); + ASSERT_GT(spec.metric_thresholds_size(), 0); + EXPECT_EQ(spec.metric_thresholds(0).metric_spec().metrics_plugin_name(), + kExpectedMetricsPluginName); +} + +TEST(SetSessionSpecDefaults, SetsDefaultScoredMetricWeightIfUnset) { + AdaptiveLoadSessionSpec original_spec; + (void)original_spec.mutable_metric_thresholds()->Add(); + AdaptiveLoadSessionSpecProtoHelperImpl helper; + AdaptiveLoadSessionSpec spec = helper.SetSessionSpecDefaults(original_spec); + ASSERT_GT(spec.metric_thresholds_size(), 0); + EXPECT_EQ(spec.metric_thresholds(0).threshold_spec().weight().value(), 1.0); +} + +TEST(SetSessionSpecDefaults, PreservesExplicitScoredMetricWeight) { + const double kExpectedWeight = 123.0; + AdaptiveLoadSessionSpec original_spec; + nighthawk::adaptive_load::MetricSpecWithThreshold* spec_threshold = + original_spec.mutable_metric_thresholds()->Add(); + spec_threshold->mutable_threshold_spec()->mutable_weight()->set_value(kExpectedWeight); + AdaptiveLoadSessionSpecProtoHelperImpl helper; + AdaptiveLoadSessionSpec spec = 
helper.SetSessionSpecDefaults(original_spec); + ASSERT_GT(spec.metric_thresholds_size(), 0); + EXPECT_EQ(spec.metric_thresholds(0).threshold_spec().weight().value(), kExpectedWeight); +} + +TEST(SetSessionSpecDefaults, SetsDefaultInformationalMetricPluginNameIfUnset) { + AdaptiveLoadSessionSpec original_spec; + (void)original_spec.mutable_informational_metric_specs()->Add(); + AdaptiveLoadSessionSpecProtoHelperImpl helper; + AdaptiveLoadSessionSpec spec = helper.SetSessionSpecDefaults(original_spec); + ASSERT_GT(spec.informational_metric_specs_size(), 0); + EXPECT_EQ(spec.informational_metric_specs(0).metrics_plugin_name(), "nighthawk.builtin"); +} + +TEST(SetSessionSpecDefaults, PreservesExplicitInformationalMetricPluginName) { + const std::string kExpectedMetricsPluginName = "a"; + AdaptiveLoadSessionSpec original_spec; + nighthawk::adaptive_load::MetricSpec* metric_spec = + original_spec.mutable_informational_metric_specs()->Add(); + metric_spec->set_metrics_plugin_name(kExpectedMetricsPluginName); + AdaptiveLoadSessionSpecProtoHelperImpl helper; + AdaptiveLoadSessionSpec spec = helper.SetSessionSpecDefaults(original_spec); + ASSERT_GT(spec.informational_metric_specs_size(), 0); + EXPECT_EQ(spec.informational_metric_specs(0).metrics_plugin_name(), kExpectedMetricsPluginName); +} + +TEST(CheckSessionSpec, RejectsDurationIfSet) { + AdaptiveLoadSessionSpec spec; + spec.mutable_nighthawk_traffic_template()->mutable_duration()->set_seconds(1); + AdaptiveLoadSessionSpecProtoHelperImpl helper; + absl::Status status = helper.CheckSessionSpec(spec); + EXPECT_EQ(status.code(), absl::StatusCode::kInvalidArgument); + EXPECT_THAT(status.message(), HasSubstr("should not have |duration| set")); +} + +TEST(CheckSessionSpec, RejectsInvalidMetricsPlugin) { + AdaptiveLoadSessionSpec spec; + envoy::config::core::v3::TypedExtensionConfig metrics_plugin_config; + metrics_plugin_config.set_name("bogus"); + *spec.mutable_metrics_plugin_configs()->Add() = metrics_plugin_config; + 
AdaptiveLoadSessionSpecProtoHelperImpl helper; + absl::Status status = helper.CheckSessionSpec(spec); + EXPECT_EQ(status.code(), absl::StatusCode::kInvalidArgument); + EXPECT_THAT(status.message(), HasSubstr("Failed to load MetricsPlugin")); +} + +TEST(CheckSessionSpec, RejectsInvalidStepControllerPlugin) { + AdaptiveLoadSessionSpec spec; + spec.mutable_step_controller_config()->set_name("bogus"); + AdaptiveLoadSessionSpecProtoHelperImpl helper; + absl::Status status = helper.CheckSessionSpec(spec); + EXPECT_EQ(status.code(), absl::StatusCode::kInvalidArgument); + EXPECT_THAT(status.message(), HasSubstr("Failed to load StepController plugin")); +} + +TEST(CheckSessionSpec, RejectsInvalidScoringFunctionPlugin) { + AdaptiveLoadSessionSpec spec; + nighthawk::adaptive_load::MetricSpecWithThreshold* spec_threshold = + spec.mutable_metric_thresholds()->Add(); + spec_threshold->mutable_threshold_spec()->mutable_scoring_function()->set_name("bogus"); + AdaptiveLoadSessionSpecProtoHelperImpl helper; + absl::Status status = helper.CheckSessionSpec(spec); + EXPECT_EQ(status.code(), absl::StatusCode::kInvalidArgument); + EXPECT_THAT(status.message(), HasSubstr("Failed to load ScoringFunction plugin")); +} + +TEST(CheckSessionSpec, RejectsScoredMetricWithUndeclaredMetricsPluginName) { + AdaptiveLoadSessionSpec spec; + nighthawk::adaptive_load::MetricSpecWithThreshold* spec_threshold = + spec.mutable_metric_thresholds()->Add(); + spec_threshold->mutable_metric_spec()->set_metrics_plugin_name("bogus"); + AdaptiveLoadSessionSpecProtoHelperImpl helper; + absl::Status status = helper.CheckSessionSpec(spec); + EXPECT_EQ(status.code(), absl::StatusCode::kInvalidArgument); + EXPECT_THAT(status.message(), HasSubstr("nonexistent metrics_plugin_name")); +} + +TEST(CheckSessionSpec, RejectsInformationalMetricWithUndeclaredMetricsPluginName) { + AdaptiveLoadSessionSpec spec; + nighthawk::adaptive_load::MetricSpec* metric_spec = + spec.mutable_informational_metric_specs()->Add(); + 
metric_spec->set_metrics_plugin_name("bogus"); + AdaptiveLoadSessionSpecProtoHelperImpl helper; + absl::Status status = helper.CheckSessionSpec(spec); + EXPECT_EQ(status.code(), absl::StatusCode::kInvalidArgument); + EXPECT_THAT(status.message(), HasSubstr("nonexistent metrics_plugin_name")); +} + +TEST(CheckSessionSpec, RejectsScoredMetricWithNonexistentDefaultMetricsPluginMetric) { + AdaptiveLoadSessionSpec spec; + nighthawk::adaptive_load::MetricSpecWithThreshold* spec_threshold = + spec.mutable_metric_thresholds()->Add(); + spec_threshold->mutable_metric_spec()->set_metric_name("bogus"); + spec_threshold->mutable_metric_spec()->set_metrics_plugin_name("nighthawk.builtin"); + AdaptiveLoadSessionSpecProtoHelperImpl helper; + absl::Status status = helper.CheckSessionSpec(spec); + EXPECT_EQ(status.code(), absl::StatusCode::kInvalidArgument); + EXPECT_THAT(status.message(), HasSubstr("not implemented by plugin")); +} + +TEST(CheckSessionSpec, RejectsInformationalMetricWithNonexistentDefaultMetricsPluginMetric) { + AdaptiveLoadSessionSpec spec; + nighthawk::adaptive_load::MetricSpec* metric_spec = + spec.mutable_informational_metric_specs()->Add(); + metric_spec->set_metric_name("bogus"); + metric_spec->set_metrics_plugin_name("nighthawk.builtin"); + AdaptiveLoadSessionSpecProtoHelperImpl helper; + absl::Status status = helper.CheckSessionSpec(spec); + EXPECT_EQ(status.code(), absl::StatusCode::kInvalidArgument); + EXPECT_THAT(status.message(), HasSubstr("not implemented by plugin")); +} + +TEST(CheckSessionSpec, RejectsScoredMetricWithNonexistentCustomMetricsPluginMetric) { + AdaptiveLoadSessionSpec spec; + *spec.mutable_metrics_plugin_configs()->Add() = MakeFakeMetricsPluginTypedExtensionConfig( + nighthawk::adaptive_load::FakeMetricsPluginConfig()); + nighthawk::adaptive_load::MetricSpecWithThreshold* spec_threshold = + spec.mutable_metric_thresholds()->Add(); + spec_threshold->mutable_metric_spec()->set_metric_name("bogus"); + 
spec_threshold->mutable_metric_spec()->set_metrics_plugin_name("nighthawk.fake_metrics_plugin"); + AdaptiveLoadSessionSpecProtoHelperImpl helper; + absl::Status status = helper.CheckSessionSpec(spec); + EXPECT_EQ(status.code(), absl::StatusCode::kInvalidArgument); + EXPECT_THAT(status.message(), HasSubstr("not implemented by plugin")); +} + +TEST(CheckSessionSpec, RejectsInformationalMetricWithNonexistentCustomMetricsPluginMetric) { + AdaptiveLoadSessionSpec spec; + *spec.mutable_metrics_plugin_configs()->Add() = MakeFakeMetricsPluginTypedExtensionConfig( + nighthawk::adaptive_load::FakeMetricsPluginConfig()); + nighthawk::adaptive_load::MetricSpec* metric_spec = + spec.mutable_informational_metric_specs()->Add(); + metric_spec->set_metric_name("bogus"); + metric_spec->set_metrics_plugin_name("nighthawk.fake_metrics_plugin"); + AdaptiveLoadSessionSpecProtoHelperImpl helper; + absl::Status status = helper.CheckSessionSpec(spec); + EXPECT_EQ(status.code(), absl::StatusCode::kInvalidArgument); + EXPECT_THAT(status.message(), HasSubstr("not implemented by plugin")); +} + +} // namespace +} // namespace Nighthawk From 4fe741b1dc4c670892611f0ae4758ed1ce07b909 Mon Sep 17 00:00:00 2001 From: dubious90 Date: Fri, 11 Sep 2020 12:39:52 -0400 Subject: [PATCH 02/63] Add dubious90 to the list of owners. (#524) - Add dubious90 to the list of owners. - Also add in recently discussed conventions about maintaining and contributing to nighthawk. Signed-off-by: Nathan Perry --- CONTRIBUTING.md | 2 ++ MAINTAINERS.md | 4 +++- OWNERS.md | 2 ++ 3 files changed, 7 insertions(+), 1 deletion(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 485881382..9eabaf0a3 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -24,6 +24,8 @@ Both API and implementation stability are important to Nighthawk. 
Since the API * Generally Nighthawk mirrors [Envoy's policy](https://github.com/envoyproxy/envoy/blob/master/CONTRIBUTING.md#pr-review-policy-for-maintainers) with respect to maintainer review policy. * See [OWNERS.md](OWNERS.md) for the current list of maintainers. +* It is helpful if you apply the label `waiting-for-review` to any PRs that are ready to be reviewed by a maintainer. + * Reviewers will change the label to `waiting-for-changes` when responding. # DCO: Sign your work diff --git a/MAINTAINERS.md b/MAINTAINERS.md index bd255c79c..8535ce1a3 100644 --- a/MAINTAINERS.md +++ b/MAINTAINERS.md @@ -18,9 +18,11 @@ Envoy internals. - New features should be added to the [version history](docs/root/version_history.md). - Breaking changes to the [protobuf APIs](api/) are not allowed. - When merging, clean up the commit message so we get a nice history. By default, - github will compile a messages from all the commits that are squashed. + github will compile a message from all the commits that are squashed. The PR title and description should be a good starting point for the final commit message. (If it is not, it may be worth asking the PR author to update the description). +- Make sure that the DCO signoff is included in the final commit message. + - As a convention, it is appropriate to exclude content in the PR description that occurs after the signoff. ## Updates to the Envoy dependency diff --git a/OWNERS.md b/OWNERS.md index d53194152..69b50ca69 100644 --- a/OWNERS.md +++ b/OWNERS.md @@ -13,3 +13,5 @@ routing PRs, questions, etc. to the right place. * Envoy internals and architecture. * Jakub Sobon ([mum4k](https://github.com/mum4k)) (mumak@google.com) * APIs, general functionality and miscellany. +* Nathan Perry ([dubious90](https://github.com/dubious90)) (nbperry@google.com) + * APIs and general functionality. 
\ No newline at end of file From 846a3dfec02e5a599a6b45f880c6a9de3f2001ff Mon Sep 17 00:00:00 2001 From: Otto van der Schaaf Date: Fri, 11 Sep 2020 18:49:10 +0200 Subject: [PATCH 03/63] Eliminate configuration references to Envoy's fault filter. (#523) Our own dynamic delay extension owns this now. Signed-off-by: Otto van der Schaaf --- ci/docker/default-config.yaml | 9 ++------- .../configurations/nighthawk_http_origin.yaml | 7 ------- 2 files changed, 2 insertions(+), 14 deletions(-) diff --git a/ci/docker/default-config.yaml b/ci/docker/default-config.yaml index b417a4a88..8c931f188 100644 --- a/ci/docker/default-config.yaml +++ b/ci/docker/default-config.yaml @@ -23,13 +23,8 @@ static_resources: domains: - "*" http_filters: - - name: envoy.fault - config: - max_active_faults: 100 - delay: - header_delay: {} - percentage: - numerator: 100 + - name: time-tracking + - name: dynamic-delay - name: test-server config: response_body_size: 10 diff --git a/test/integration/configurations/nighthawk_http_origin.yaml b/test/integration/configurations/nighthawk_http_origin.yaml index 08247b781..cf703ec64 100644 --- a/test/integration/configurations/nighthawk_http_origin.yaml +++ b/test/integration/configurations/nighthawk_http_origin.yaml @@ -24,13 +24,6 @@ static_resources: - "*" http_filters: - name: dynamic-delay - - name: envoy.fault - config: - max_active_faults: 100 - delay: - header_delay: {} - percentage: - numerator: 100 - name: test-server config: response_body_size: 10 From 582244a63c6afe2ff9f03a9417d0d09cdecfe235 Mon Sep 17 00:00:00 2001 From: eric846 <56563761+eric846@users.noreply.github.com> Date: Fri, 11 Sep 2020 20:21:56 -0400 Subject: [PATCH 04/63] Adaptive load helper interface cleanup and dep/header cleanup (#527) - Add missing `const` to helper methods. - Add a missing build dep - Add a missing `#pragma once ` Part 7 of splitting PR #483. 
Signed-off-by: eric846 <56563761+eric846@users.noreply.github.com> --- include/nighthawk/adaptive_load/BUILD | 1 + include/nighthawk/adaptive_load/metrics_evaluator.h | 3 ++- include/nighthawk/adaptive_load/session_spec_proto_helper.h | 4 ++-- include/nighthawk/common/nighthawk_service_client.h | 4 ++-- source/adaptive_load/session_spec_proto_helper_impl.cc | 4 ++-- source/adaptive_load/session_spec_proto_helper_impl.h | 4 ++-- source/common/nighthawk_service_client_impl.cc | 2 +- source/common/nighthawk_service_client_impl.h | 2 +- 8 files changed, 13 insertions(+), 11 deletions(-) diff --git a/include/nighthawk/adaptive_load/BUILD b/include/nighthawk/adaptive_load/BUILD index 1e0728795..fd658e9f0 100644 --- a/include/nighthawk/adaptive_load/BUILD +++ b/include/nighthawk/adaptive_load/BUILD @@ -35,6 +35,7 @@ envoy_basic_cc_library( "@envoy//include/envoy/common:base_includes", "@envoy//include/envoy/common:time_interface", "@envoy//include/envoy/config:typed_config_interface", + "@envoy//source/common/protobuf:protobuf_with_external_headers", ], ) diff --git a/include/nighthawk/adaptive_load/metrics_evaluator.h b/include/nighthawk/adaptive_load/metrics_evaluator.h index cc969a9dd..c72d03427 100644 --- a/include/nighthawk/adaptive_load/metrics_evaluator.h +++ b/include/nighthawk/adaptive_load/metrics_evaluator.h @@ -1,8 +1,9 @@ +#pragma once + #include "envoy/config/core/v3/base.pb.h" #include "nighthawk/adaptive_load/metrics_plugin.h" -#include "external/envoy/source/common/common/logger.h" #include "external/envoy/source/common/common/statusor.h" #include "external/envoy/source/common/protobuf/protobuf.h" diff --git a/include/nighthawk/adaptive_load/session_spec_proto_helper.h b/include/nighthawk/adaptive_load/session_spec_proto_helper.h index 06e77ded7..a8850c6b9 100644 --- a/include/nighthawk/adaptive_load/session_spec_proto_helper.h +++ b/include/nighthawk/adaptive_load/session_spec_proto_helper.h @@ -25,7 +25,7 @@ class AdaptiveLoadSessionSpecProtoHelper { * 
@return Adaptive load session spec with default values inserted. */ virtual nighthawk::adaptive_load::AdaptiveLoadSessionSpec - SetSessionSpecDefaults(nighthawk::adaptive_load::AdaptiveLoadSessionSpec spec) PURE; + SetSessionSpecDefaults(nighthawk::adaptive_load::AdaptiveLoadSessionSpec spec) const PURE; /** * Checks whether a session spec is valid: No forbidden fields in Nighthawk traffic spec; no bad @@ -37,7 +37,7 @@ class AdaptiveLoadSessionSpecProtoHelper { * @return Status OK if no problems were found, or InvalidArgument with all errors. */ virtual absl::Status - CheckSessionSpec(const nighthawk::adaptive_load::AdaptiveLoadSessionSpec& spec) PURE; + CheckSessionSpec(const nighthawk::adaptive_load::AdaptiveLoadSessionSpec& spec) const PURE; }; } // namespace Nighthawk \ No newline at end of file diff --git a/include/nighthawk/common/nighthawk_service_client.h b/include/nighthawk/common/nighthawk_service_client.h index 4226837c6..e49ecf654 100644 --- a/include/nighthawk/common/nighthawk_service_client.h +++ b/include/nighthawk/common/nighthawk_service_client.h @@ -10,7 +10,7 @@ namespace Nighthawk { /** - * An interface for interacting with a Nighthawk Service gRPC stub. + * An interface for a stateless helper that interacts with a Nighthawk Service gRPC stub. 
*/ class NighthawkServiceClient { public: @@ -30,7 +30,7 @@ class NighthawkServiceClient { */ virtual absl::StatusOr PerformNighthawkBenchmark( nighthawk::client::NighthawkService::StubInterface* nighthawk_service_stub, - const nighthawk::client::CommandLineOptions& command_line_options) PURE; + const nighthawk::client::CommandLineOptions& command_line_options) const PURE; }; } // namespace Nighthawk diff --git a/source/adaptive_load/session_spec_proto_helper_impl.cc b/source/adaptive_load/session_spec_proto_helper_impl.cc index 3c45d3d86..be8f53bcf 100644 --- a/source/adaptive_load/session_spec_proto_helper_impl.cc +++ b/source/adaptive_load/session_spec_proto_helper_impl.cc @@ -16,7 +16,7 @@ namespace Nighthawk { nighthawk::adaptive_load::AdaptiveLoadSessionSpec AdaptiveLoadSessionSpecProtoHelperImpl::SetSessionSpecDefaults( - nighthawk::adaptive_load::AdaptiveLoadSessionSpec spec) { + nighthawk::adaptive_load::AdaptiveLoadSessionSpec spec) const { if (!spec.nighthawk_traffic_template().has_open_loop()) { spec.mutable_nighthawk_traffic_template()->mutable_open_loop()->set_value(true); } @@ -48,7 +48,7 @@ AdaptiveLoadSessionSpecProtoHelperImpl::SetSessionSpecDefaults( } absl::Status AdaptiveLoadSessionSpecProtoHelperImpl::CheckSessionSpec( - const nighthawk::adaptive_load::AdaptiveLoadSessionSpec& spec) { + const nighthawk::adaptive_load::AdaptiveLoadSessionSpec& spec) const { std::vector errors; if (spec.nighthawk_traffic_template().has_duration()) { errors.emplace_back( diff --git a/source/adaptive_load/session_spec_proto_helper_impl.h b/source/adaptive_load/session_spec_proto_helper_impl.h index d47e3a680..d26dcd586 100644 --- a/source/adaptive_load/session_spec_proto_helper_impl.h +++ b/source/adaptive_load/session_spec_proto_helper_impl.h @@ -5,10 +5,10 @@ namespace Nighthawk { class AdaptiveLoadSessionSpecProtoHelperImpl : public AdaptiveLoadSessionSpecProtoHelper { public: nighthawk::adaptive_load::AdaptiveLoadSessionSpec - 
SetSessionSpecDefaults(nighthawk::adaptive_load::AdaptiveLoadSessionSpec spec) override; + SetSessionSpecDefaults(nighthawk::adaptive_load::AdaptiveLoadSessionSpec spec) const override; absl::Status - CheckSessionSpec(const nighthawk::adaptive_load::AdaptiveLoadSessionSpec& spec) override; + CheckSessionSpec(const nighthawk::adaptive_load::AdaptiveLoadSessionSpec& spec) const override; }; } // namespace Nighthawk diff --git a/source/common/nighthawk_service_client_impl.cc b/source/common/nighthawk_service_client_impl.cc index db8f1c7dd..10dc82588 100644 --- a/source/common/nighthawk_service_client_impl.cc +++ b/source/common/nighthawk_service_client_impl.cc @@ -7,7 +7,7 @@ namespace Nighthawk { absl::StatusOr NighthawkServiceClientImpl::PerformNighthawkBenchmark( nighthawk::client::NighthawkService::StubInterface* nighthawk_service_stub, - const nighthawk::client::CommandLineOptions& command_line_options) { + const nighthawk::client::CommandLineOptions& command_line_options) const { nighthawk::client::ExecutionRequest request; nighthawk::client::ExecutionResponse response; *request.mutable_start_request()->mutable_options() = command_line_options; diff --git a/source/common/nighthawk_service_client_impl.h b/source/common/nighthawk_service_client_impl.h index 2dd29862e..d8a14eb44 100644 --- a/source/common/nighthawk_service_client_impl.h +++ b/source/common/nighthawk_service_client_impl.h @@ -19,7 +19,7 @@ class NighthawkServiceClientImpl : public NighthawkServiceClient { public: absl::StatusOr PerformNighthawkBenchmark( nighthawk::client::NighthawkService::StubInterface* nighthawk_service_stub, - const nighthawk::client::CommandLineOptions& command_line_options) override; + const nighthawk::client::CommandLineOptions& command_line_options) const override; }; } // namespace Nighthawk From e3913bb054a247c3ef8c62e5ece0cfaee8329821 Mon Sep 17 00:00:00 2001 From: Otto van der Schaaf Date: Mon, 14 Sep 2020 17:29:23 +0200 Subject: [PATCH 05/63] 
Source/server/configuration.cc/h: const options arg. (#530) Split out from #512 Signed-off-by: Otto van der Schaaf --- source/server/configuration.cc | 2 +- source/server/configuration.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/source/server/configuration.cc b/source/server/configuration.cc index 8ddc92fb6..a6037bf14 100644 --- a/source/server/configuration.cc +++ b/source/server/configuration.cc @@ -29,7 +29,7 @@ bool mergeJsonConfig(absl::string_view json, nighthawk::server::ResponseOptions& } void applyConfigToResponseHeaders(Envoy::Http::ResponseHeaderMap& response_headers, - nighthawk::server::ResponseOptions& response_options) { + const nighthawk::server::ResponseOptions& response_options) { for (const auto& header_value_option : response_options.response_headers()) { const auto& header = header_value_option.header(); auto lower_case_key = Envoy::Http::LowerCaseString(header.key()); diff --git a/source/server/configuration.h b/source/server/configuration.h index ec1f77165..e44cca4a6 100644 --- a/source/server/configuration.h +++ b/source/server/configuration.h @@ -29,7 +29,7 @@ bool mergeJsonConfig(absl::string_view json, nighthawk::server::ResponseOptions& * @param response_options Configuration specifying how to transform the header map. */ void applyConfigToResponseHeaders(Envoy::Http::ResponseHeaderMap& response_headers, - nighthawk::server::ResponseOptions& response_options); + const nighthawk::server::ResponseOptions& response_options); } // namespace Configuration } // namespace Server From 44ba1caa0633f42736f78a22413df1429349a95d Mon Sep 17 00:00:00 2001 From: Otto van der Schaaf Date: Mon, 14 Sep 2020 20:38:49 +0200 Subject: [PATCH 06/63] Enhance end-to-end test for POST requests with an entity body. (#532) Make the end-to-end test for POST handling cover all extensions. 
Split out from #512 Signed-off-by: Otto van der Schaaf --- .../configurations/nighthawk_http_origin.yaml | 1 + test/integration/test_integration_basics.py | 13 +++++++++---- 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/test/integration/configurations/nighthawk_http_origin.yaml b/test/integration/configurations/nighthawk_http_origin.yaml index cf703ec64..4854523de 100644 --- a/test/integration/configurations/nighthawk_http_origin.yaml +++ b/test/integration/configurations/nighthawk_http_origin.yaml @@ -23,6 +23,7 @@ static_resources: domains: - "*" http_filters: + - name: time-tracking - name: dynamic-delay - name: test-server config: diff --git a/test/integration/test_integration_basics.py b/test/integration/test_integration_basics.py index 4ffa0e0ba..5639fd99d 100644 --- a/test/integration/test_integration_basics.py +++ b/test/integration/test_integration_basics.py @@ -414,8 +414,11 @@ def test_cli_output_format(http_test_server_fixture): asserts.assertIn("Percentile", output) -def test_request_body_gets_transmitted(http_test_server_fixture): - """Test request body transmission. +@pytest.mark.parametrize( + 'filter_configs', + ["{}", "{static_delay: \"0.01s\"}", "{emit_previous_request_delta_in_response_header: \"aa\"}"]) +def test_request_body_gets_transmitted(http_test_server_fixture, filter_configs): + """Test request body transmission handling code for our extensions. Ensure that the number of bytes we request for the request body gets reflected in the upstream connection transmitted bytes counter for h1 and h2. @@ -433,14 +436,16 @@ def check_upload_expectations(fixture, parsed_json, expected_transmitted_bytes, "http.ingress_http.downstream_cx_rx_bytes_total"), expected_received_bytes) - upload_bytes = 1024 * 1024 * 3 + # TODO(#531): The dynamic-delay extension hangs unless we lower the request entity body size. 
+ upload_bytes = 1024 * 1024 if "static_delay" in filter_configs else 1024 * 1024 * 3 requests = 10 args = [ http_test_server_fixture.getTestServerRootUri(), "--duration", "100", "--rps", "100", "--request-body-size", str(upload_bytes), "--termination-predicate", "benchmark.http_2xx:%s" % str(requests), "--connections", "1", "--request-method", "POST", - "--max-active-requests", "1" + "--max-active-requests", "1", "--request-header", + "x-nighthawk-test-server-config:%s" % filter_configs ] # Test we transmit the expected amount of bytes with H1 parsed_json, _ = http_test_server_fixture.runNighthawkClient(args) From e744a103756e9242342662442ddb308382e26c8b Mon Sep 17 00:00:00 2001 From: eric846 <56563761+eric846@users.noreply.github.com> Date: Mon, 14 Sep 2020 16:32:52 -0400 Subject: [PATCH 07/63] Adaptive load helper mocks (#529) - MockNighthawkServiceClient - MockMetricsEvaluator - MockAdaptiveLoadSessionSpecProtoHelper Part 8 of splitting PR #483. Signed-off-by: eric846 <56563761+eric846@users.noreply.github.com> --- test/mocks/adaptive_load/BUILD | 29 ++++++++++++ .../adaptive_load/mock_metrics_evaluator.cc | 7 +++ .../adaptive_load/mock_metrics_evaluator.h | 46 +++++++++++++++++++ .../mock_session_spec_proto_helper.cc | 7 +++ .../mock_session_spec_proto_helper.h | 42 +++++++++++++++++ test/mocks/common/BUILD | 10 ++++ .../common/mock_nighthawk_service_client.cc | 7 +++ .../common/mock_nighthawk_service_client.h | 32 +++++++++++++ 8 files changed, 180 insertions(+) create mode 100644 test/mocks/adaptive_load/BUILD create mode 100644 test/mocks/adaptive_load/mock_metrics_evaluator.cc create mode 100644 test/mocks/adaptive_load/mock_metrics_evaluator.h create mode 100644 test/mocks/adaptive_load/mock_session_spec_proto_helper.cc create mode 100644 test/mocks/adaptive_load/mock_session_spec_proto_helper.h create mode 100644 test/mocks/common/mock_nighthawk_service_client.cc create mode 100644 test/mocks/common/mock_nighthawk_service_client.h diff --git 
a/test/mocks/adaptive_load/BUILD b/test/mocks/adaptive_load/BUILD new file mode 100644 index 000000000..ff68762f6 --- /dev/null +++ b/test/mocks/adaptive_load/BUILD @@ -0,0 +1,29 @@ +load( + "@envoy//bazel:envoy_build_system.bzl", + "envoy_cc_mock", + "envoy_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_package() + +envoy_cc_mock( + name = "mock_metrics_evaluator", + srcs = ["mock_metrics_evaluator.cc"], + hdrs = ["mock_metrics_evaluator.h"], + repository = "@envoy", + deps = [ + "//include/nighthawk/adaptive_load:metrics_evaluator", + ], +) + +envoy_cc_mock( + name = "mock_session_spec_proto_helper", + srcs = ["mock_session_spec_proto_helper.cc"], + hdrs = ["mock_session_spec_proto_helper.h"], + repository = "@envoy", + deps = [ + "//include/nighthawk/adaptive_load:session_spec_proto_helper", + ], +) diff --git a/test/mocks/adaptive_load/mock_metrics_evaluator.cc b/test/mocks/adaptive_load/mock_metrics_evaluator.cc new file mode 100644 index 000000000..c3d602a38 --- /dev/null +++ b/test/mocks/adaptive_load/mock_metrics_evaluator.cc @@ -0,0 +1,7 @@ +#include "test/mocks/adaptive_load/mock_metrics_evaluator.h" + +namespace Nighthawk { + +MockMetricsEvaluator::MockMetricsEvaluator() = default; + +} // namespace Nighthawk diff --git a/test/mocks/adaptive_load/mock_metrics_evaluator.h b/test/mocks/adaptive_load/mock_metrics_evaluator.h new file mode 100644 index 000000000..d7824eacd --- /dev/null +++ b/test/mocks/adaptive_load/mock_metrics_evaluator.h @@ -0,0 +1,46 @@ +#pragma once + +#include "nighthawk/adaptive_load/metrics_evaluator.h" + +#include "gmock/gmock.h" + +namespace Nighthawk { + +/** + * A mock MetricsEvaluator that returns empty values from all methods. 
+ * + * Typical usage: + * + * MockMetricsEvaluator mock_metrics_evaluator; + * BenchmarkResult benchmark_result; + * // (set benchmark_result fields here) + * EXPECT_CALL(mock_metrics_evaluator, AnalyzeNighthawkBenchmark(_, _, _)) + * .WillRepeatedly(Return(benchmark_result)); + */ +class MockMetricsEvaluator : public MetricsEvaluator { +public: + /** + * Empty constructor. + */ + MockMetricsEvaluator(); + + MOCK_CONST_METHOD3(EvaluateMetric, + absl::StatusOr( + const nighthawk::adaptive_load::MetricSpec& metric_spec, + MetricsPlugin& metrics_plugin, + const nighthawk::adaptive_load::ThresholdSpec* threshold_spec)); + + MOCK_CONST_METHOD1(ExtractMetricSpecs, + const std::vector>( + const nighthawk::adaptive_load::AdaptiveLoadSessionSpec& spec)); + + MOCK_CONST_METHOD3( + AnalyzeNighthawkBenchmark, + absl::StatusOr( + const nighthawk::client::ExecutionResponse& execution_response, + const nighthawk::adaptive_load::AdaptiveLoadSessionSpec& spec, + const absl::flat_hash_map& name_to_custom_plugin_map)); +}; + +} // namespace Nighthawk diff --git a/test/mocks/adaptive_load/mock_session_spec_proto_helper.cc b/test/mocks/adaptive_load/mock_session_spec_proto_helper.cc new file mode 100644 index 000000000..0d7dbd40b --- /dev/null +++ b/test/mocks/adaptive_load/mock_session_spec_proto_helper.cc @@ -0,0 +1,7 @@ +#include "test/mocks/adaptive_load/mock_session_spec_proto_helper.h" + +namespace Nighthawk { + +MockAdaptiveLoadSessionSpecProtoHelper::MockAdaptiveLoadSessionSpecProtoHelper() = default; + +} // namespace Nighthawk diff --git a/test/mocks/adaptive_load/mock_session_spec_proto_helper.h b/test/mocks/adaptive_load/mock_session_spec_proto_helper.h new file mode 100644 index 000000000..49dda391a --- /dev/null +++ b/test/mocks/adaptive_load/mock_session_spec_proto_helper.h @@ -0,0 +1,42 @@ +#pragma once + +#include "nighthawk/adaptive_load/session_spec_proto_helper.h" + +#include "gmock/gmock.h" + +namespace Nighthawk { + +/** + * A mock 
AdaptiveLoadSessionSpecProtoHelper that returns empty values or success from all methods + * by default. + * + * In particular, SetSessionSpecDefaults does not pass its input value through to its output; + * regardless of the input, it returns an empty proto, unless explicitly configured (see below). If + * you don't need to inspect calls to the spec proto helper, it may be easier to use the real + * AdaptiveLoadSessionSpecProtoHelperImpl in tests instead. + * + * Typical usage: + * + * NiceMock mock_spec_proto_helper; + * EXPECT_CALL(mock_spec_proto_helper, CheckSessionSpec(_)) + * .WillOnce(Return(absl::OkStatus())); + * AdaptiveLoadSessionSpec spec; + * // Set spec fields here, including providing all defaults yourself. + * EXPECT_CALL(mock_spec_proto_helper, SetSessionSpecDefaults(_)) + * .WillOnce(Return(spec)); + */ +class MockAdaptiveLoadSessionSpecProtoHelper : public AdaptiveLoadSessionSpecProtoHelper { +public: + /** + * Empty constructor. + */ + MockAdaptiveLoadSessionSpecProtoHelper(); + + MOCK_CONST_METHOD1(SetSessionSpecDefaults, + nighthawk::adaptive_load::AdaptiveLoadSessionSpec( + const nighthawk::adaptive_load::AdaptiveLoadSessionSpec spec)); + MOCK_CONST_METHOD1(CheckSessionSpec, + absl::Status(const nighthawk::adaptive_load::AdaptiveLoadSessionSpec& spec)); +}; + +} // namespace Nighthawk diff --git a/test/mocks/common/BUILD b/test/mocks/common/BUILD index e74f25e1c..9770301ae 100644 --- a/test/mocks/common/BUILD +++ b/test/mocks/common/BUILD @@ -8,6 +8,16 @@ licenses(["notice"]) # Apache 2 envoy_package() +envoy_cc_mock( + name = "mock_nighthawk_service_client", + srcs = ["mock_nighthawk_service_client.cc"], + hdrs = ["mock_nighthawk_service_client.h"], + repository = "@envoy", + deps = [ + "//include/nighthawk/common:nighthawk_service_client", + ], +) + envoy_cc_mock( name = "mock_rate_limiter", srcs = ["mock_rate_limiter.cc"], diff --git a/test/mocks/common/mock_nighthawk_service_client.cc b/test/mocks/common/mock_nighthawk_service_client.cc
new file mode 100644 index 000000000..9c84170e6 --- /dev/null +++ b/test/mocks/common/mock_nighthawk_service_client.cc @@ -0,0 +1,7 @@ +#include "test/mocks/common/mock_nighthawk_service_client.h" + +namespace Nighthawk { + +MockNighthawkServiceClient::MockNighthawkServiceClient() = default; + +} // namespace Nighthawk diff --git a/test/mocks/common/mock_nighthawk_service_client.h b/test/mocks/common/mock_nighthawk_service_client.h new file mode 100644 index 000000000..66e3bdb46 --- /dev/null +++ b/test/mocks/common/mock_nighthawk_service_client.h @@ -0,0 +1,32 @@ +#pragma once + +#include "nighthawk/common/nighthawk_service_client.h" + +#include "gmock/gmock.h" + +namespace Nighthawk { + +/** + * A mock NighthawkServiceClient that returns an empty response by default. + * + * Typical usage: + * + * NiceMock mock_nighthawk_service_client; + * nighthawk::client::ExecutionResponse nighthawk_response; + * EXPECT_CALL(mock_nighthawk_service_client, PerformNighthawkBenchmark(_, _)) + * .WillRepeatedly(Return(nighthawk_response)); + */ +class MockNighthawkServiceClient : public NighthawkServiceClient { +public: + /** + * Empty constructor. + */ + MockNighthawkServiceClient(); + + MOCK_CONST_METHOD2(PerformNighthawkBenchmark, + absl::StatusOr( + nighthawk::client::NighthawkService::StubInterface* stub, + const nighthawk::client::CommandLineOptions& options)); +}; + +} // namespace Nighthawk From f434a439fada23ef09b3d8a55dcba3d91371cc27 Mon Sep 17 00:00:00 2001 From: Otto van der Schaaf Date: Tue, 15 Sep 2020 17:45:25 +0200 Subject: [PATCH 08/63] Add opt_build option to ci/do_ci.sh. (#536) This is helpful to assist with building Nighthawk's production ready artifacts via docker build images. 
For example: ```bash DOCKER_IMAGE=envoyproxy/envoy-build-[centos|ubuntu] \ ci/run_envoy_docker.sh ci/do_ci.sh opt_build ``` Related issue: #525 Signed-off-by: Otto van der Schaaf --- ci/do_ci.sh | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/ci/do_ci.sh b/ci/do_ci.sh index 4188eecb4..ebe08e643 100755 --- a/ci/do_ci.sh +++ b/ci/do_ci.sh @@ -258,8 +258,13 @@ case "$1" in do_benchmark_with_own_binaries exit 0 ;; + opt_build) + setup_clang_toolchain + do_opt_build + exit 0 + ;; *) - echo "must be one of [build,test,clang_tidy,coverage,coverage_integration,asan,tsan,benchmark_with_own_binaries,docker,check_format,fix_format,test_gcc]" + echo "must be one of [opt_build, build,test,clang_tidy,coverage,coverage_integration,asan,tsan,benchmark_with_own_binaries,docker,check_format,fix_format,test_gcc]" exit 1 ;; esac From 398bd3e305ec0d0ae958fded7c5208aa8f6c8b6d Mon Sep 17 00:00:00 2001 From: Otto van der Schaaf Date: Wed, 16 Sep 2020 18:03:21 +0200 Subject: [PATCH 09/63] Test benchmark docker image build in CI (#537) Now that we have a benchmark docker image build, it would be good to make sure that keeps working in CI. This also makes it easy to start pushing this image in CI in a follow-up. 
Signed-off-by: Otto van der Schaaf --- ci/do_ci.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/ci/do_ci.sh b/ci/do_ci.sh index ebe08e643..8d430b706 100755 --- a/ci/do_ci.sh +++ b/ci/do_ci.sh @@ -25,6 +25,7 @@ function do_build () { function do_opt_build () { bazel build $BAZEL_BUILD_OPTIONS -c opt //:nighthawk + bazel build $BAZEL_BUILD_OPTIONS -c opt //benchmarks:benchmarks } function do_test() { @@ -143,6 +144,7 @@ function do_docker() { do_opt_build ./ci/docker/docker_build.sh ./ci/docker/docker_push.sh + ./ci/docker/benchmark_build.sh } function do_fix_format() { From 5876039bdb2635caf4f311dd39807b9b91f73e69 Mon Sep 17 00:00:00 2001 From: Otto van der Schaaf Date: Thu, 17 Sep 2020 00:52:25 +0200 Subject: [PATCH 10/63] Add HttpFilterIntegrationTestBase (#517) Prelude to #512, which includes it and provides a means to see the end goal. - Extracts shared needs between tested extensions. - Sanitized api, better naming. - Doc comments. - Support for testing POST request methods and entity bodies to check the alternative flow that will trigger in extensions when using that. 
Signed-off-by: Otto van der Schaaf --- test/server/BUILD | 16 +++- .../http_filter_integration_test_base.cc | 63 ++++++++++++++ .../http_filter_integration_test_base.h | 85 +++++++++++++++++++ 3 files changed, 162 insertions(+), 2 deletions(-) create mode 100644 test/server/http_filter_integration_test_base.cc create mode 100644 test/server/http_filter_integration_test_base.h diff --git a/test/server/BUILD b/test/server/BUILD index 0ea078851..71ae178ab 100644 --- a/test/server/BUILD +++ b/test/server/BUILD @@ -1,6 +1,7 @@ load( "@envoy//bazel:envoy_build_system.bzl", "envoy_cc_test", + "envoy_cc_test_library", "envoy_package", ) @@ -8,6 +9,17 @@ licenses(["notice"]) # Apache 2 envoy_package() +envoy_cc_test_library( + name = "http_filter_integration_test_base_lib", + srcs = ["http_filter_integration_test_base.cc"], + hdrs = ["http_filter_integration_test_base.h"], + repository = "@envoy", + deps = [ + "//source/server:well_known_headers_lib", + "@envoy//test/integration:http_integration_lib", + ], +) + envoy_cc_test( name = "http_test_server_filter_integration_test", srcs = ["http_test_server_filter_integration_test.cc"], @@ -25,9 +37,9 @@ envoy_cc_test( srcs = ["http_dynamic_delay_filter_integration_test.cc"], repository = "@envoy", deps = [ + ":http_filter_integration_test_base_lib", "//source/server:http_dynamic_delay_filter_config", "@envoy//source/common/api:api_lib_with_external_headers", - "@envoy//test/integration:http_integration_lib", ], ) @@ -36,10 +48,10 @@ envoy_cc_test( srcs = ["http_time_tracking_filter_integration_test.cc"], repository = "@envoy", deps = [ + ":http_filter_integration_test_base_lib", "//source/server:http_time_tracking_filter_config", "@envoy//include/envoy/upstream:cluster_manager_interface_with_external_headers", "@envoy//source/common/api:api_lib_with_external_headers", - "@envoy//test/integration:http_integration_lib", "@envoy//test/test_common:simulated_time_system_lib", ], ) diff --git 
a/test/server/http_filter_integration_test_base.cc b/test/server/http_filter_integration_test_base.cc new file mode 100644 index 000000000..d204133ad --- /dev/null +++ b/test/server/http_filter_integration_test_base.cc @@ -0,0 +1,63 @@ +#include "test/server/http_filter_integration_test_base.h" + +#include "server/well_known_headers.h" + +#include "gtest/gtest.h" + +namespace Nighthawk { + +HttpFilterIntegrationTestBase::HttpFilterIntegrationTestBase( + Envoy::Network::Address::IpVersion ip_version) + : HttpIntegrationTest(Envoy::Http::CodecClient::Type::HTTP1, ip_version), + request_headers_({{":method", "GET"}, {":path", "/"}, {":authority", "host"}}) {} + +void HttpFilterIntegrationTestBase::initializeFilterConfiguration(absl::string_view config) { + config_helper_.addFilter(std::string(config)); + HttpIntegrationTest::initialize(); +} + +void HttpFilterIntegrationTestBase::setRequestLevelConfiguration( + absl::string_view request_level_config) { + setRequestHeader(Server::TestServer::HeaderNames::get().TestServerConfig, request_level_config); +} + +void HttpFilterIntegrationTestBase::switchToPostWithEntityBody() { + setRequestHeader(Envoy::Http::Headers::get().Method, + Envoy::Http::Headers::get().MethodValues.Post); +} + +void HttpFilterIntegrationTestBase::setRequestHeader( + const Envoy::Http::LowerCaseString& header_name, absl::string_view header_value) { + request_headers_.setCopy(header_name, header_value); +} + +Envoy::IntegrationStreamDecoderPtr +HttpFilterIntegrationTestBase::getResponse(ResponseOrigin expected_origin) { + cleanupUpstreamAndDownstream(); + codec_client_ = makeHttpConnection(lookupPort("http")); + Envoy::IntegrationStreamDecoderPtr response; + const bool is_post = request_headers_.Method()->value().getStringView() == + Envoy::Http::Headers::get().MethodValues.Post; + // Upon observing a POST request method, we inject a content body, as promised in the header file. 
+ // This is useful, because emitting an entity body will hit distinct code in extensions. Hence we + // facilitate that. + const uint64_t request_body_size = is_post ? 1024 : 0; + // An extension can either act as an origin and synthesize a reply, or delegate that + // responsibility to an upstream. This behavior may change from request to request. For example, + // an extension that is designed to transform input from an upstream may start acting as an origin on + // misconfiguration. + if (expected_origin == ResponseOrigin::UPSTREAM) { + response = sendRequestAndWaitForResponse(request_headers_, request_body_size, + default_response_headers_, /*response_body_size*/ 0); + } else { + if (is_post) { + response = codec_client_->makeRequestWithBody(request_headers_, request_body_size); + } else { + response = codec_client_->makeHeaderOnlyRequest(request_headers_); + } + response->waitForEndStream(); + } + return response; +} + +} // namespace Nighthawk diff --git a/test/server/http_filter_integration_test_base.h b/test/server/http_filter_integration_test_base.h new file mode 100644 index 000000000..8027753ec --- /dev/null +++ b/test/server/http_filter_integration_test_base.h @@ -0,0 +1,85 @@ +#pragma once + +#include + +#include "external/envoy/test/integration/http_integration.h" + +namespace Nighthawk { + +/** + * Base class with shared functionality for testing Nighthawk test server http filter extensions. + * The class is stateful, and not safe to use from multiple threads. + */ +class HttpFilterIntegrationTestBase : public Envoy::HttpIntegrationTest { +protected: + /** + * Indicate the expected response origin. A test failure will occur upon mismatch. + */ + enum class ResponseOrigin { + /** + * The upstream will supply the response, and not the extension under test. + */ + UPSTREAM, + /** + * The extension under test will supply a response, and no upstream will be set up to do that.
+ */ + EXTENSION + }; + /** + * Construct a new HttpFilterIntegrationTestBase instance. + * + * @param ip_version Specify the ip version that the integration test server will use to listen + * for connections. + */ + HttpFilterIntegrationTestBase(Envoy::Network::Address::IpVersion ip_version); + + /** + * We don't override SetUp(): tests using this fixture must call initializeFilterConfiguration() + * instead. This is to avoid imposing the need to create a fixture per filter configuration. + * + * @param filter_configuration Pass configuration for the filter under test. Will be handed off to + * Envoy::HttpIntegrationTest::config_helper_.addFilter. + */ + void initializeFilterConfiguration(absl::string_view filter_configuration); + + /** + * Make getResponse send request-level configuration. Test server extensions read that + * configuration and merge it with their static configuration to determine a final effective + * configuration. See TestServerConfig in well_known_headers.h for the up to date header name. + * + * @param request_level_config Configuration to be delivered by request-header in future calls to + * getResponse(). For example: "{response_body_size:1024}". + */ + void setRequestLevelConfiguration(absl::string_view request_level_config); + + /** + * Switch getResponse() to use the POST request method with an entity body. + * Doing so will make tests hit a different code paths in extensions. + */ + void switchToPostWithEntityBody(); + + /** + * Set a request header value. Overwrites any existing value. + * + * @param header_name Name of the request header to set. + * @param header_value Value to set for the request header. + */ + void setRequestHeader(const Envoy::Http::LowerCaseString& header_name, + absl::string_view header_value); + + /** + * Fetch a response, according to the options specified by the class methods. By default, + * simulates a GET request with minimal headers. 
+ * @param expected_origin Indicate which component will be expected to reply: the extension or + * a fake upstream. Will cause a test failure upon mismatch. Can be used to verify that an + * extension short circuits and directly responds when expected. + * @return Envoy::IntegrationStreamDecoderPtr Pointer to the integration stream decoder, which can + * be used to inspect the response. + */ + Envoy::IntegrationStreamDecoderPtr getResponse(ResponseOrigin expected_origin); + +private: + Envoy::Http::TestRequestHeaderMapImpl request_headers_; +}; + +} // namespace Nighthawk From 8b16dc1da919a5152112049107fc045894abf071 Mon Sep 17 00:00:00 2001 From: Otto van der Schaaf Date: Thu, 17 Sep 2020 21:10:10 +0200 Subject: [PATCH 11/63] Add experimental "pedantic" fortio output formatter. (#534) Adds a post-processing step to massage the fortio output formatting to more exactly reproduce Fortio's output, while leaving the original output formatter intact for backwards compatibility purposes. Fixes known issues #422 and #514. Experimental until we have some confirmation that this can be marked as final / all issues have been resolved. Signed-off-by: Otto van der Schaaf --- README.md | 19 +- api/client/options.proto | 1 + source/client/output_formatter_impl.cc | 21 + source/client/output_formatter_impl.h | 21 + test/BUILD | 1 + test/output_formatter_test.cc | 17 +- ...tput_formatter.medium.fortio-noquirks.gold | 483 ++++++++++++++++++ 7 files changed, 553 insertions(+), 10 deletions(-) create mode 100644 test/test_data/output_formatter.medium.fortio-noquirks.gold diff --git a/README.md b/README.md index 7b87c3a14..e5bdb24ba 100644 --- a/README.md +++ b/README.md @@ -70,7 +70,8 @@ format>] [--sequencer-idle-strategy ] [--address-family ] [--burst-size ] [--prefetch-connections] [--output-format -] [-v ] [-v ] [--concurrency ] [--h2] [--timeout ] [--duration ] @@ -220,9 +221,11 @@ Release requests in bursts of the specified size (default: 0). 
--prefetch-connections Use proactive connection prefetching (HTTP/1 only). ---output-format +--output-format Output format. Possible values: {"json", "human", "yaml", "dotted", -"fortio"}. The default output format is 'human'. +"fortio", "experimental_fortio_pedantic"}. The default output format +is 'human'. -v , --verbosity @@ -329,15 +332,17 @@ Nighthawk comes with a tool to transform its json output to its other supported USAGE: bazel-bin/nighthawk_output_transform --output-format [--] [--version] -[-h] +|dotted|fortio +|experimental_fortio_pedantic> [--] +[--version] [-h] Where: ---output-format +--output-format (required) Output format. Possible values: {"json", "human", "yaml", -"dotted", "fortio"}. +"dotted", "fortio", "experimental_fortio_pedantic"}. --, --ignore_rest Ignores the rest of the labeled arguments following this flag. diff --git a/api/client/options.proto b/api/client/options.proto index 3cf370141..6e2aa1841 100644 --- a/api/client/options.proto +++ b/api/client/options.proto @@ -58,6 +58,7 @@ message OutputFormat { YAML = 3; DOTTED = 4; FORTIO = 5; + EXPERIMENTAL_FORTIO_PEDANTIC = 6; } OutputFormatOptions value = 1; } diff --git a/source/client/output_formatter_impl.cc b/source/client/output_formatter_impl.cc index 81276ae84..aa4eedd96 100644 --- a/source/client/output_formatter_impl.cc +++ b/source/client/output_formatter_impl.cc @@ -3,6 +3,7 @@ #include #include +#include #include #include "nighthawk/common/exception.h" @@ -393,5 +394,25 @@ const nighthawk::client::DurationHistogram FortioOutputFormatterImpl::renderFort return fortio_histogram; } +std::string +FortioPedanticOutputFormatterImpl::formatProto(const nighthawk::client::Output& output) const { + std::string res = FortioOutputFormatterImpl::formatProto(output); + // clang-format off + // Fix two types of quirks. We disable linting because we use std::regex directly. + // This should be OK as the regular expression we use can be trusted. + // 1. 
We misdefined RequestedRPS as an int, whereas Fortio outputs that as a string. + res = std::regex_replace(res, std::regex(R"EOF("RequestedQPS"\: ([0-9]*))EOF"), + R"EOF("RequestedQPS": "$1")EOF"); + // 2. Our uint64's get serialized as json strings. Fortio outputs them as json integers. + // An example of a string that would match the regular expression below would be: + // "Count": "100", which then would be replaced to look like: "Count": 100. + // NOTE: [0-9][0-9][0-9] looks for string fields referring to http status codes, which get counted. + res = std::regex_replace( + res, std::regex(R"EOF("([0-9][0-9][0-9]|Count|BytesSent|BytesReceived)"\: "([0-9]*)")EOF"), + R"EOF("$1": $2)EOF"); + // clang-format on + return res; +} + } // namespace Client } // namespace Nighthawk \ No newline at end of file diff --git a/source/client/output_formatter_impl.h b/source/client/output_formatter_impl.h index ff21bc44f..37fa96d1d 100644 --- a/source/client/output_formatter_impl.h +++ b/source/client/output_formatter_impl.h @@ -108,5 +108,26 @@ class FortioOutputFormatterImpl : public OutputFormatterImpl { double durationToSeconds(const Envoy::ProtobufWkt::Duration& duration) const; }; +/** + * Applies corrections to the output of the original FortioOutputFormatterImpl class, + * to make the output adhere better to Fortio's actual output. + * In particular, the proto json serializer outputs 64 bits integers as strings, whereas + * Fortio outputs them unquoted / as integers, trusting that consumers side can take that + * well. We also fix the RequestedQPS field which was defined as an integer, but gets + * represented as a string in Fortio's json output. + */ +class FortioPedanticOutputFormatterImpl : public FortioOutputFormatterImpl { +public: + /** + * Format Nighthawk's native output proto to Fortio's output format. + * This relies on the base class to provide the initial render, and applies + * post processing to make corrections afterwards. 
+ * + * @param output Nighthawk's native output proto that will be transformed. + * @return std::string Fortio formatted json string. + */ + std::string formatProto(const nighthawk::client::Output& output) const override; +}; + } // namespace Client } // namespace Nighthawk \ No newline at end of file diff --git a/test/BUILD b/test/BUILD index f88f59e3a..ecb8d5039 100644 --- a/test/BUILD +++ b/test/BUILD @@ -118,6 +118,7 @@ envoy_cc_test( "test_data/output_formatter.dotted.gold", "test_data/output_formatter.json.gold", "test_data/output_formatter.medium.fortio.gold", + "test_data/output_formatter.medium.fortio-noquirks.gold", "test_data/output_formatter.medium.proto.gold", "test_data/output_formatter.txt.gold", "test_data/output_formatter.yaml.gold", diff --git a/test/output_formatter_test.cc b/test/output_formatter_test.cc index d734bddb4..8e23185b7 100644 --- a/test/output_formatter_test.cc +++ b/test/output_formatter_test.cc @@ -126,7 +126,8 @@ TEST_F(OutputCollectorTest, GetLowerCaseOutputFormats) { auto output_formats = OutputFormatterImpl::getLowerCaseOutputFormats(); // When you're looking at this code you probably just added an output format. // This is to point out that you might want to update the list below and add a test above. 
- ASSERT_THAT(output_formats, ElementsAre("json", "human", "yaml", "dotted", "fortio")); + ASSERT_THAT(output_formats, ElementsAre("json", "human", "yaml", "dotted", "fortio", + "experimental_fortio_pedantic")); } class FortioOutputCollectorTest : public OutputCollectorTest { @@ -186,7 +187,8 @@ class MediumOutputCollectorTest : public OutputCollectorTest { }; TEST_F(MediumOutputCollectorTest, FortioFormatter) { - const auto input_proto = loadProtoFromFile("test/test_data/output_formatter.medium.proto.gold"); + const nighthawk::client::Output input_proto = + loadProtoFromFile("test/test_data/output_formatter.medium.proto.gold"); FortioOutputFormatterImpl formatter; expectEqualToGoldFile(formatter.formatProto(input_proto), "test/test_data/output_formatter.medium.fortio.gold"); @@ -202,7 +204,8 @@ TEST_F(MediumOutputCollectorTest, FortioFormatter0sJitterUniformGetsReflected) { } TEST_F(MediumOutputCollectorTest, ConsoleOutputFormatter) { - const auto input_proto = loadProtoFromFile("test/test_data/percentile-column-overflow.json"); + const nighthawk::client::Output input_proto = + loadProtoFromFile("test/test_data/percentile-column-overflow.json"); ConsoleOutputFormatterImpl formatter; expectEqualToGoldFile(formatter.formatProto(input_proto), "test/test_data/percentile-column-overflow.txt.gold"); @@ -224,5 +227,13 @@ TEST_F(StatidToNameTest, TestTranslations) { } } +TEST_F(MediumOutputCollectorTest, FortioPedanticFormatter) { + const nighthawk::client::Output input_proto = + loadProtoFromFile("test/test_data/output_formatter.medium.proto.gold"); + FortioPedanticOutputFormatterImpl formatter; + expectEqualToGoldFile(formatter.formatProto(input_proto), + "test/test_data/output_formatter.medium.fortio-noquirks.gold"); +} + } // namespace Client } // namespace Nighthawk diff --git a/test/test_data/output_formatter.medium.fortio-noquirks.gold b/test/test_data/output_formatter.medium.fortio-noquirks.gold new file mode 100644 index 000000000..ed8b4c799 --- /dev/null +++ 
b/test/test_data/output_formatter.medium.fortio-noquirks.gold @@ -0,0 +1,483 @@ +{ + "Labels": "label-a label-b Nighthawk", + "RequestedQPS": "30", + "ActualQPS": 27.99999399400129, + "ActualDuration": 2000000429, + "NumThreads": 300, + "DurationHistogram": { + "Count": 53, + "Data": [ + { + "Start": 0.053798911, + "End": 0.053798911, + "Percent": 0, + "Count": 1 + }, + { + "Start": 0.053798911, + "End": 0.056340479, + "Percent": 10, + "Count": 5 + }, + { + "Start": 0.056340479, + "End": 0.057706495, + "Percent": 20, + "Count": 5 + }, + { + "Start": 0.057706495, + "End": 0.060055551, + "Percent": 30, + "Count": 5 + }, + { + "Start": 0.060055551, + "End": 0.062949375, + "Percent": 40, + "Count": 6 + }, + { + "Start": 0.062949375, + "End": 0.065077247, + "Percent": 50, + "Count": 5 + }, + { + "Start": 0.065077247, + "End": 0.066666495, + "Percent": 55.000000000000007, + "Count": 3 + }, + { + "Start": 0.066666495, + "End": 0.067948543, + "Percent": 60, + "Count": 2 + }, + { + "Start": 0.067948543, + "End": 0.068751359, + "Percent": 65, + "Count": 3 + }, + { + "Start": 0.068751359, + "End": 0.072949759, + "Percent": 70, + "Count": 3 + }, + { + "Start": 0.072949759, + "End": 0.074465279, + "Percent": 75, + "Count": 2 + }, + { + "Start": 0.074465279, + "End": 0.075792383, + "Percent": 77.5, + "Count": 2 + }, + { + "Start": 0.075792383, + "End": 0.078065663, + "Percent": 80, + "Count": 1 + }, + { + "Start": 0.078065663, + "End": 0.078168063, + "Percent": 82.5, + "Count": 1 + }, + { + "Start": 0.078168063, + "End": 0.080400383, + "Percent": 85, + "Count": 2 + }, + { + "Start": 0.080400383, + "End": 0.082427903, + "Percent": 87.5, + "Count": 1 + }, + { + "Start": 0.082427903, + "End": 0.085045247, + "Percent": 88.75, + "Count": 1 + }, + { + "Start": 0.085045247, + "End": 0.085045247, + "Percent": 90, + "Count": 0 + }, + { + "Start": 0.085045247, + "End": 0.085626879, + "Percent": 91.25, + "Count": 1 + }, + { + "Start": 0.085626879, + "End": 0.090656767, + "Percent": 92.5, + 
"Count": 1 + }, + { + "Start": 0.090656767, + "End": 0.090656767, + "Percent": 93.75, + "Count": 0 + }, + { + "Start": 0.090656767, + "End": 0.247513087, + "Percent": 94.375, + "Count": 1 + }, + { + "Start": 0.247513087, + "End": 0.247513087, + "Percent": 95, + "Count": 0 + }, + { + "Start": 0.247513087, + "End": 0.247513087, + "Percent": 95.625, + "Count": 0 + }, + { + "Start": 0.247513087, + "End": 0.281362431, + "Percent": 96.25, + "Count": 1 + }, + { + "Start": 0.281362431, + "End": 0.281362431, + "Percent": 96.875, + "Count": 0 + }, + { + "Start": 0.281362431, + "End": 0.281362431, + "Percent": 97.1875, + "Count": 0 + }, + { + "Start": 0.281362431, + "End": 0.281362431, + "Percent": 97.5, + "Count": 0 + }, + { + "Start": 0.281362431, + "End": 0.281362431, + "Percent": 97.8125, + "Count": 0 + }, + { + "Start": 0.281362431, + "End": 0.310886399, + "Percent": 98.125, + "Count": 1 + }, + { + "Start": 0.310886399, + "End": 0.310886399, + "Percent": 100, + "Count": 0 + } + ], + "Min": 0.053798911, + "Max": 0.310886399, + "Sum": 4.156954618, + "Avg": 0.078433106, + "StdDev": 0.05052302, + "Percentiles": [ + { + "Percentile": 50, + "Value": 0.065077247 + }, + { + "Percentile": 75, + "Value": 0.074465279 + }, + { + "Percentile": 80, + "Value": 0.078065663 + }, + { + "Percentile": 90, + "Value": 0.085045247 + }, + { + "Percentile": 95, + "Value": 0.247513087 + } + ] + }, + "RetCodes": { + "200": 56 + }, + "URL": "https://www.google.com/", + "Version": "0.0.0", + "Jitter": true, + "RunType": "HTTP", + "Sizes": { + "Count": 56, + "Data": [ + { + "Start": 847, + "End": 847, + "Percent": 0, + "Count": 56 + }, + { + "Start": 847, + "End": 847, + "Percent": 100, + "Count": 0 + } + ], + "Min": 847, + "Max": 847, + "Sum": 47432, + "Avg": 847, + "StdDev": 0, + "Percentiles": [] + }, + "HeaderSizes": { + "Count": 56, + "Data": [ + { + "Start": 47257, + "End": 47257, + "Percent": 0, + "Count": 1 + }, + { + "Start": 47257, + "End": 47289, + "Percent": 10, + "Count": 5 + }, + { + 
"Start": 47289, + "End": 47299, + "Percent": 20, + "Count": 6 + }, + { + "Start": 47299, + "End": 47305, + "Percent": 30, + "Count": 5 + }, + { + "Start": 47305, + "End": 47311, + "Percent": 40, + "Count": 7 + }, + { + "Start": 47311, + "End": 47317, + "Percent": 50, + "Count": 7 + }, + { + "Start": 47317, + "End": 47317, + "Percent": 55.000000000000007, + "Count": 0 + }, + { + "Start": 47317, + "End": 47321, + "Percent": 60, + "Count": 4 + }, + { + "Start": 47321, + "End": 47323, + "Percent": 65, + "Count": 2 + }, + { + "Start": 47323, + "End": 47327, + "Percent": 70, + "Count": 5 + }, + { + "Start": 47327, + "End": 47327, + "Percent": 75, + "Count": 0 + }, + { + "Start": 47327, + "End": 47329, + "Percent": 77.5, + "Count": 2 + }, + { + "Start": 47329, + "End": 47331, + "Percent": 80, + "Count": 3 + }, + { + "Start": 47331, + "End": 47331, + "Percent": 82.5, + "Count": 0 + }, + { + "Start": 47331, + "End": 47333, + "Percent": 85, + "Count": 2 + }, + { + "Start": 47333, + "End": 47333, + "Percent": 87.5, + "Count": 0 + }, + { + "Start": 47333, + "End": 47335, + "Percent": 88.75, + "Count": 1 + }, + { + "Start": 47335, + "End": 47339, + "Percent": 90, + "Count": 1 + }, + { + "Start": 47339, + "End": 47341, + "Percent": 91.25, + "Count": 1 + }, + { + "Start": 47341, + "End": 47341, + "Percent": 92.5, + "Count": 0 + }, + { + "Start": 47341, + "End": 47347, + "Percent": 93.75, + "Count": 1 + }, + { + "Start": 47347, + "End": 47347, + "Percent": 94.375, + "Count": 0 + }, + { + "Start": 47347, + "End": 47353, + "Percent": 95, + "Count": 2 + }, + { + "Start": 47353, + "End": 47353, + "Percent": 95.625, + "Count": 0 + }, + { + "Start": 47353, + "End": 47353, + "Percent": 96.25, + "Count": 0 + }, + { + "Start": 47353, + "End": 47353, + "Percent": 96.875, + "Count": 0 + }, + { + "Start": 47353, + "End": 47353, + "Percent": 97.1875, + "Count": 0 + }, + { + "Start": 47353, + "End": 47353, + "Percent": 97.5, + "Count": 0 + }, + { + "Start": 47353, + "End": 47353, + "Percent": 
97.8125, + "Count": 0 + }, + { + "Start": 47353, + "End": 47353, + "Percent": 98.125, + "Count": 0 + }, + { + "Start": 47353, + "End": 47357, + "Percent": 98.4375, + "Count": 1 + }, + { + "Start": 47357, + "End": 47357, + "Percent": 100, + "Count": 0 + } + ], + "Min": 47257, + "Max": 47357, + "Sum": 2649612, + "Avg": 47314.5, + "StdDev": 20.799210149838451, + "Percentiles": [ + { + "Percentile": 50, + "Value": 47317 + }, + { + "Percentile": 75, + "Value": 47327 + }, + { + "Percentile": 80, + "Value": 47331 + }, + { + "Percentile": 90, + "Value": 47339 + }, + { + "Percentile": 95, + "Value": 47353 + } + ] + }, + "BytesSent": 3528, + "BytesReceived": 2702142, + "StartTime": "2020-01-11T12:47:57.259885200Z", + "RequestedDuration": "2s" +} From 6442e38acfea0ea29865efa28558e656d9c09d3f Mon Sep 17 00:00:00 2001 From: Otto van der Schaaf Date: Fri, 18 Sep 2020 04:45:12 +0200 Subject: [PATCH 12/63] I forgot the last step, which is to actually teach the output (#544) formatter factory when to construct the new formatter. Sorry! 
- Amends a pre-existing test to avoid regression when adding new output formats in the future - Fix the issue Fixes #543 Signed-off-by: Otto van der Schaaf --- source/client/factories_impl.cc | 2 ++ test/output_transform_main_test.cc | 21 ++++++++++++++------- 2 files changed, 16 insertions(+), 7 deletions(-) diff --git a/source/client/factories_impl.cc b/source/client/factories_impl.cc index ba1cb9e56..6a35458f7 100644 --- a/source/client/factories_impl.cc +++ b/source/client/factories_impl.cc @@ -108,6 +108,8 @@ OutputFormatterPtr OutputFormatterFactoryImpl::create( return std::make_unique(); case nighthawk::client::OutputFormat::FORTIO: return std::make_unique(); + case nighthawk::client::OutputFormat::EXPERIMENTAL_FORTIO_PEDANTIC: + return std::make_unique(); default: NOT_REACHED_GCOVR_EXCL_LINE; } diff --git a/test/output_transform_main_test.cc b/test/output_transform_main_test.cc index 4454391b3..43df89261 100644 --- a/test/output_transform_main_test.cc +++ b/test/output_transform_main_test.cc @@ -6,6 +6,7 @@ #include "api/client/service.pb.h" +#include "client/output_formatter_impl.h" #include "client/output_transform_main.h" #include "gtest/gtest.h" @@ -54,13 +55,19 @@ TEST_F(OutputTransformMainTest, JsonNotValidating) { EXPECT_NE(main.run(), 0); } -TEST_F(OutputTransformMainTest, HappyFlow) { - std::vector argv = {"foo", "--output-format", "human"}; - nighthawk::client::Output output; - output.mutable_options()->mutable_uri()->set_value("http://127.0.0.1/"); - stream_ << Envoy::MessageUtil::getJsonStringFromMessage(output, true, true); - OutputTransformMain main(argv.size(), argv.data(), stream_); - EXPECT_EQ(main.run(), 0); +TEST_F(OutputTransformMainTest, HappyFlowForAllOutputFormats) { + for (const std::string& output_format : OutputFormatterImpl::getLowerCaseOutputFormats()) { + std::vector argv = {"foo", "--output-format", output_format.c_str()}; + nighthawk::client::Output output; + if (output_format.find("fortio") != std::string::npos) { + // The 
fortio output formatter mandates at least a single global result or it throws. + output.add_results()->set_name("global"); + } + output.mutable_options()->mutable_uri()->set_value("http://127.0.0.1/"); + stream_ << Envoy::MessageUtil::getJsonStringFromMessage(output, true, true); + OutputTransformMain main(argv.size(), argv.data(), stream_); + EXPECT_EQ(main.run(), 0); + } } } // namespace Client From 8fcf416d1e31bf1f6f2e8688c86f24495aef87b7 Mon Sep 17 00:00:00 2001 From: Otto van der Schaaf Date: Fri, 18 Sep 2020 04:46:01 +0200 Subject: [PATCH 13/63] Update Envoy & HdrHistogram_c (#545) - Update to Envoy 9eeba8fd427d9bd0ef947ec14a3157083cc7bf0e - Update HdrHistogram_c from 0.11.0 -> 0.11.1 Signed-off-by: Otto van der Schaaf --- .bazelrc | 3 +++ bazel/repositories.bzl | 8 ++++---- source/server/README.md | 13 ++++++++++--- 3 files changed, 17 insertions(+), 7 deletions(-) diff --git a/.bazelrc b/.bazelrc index 4401c0dea..12995e93c 100644 --- a/.bazelrc +++ b/.bazelrc @@ -137,6 +137,7 @@ build:coverage --action_env=GCOV=llvm-profdata build:coverage --copt=-DNDEBUG # 1.5x original timeout + 300s for trace merger in all categories build:coverage --test_timeout=390,750,1500,5700 +build:coverage --define=dynamic_link_tests=true build:coverage --define=ENVOY_CONFIG_COVERAGE=1 build:coverage --cxxopt="-DENVOY_CONFIG_COVERAGE=1" build:coverage --coverage_support=@envoy//bazel/coverage:coverage_support @@ -282,6 +283,8 @@ build:asan-fuzzer --test_env=UBSAN_OPTIONS=print_stacktrace=1 # Fuzzing without ASAN. This is useful for profiling fuzzers without any ASAN artifacts. 
build:plain-fuzzer --define=FUZZING_ENGINE=libfuzzer build:plain-fuzzer --define ENVOY_CONFIG_ASAN=1 +build:plain-fuzzer --copt=-fsanitize=fuzzer-no-link +build:plain-fuzzer --linkopt=-fsanitize=fuzzer-no-link # Compile database generation config build:compdb --build_tag_filters=-nocompdb diff --git a/bazel/repositories.bzl b/bazel/repositories.bzl index 1033d49ba..7c2264c93 100644 --- a/bazel/repositories.bzl +++ b/bazel/repositories.bzl @@ -1,10 +1,10 @@ load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") -ENVOY_COMMIT = "5187c347f878e322ca9c78807446189c1335fb5c" # September 9th, 2020 -ENVOY_SHA = "10f2307883a12676c1b5da3b44a2075cc32433a7fa6fd475937634f119b18ea1" +ENVOY_COMMIT = "9eeba8fd427d9bd0ef947ec14a3157083cc7bf0e" # September 17th, 2020 +ENVOY_SHA = "4537bde6652ea00db9b45c126c0519619909bc0d79c6ede02d68a8782f8c1c67" -HDR_HISTOGRAM_C_VERSION = "0.11.0" # July 14th, 2020 -HDR_HISTOGRAM_C_SHA = "c00696b3d81776675aa2bc62d3642e31bd8a48cc9619c9bd7d4a78762896e353" +HDR_HISTOGRAM_C_VERSION = "0.11.1" # September 17th, 2020 +HDR_HISTOGRAM_C_SHA = "8550071d4ae5c8229448f9b68469d6d42c620cd25111b49c696d00185e5f8329" def nighthawk_dependencies(): http_archive( diff --git a/source/server/README.md b/source/server/README.md index 299332c72..d98631126 100644 --- a/source/server/README.md +++ b/source/server/README.md @@ -159,9 +159,10 @@ same time. 
``` USAGE: -bazel-bin/nighthawk_test_server [--disable-extensions ] -[--use-fake-symbol-table ] -[--cpuset-threads] +bazel-bin/nighthawk_test_server [--socket-mode ] [--socket-path +] [--disable-extensions +] [--use-fake-symbol-table +] [--cpuset-threads] [--enable-mutex-tracing] [--disable-hot-restart] [--mode ] [--parent-shutdown-time-s @@ -195,6 +196,12 @@ bazel-bin/nighthawk_test_server [--disable-extensions ] Where: +--socket-mode +Socket file permission + +--socket-path +Path to hot restart socket file + --disable-extensions Comma-separated list of extensions to disable From 040af225c7fd21722a841233a5cff38391f15b26 Mon Sep 17 00:00:00 2001 From: Otto van der Schaaf Date: Fri, 18 Sep 2020 22:11:01 +0200 Subject: [PATCH 14/63] Switch dynamic delay and time-tracking to use FilterConfigurationBase. (#540) Second step in unifying configuration handling across extensions: make these two extensions inherit from and use FilterConfigurationBase. Fix POST handling. Split out from #512 Signed-off-by: Otto van der Schaaf --- source/server/BUILD | 16 +++++--- source/server/http_dynamic_delay_filter.cc | 45 +++++++++++----------- source/server/http_dynamic_delay_filter.h | 29 +++----------- source/server/http_filter_config_base.cc | 4 +- source/server/http_filter_config_base.h | 1 - source/server/http_time_tracking_filter.cc | 34 ++++++++-------- source/server/http_time_tracking_filter.h | 17 +++----- 7 files changed, 63 insertions(+), 83 deletions(-) diff --git a/source/server/BUILD b/source/server/BUILD index dde36af5a..33e9a6dfc 100644 --- a/source/server/BUILD +++ b/source/server/BUILD @@ -20,11 +20,20 @@ envoy_cc_library( envoy_cc_library( name = "configuration_lib", - srcs = ["configuration.cc"], - hdrs = ["configuration.h"], + srcs = [ + "configuration.cc", + "http_filter_config_base.cc", + ], + hdrs = [ + "configuration.h", + "http_filter_config_base.h", + ], repository = "@envoy", deps = [ + ":well_known_headers_lib", 
"//api/server:response_options_proto_cc_proto", + "@envoy//include/envoy/server:filter_config_interface_with_external_headers", + "@envoy//source/common/common:statusor_lib_with_external_headers", "@envoy//source/common/protobuf:message_validator_lib_with_external_headers", "@envoy//source/common/protobuf:utility_lib_with_external_headers", "@envoy//source/common/singleton:const_singleton_with_external_headers", @@ -38,7 +47,6 @@ envoy_cc_library( repository = "@envoy", deps = [ ":configuration_lib", - ":well_known_headers_lib", "//api/server:response_options_proto_cc_proto", "@envoy//source/exe:envoy_common_lib_with_external_headers", ], @@ -51,7 +59,6 @@ envoy_cc_library( repository = "@envoy", deps = [ ":configuration_lib", - ":well_known_headers_lib", "//api/server:response_options_proto_cc_proto", "//source/common:thread_safe_monotonic_time_stopwatch_lib", "@envoy//source/exe:envoy_common_lib_with_external_headers", @@ -66,7 +73,6 @@ envoy_cc_library( repository = "@envoy", deps = [ ":configuration_lib", - ":well_known_headers_lib", "//api/server:response_options_proto_cc_proto", "@envoy//source/exe:envoy_common_lib_with_external_headers", "@envoy//source/extensions/filters/http/fault:fault_filter_lib_with_external_headers", diff --git a/source/server/http_dynamic_delay_filter.cc b/source/server/http_dynamic_delay_filter.cc index 542cedefa..ebc3254fc 100644 --- a/source/server/http_dynamic_delay_filter.cc +++ b/source/server/http_dynamic_delay_filter.cc @@ -13,10 +13,10 @@ namespace Nighthawk { namespace Server { HttpDynamicDelayDecoderFilterConfig::HttpDynamicDelayDecoderFilterConfig( - nighthawk::server::ResponseOptions proto_config, Envoy::Runtime::Loader& runtime, + const nighthawk::server::ResponseOptions& proto_config, Envoy::Runtime::Loader& runtime, const std::string& stats_prefix, Envoy::Stats::Scope& scope, Envoy::TimeSource& time_source) - : server_config_(std::move(proto_config)), runtime_(runtime), - stats_prefix_(absl::StrCat(stats_prefix, 
"dynamic-delay.")), scope_(scope), + : FilterConfigurationBase(proto_config, "dynamic-delay"), runtime_(runtime), + stats_prefix_(absl::StrCat(stats_prefix, fmt::format("{}.", filter_name()))), scope_(scope), time_source_(time_source) {} HttpDynamicDelayDecoderFilter::HttpDynamicDelayDecoderFilter( @@ -40,32 +40,31 @@ void HttpDynamicDelayDecoderFilter::onDestroy() { Envoy::Http::FilterHeadersStatus HttpDynamicDelayDecoderFilter::decodeHeaders(Envoy::Http::RequestHeaderMap& headers, bool end_stream) { - response_options_ = config_->server_config(); - std::string error_message; - if (!computeResponseOptions(headers, error_message)) { - decoder_callbacks_->sendLocalReply( - static_cast(500), - fmt::format("dynamic-delay didn't understand the request: {}", error_message), nullptr, - absl::nullopt, ""); - return Envoy::Http::FilterHeadersStatus::StopIteration; + config_->computeEffectiveConfiguration(headers); + if (config_->getEffectiveConfiguration().ok()) { + const absl::optional delay_ms = computeDelayMs( + *config_->getEffectiveConfiguration().value(), config_->approximateFilterInstances()); + maybeRequestFaultFilterDelay(delay_ms, headers); + } else { + if (end_stream) { + config_->maybeSendErrorReply(*decoder_callbacks_); + return Envoy::Http::FilterHeadersStatus::StopIteration; + } + return Envoy::Http::FilterHeadersStatus::Continue; } - const absl::optional delay_ms = - computeDelayMs(response_options_, config_->approximateFilterInstances()); - maybeRequestFaultFilterDelay(delay_ms, headers); return Envoy::Extensions::HttpFilters::Fault::FaultFilter::decodeHeaders(headers, end_stream); } -bool HttpDynamicDelayDecoderFilter::computeResponseOptions( - const Envoy::Http::RequestHeaderMap& headers, std::string& error_message) { - response_options_ = config_->server_config(); - const auto* request_config_header = headers.get(TestServer::HeaderNames::get().TestServerConfig); - if (request_config_header) { - if 
(!Configuration::mergeJsonConfig(request_config_header->value().getStringView(), - response_options_, error_message)) { - return false; +Envoy::Http::FilterDataStatus +HttpDynamicDelayDecoderFilter::decodeData(Envoy::Buffer::Instance& buffer, bool end_stream) { + if (!config_->getEffectiveConfiguration().ok()) { + if (end_stream) { + config_->maybeSendErrorReply(*decoder_callbacks_); + return Envoy::Http::FilterDataStatus::StopIterationNoBuffer; } + return Envoy::Http::FilterDataStatus::Continue; } - return true; + return Envoy::Extensions::HttpFilters::Fault::FaultFilter::decodeData(buffer, end_stream); } absl::optional HttpDynamicDelayDecoderFilter::computeDelayMs( diff --git a/source/server/http_dynamic_delay_filter.h b/source/server/http_dynamic_delay_filter.h index 4c60a1822..654a3d752 100644 --- a/source/server/http_dynamic_delay_filter.h +++ b/source/server/http_dynamic_delay_filter.h @@ -9,6 +9,8 @@ #include "api/server/response_options.pb.h" +#include "server/http_filter_config_base.h" + namespace Nighthawk { namespace Server { @@ -17,7 +19,7 @@ namespace Server { * Instances of this class will be shared accross instances of HttpDynamicDelayDecoderFilter. * The methods for getting and manipulating (global) active filter instance counts are thread safe. */ -class HttpDynamicDelayDecoderFilterConfig { +class HttpDynamicDelayDecoderFilterConfig : public FilterConfigurationBase { public: /** @@ -31,17 +33,10 @@ class HttpDynamicDelayDecoderFilterConfig { * @param scope Statistics scope to be used by the filter. * @param time_source Time source to be used by the filter. 
*/ - HttpDynamicDelayDecoderFilterConfig(nighthawk::server::ResponseOptions proto_config, + HttpDynamicDelayDecoderFilterConfig(const nighthawk::server::ResponseOptions& proto_config, Envoy::Runtime::Loader& runtime, const std::string& stats_prefix, Envoy::Stats::Scope& scope, Envoy::TimeSource& time_source); - - /** - * @return const nighthawk::server::ResponseOptions& read-only reference to the proto config - * object. - */ - const nighthawk::server::ResponseOptions& server_config() const { return server_config_; } - /** * Increments the number of globally active filter instances. */ @@ -79,7 +74,6 @@ class HttpDynamicDelayDecoderFilterConfig { std::string stats_prefix() { return stats_prefix_; } private: - const nighthawk::server::ResponseOptions server_config_; static std::atomic& instances() { // We lazy-init the atomic to avoid static initialization / a fiasco. MUTABLE_CONSTRUCT_ON_FIRST_USE(std::atomic, 0); // NOLINT @@ -112,21 +106,9 @@ class HttpDynamicDelayDecoderFilter : public Envoy::Extensions::HttpFilters::Fau // Http::StreamDecoderFilter Envoy::Http::FilterHeadersStatus decodeHeaders(Envoy::Http::RequestHeaderMap&, bool) override; + Envoy::Http::FilterDataStatus decodeData(Envoy::Buffer::Instance&, bool) override; void setDecoderFilterCallbacks(Envoy::Http::StreamDecoderFilterCallbacks&) override; - /** - * Compute the response options based on the static configuration and optional configuration - * provided via the request headers. After a successfull call the response_options_ field will - * have been modified to reflect request-level configuration. - * - * @param request_headers The request headers set to inspect for configuration. - * @param error_message Set to an error message if the request-level configuration couldn't be - * interpreted. - * @return true iff the configuration was successfully computed. 
- */ - bool computeResponseOptions(const Envoy::Http::RequestHeaderMap& request_headers, - std::string& error_message); - /** * Compute the concurrency based linear delay in milliseconds. * @@ -179,7 +161,6 @@ class HttpDynamicDelayDecoderFilter : public Envoy::Extensions::HttpFilters::Fau private: const HttpDynamicDelayDecoderFilterConfigSharedPtr config_; Envoy::Http::StreamDecoderFilterCallbacks* decoder_callbacks_; - nighthawk::server::ResponseOptions response_options_; bool destroyed_{false}; }; diff --git a/source/server/http_filter_config_base.cc b/source/server/http_filter_config_base.cc index cd1d52e14..a8dc933e9 100644 --- a/source/server/http_filter_config_base.cc +++ b/source/server/http_filter_config_base.cc @@ -1,12 +1,14 @@ #include "server/http_filter_config_base.h" +#include "server/well_known_headers.h" + namespace Nighthawk { namespace Server { FilterConfigurationBase::FilterConfigurationBase( const nighthawk::server::ResponseOptions& proto_config, absl::string_view filter_name) : filter_name_(filter_name), - server_config_(std::make_shared(std::move(proto_config))), + server_config_(std::make_shared(proto_config)), effective_config_(server_config_) {} void FilterConfigurationBase::computeEffectiveConfiguration( diff --git a/source/server/http_filter_config_base.h b/source/server/http_filter_config_base.h index 88d20405e..9f3f700fe 100644 --- a/source/server/http_filter_config_base.h +++ b/source/server/http_filter_config_base.h @@ -9,7 +9,6 @@ #include "api/server/response_options.pb.h" #include "server/configuration.h" -#include "server/well_known_headers.h" #include "absl/status/status.h" diff --git a/source/server/http_time_tracking_filter.cc b/source/server/http_time_tracking_filter.cc index a17b9cb98..045192d0e 100644 --- a/source/server/http_time_tracking_filter.cc +++ b/source/server/http_time_tracking_filter.cc @@ -16,8 +16,8 @@ namespace Nighthawk { namespace Server { HttpTimeTrackingFilterConfig::HttpTimeTrackingFilterConfig( - 
nighthawk::server::ResponseOptions proto_config) - : server_config_(std::move(proto_config)), + const nighthawk::server::ResponseOptions& proto_config) + : FilterConfigurationBase(proto_config, "time-tracking"), stopwatch_(std::make_unique()) {} uint64_t @@ -29,28 +29,28 @@ HttpTimeTrackingFilter::HttpTimeTrackingFilter(HttpTimeTrackingFilterConfigShare : config_(std::move(config)) {} Envoy::Http::FilterHeadersStatus -HttpTimeTrackingFilter::decodeHeaders(Envoy::Http::RequestHeaderMap& headers, bool /*end_stream*/) { - base_config_ = config_->server_config(); - const auto* request_config_header = headers.get(TestServer::HeaderNames::get().TestServerConfig); - if (request_config_header) { - json_merge_error_ = !Configuration::mergeJsonConfig( - request_config_header->value().getStringView(), base_config_, error_message_); - if (json_merge_error_) { - decoder_callbacks_->sendLocalReply( - static_cast(500), - fmt::format("time-tracking didn't understand the request: {}", error_message_), nullptr, - absl::nullopt, ""); - return Envoy::Http::FilterHeadersStatus::StopIteration; - } +HttpTimeTrackingFilter::decodeHeaders(Envoy::Http::RequestHeaderMap& headers, bool end_stream) { + config_->computeEffectiveConfiguration(headers); + if (end_stream && config_->maybeSendErrorReply(*decoder_callbacks_)) { + return Envoy::Http::FilterHeadersStatus::StopIteration; } return Envoy::Http::FilterHeadersStatus::Continue; } +Envoy::Http::FilterDataStatus HttpTimeTrackingFilter::decodeData(Envoy::Buffer::Instance&, + bool end_stream) { + if (end_stream && config_->maybeSendErrorReply(*decoder_callbacks_)) { + return Envoy::Http::FilterDataStatus::StopIterationNoBuffer; + } + return Envoy::Http::FilterDataStatus::Continue; +} + Envoy::Http::FilterHeadersStatus HttpTimeTrackingFilter::encodeHeaders(Envoy::Http::ResponseHeaderMap& response_headers, bool) { - if (!json_merge_error_) { + const auto effective_config = config_->getEffectiveConfiguration(); + if (effective_config.ok()) { const 
std::string previous_request_delta_in_response_header = - base_config_.emit_previous_request_delta_in_response_header(); + effective_config.value()->emit_previous_request_delta_in_response_header(); if (!previous_request_delta_in_response_header.empty() && last_request_delta_ns_ > 0) { response_headers.appendCopy( Envoy::Http::LowerCaseString(previous_request_delta_in_response_header), diff --git a/source/server/http_time_tracking_filter.h b/source/server/http_time_tracking_filter.h index b3148fed3..6f08b42f5 100644 --- a/source/server/http_time_tracking_filter.h +++ b/source/server/http_time_tracking_filter.h @@ -11,6 +11,8 @@ #include "api/server/response_options.pb.h" +#include "server/http_filter_config_base.h" + namespace Nighthawk { namespace Server { @@ -18,20 +20,14 @@ namespace Server { * Filter configuration container class for the time tracking extension. * Instances of this class will be shared accross instances of HttpTimeTrackingFilter. */ -class HttpTimeTrackingFilterConfig { +class HttpTimeTrackingFilterConfig : public FilterConfigurationBase { public: /** * Constructs a new HttpTimeTrackingFilterConfig instance. * * @param proto_config The proto configuration of the filter. */ - HttpTimeTrackingFilterConfig(nighthawk::server::ResponseOptions proto_config); - - /** - * @return const nighthawk::server::ResponseOptions& read-only reference to the proto config - * object. - */ - const nighthawk::server::ResponseOptions& server_config() { return server_config_; } + HttpTimeTrackingFilterConfig(const nighthawk::server::ResponseOptions& proto_config); /** * Gets the number of elapsed nanoseconds since the last call (server wide). 
@@ -44,7 +40,6 @@ class HttpTimeTrackingFilterConfig { uint64_t getElapsedNanosSinceLastRequest(Envoy::TimeSource& time_source); private: - const nighthawk::server::ResponseOptions server_config_; std::unique_ptr stopwatch_; }; @@ -65,6 +60,7 @@ class HttpTimeTrackingFilter : public Envoy::Http::PassThroughFilter { // Http::StreamDecoderFilter Envoy::Http::FilterHeadersStatus decodeHeaders(Envoy::Http::RequestHeaderMap& headers, bool /*end_stream*/) override; + Envoy::Http::FilterDataStatus decodeData(Envoy::Buffer::Instance&, bool) override; void setDecoderFilterCallbacks(Envoy::Http::StreamDecoderFilterCallbacks&) override; // Http::StreamEncoderFilter @@ -72,9 +68,6 @@ class HttpTimeTrackingFilter : public Envoy::Http::PassThroughFilter { private: const HttpTimeTrackingFilterConfigSharedPtr config_; - nighthawk::server::ResponseOptions base_config_; - bool json_merge_error_{false}; - std::string error_message_; uint64_t last_request_delta_ns_; }; From 8ed1f689f738347626c57f3a0a4f50fb655ceab6 Mon Sep 17 00:00:00 2001 From: Otto van der Schaaf Date: Tue, 22 Sep 2020 21:47:11 +0200 Subject: [PATCH 15/63] Implement RemoteProcessImpl in terms of PerformNighthawkBenchmark() (#548) Drop duplicated code, use the newer code that has better test coverage plus handling for when multiple results would be received. 
Fixes #496 Signed-off-by: Otto van der Schaaf --- source/client/BUILD | 1 + source/client/remote_process_impl.cc | 37 +++++++++------------------- source/client/remote_process_impl.h | 2 ++ 3 files changed, 14 insertions(+), 26 deletions(-) diff --git a/source/client/BUILD b/source/client/BUILD index 20619d93e..4723f1ac7 100644 --- a/source/client/BUILD +++ b/source/client/BUILD @@ -48,6 +48,7 @@ envoy_cc_library( "//include/nighthawk/common:base_includes", "//source/common:request_source_impl_lib", "//source/common:nighthawk_common_lib", + "//source/common:nighthawk_service_client_impl", "@envoy//source/common/common:random_generator_lib_with_external_headers", "@envoy//source/common/access_log:access_log_manager_lib_with_external_headers", "@envoy//source/common/api:api_lib_with_external_headers", diff --git a/source/client/remote_process_impl.cc b/source/client/remote_process_impl.cc index 05e0b859f..d69922eb4 100644 --- a/source/client/remote_process_impl.cc +++ b/source/client/remote_process_impl.cc @@ -9,6 +9,7 @@ #include "api/client/options.pb.h" #include "api/client/output.pb.h" +#include "common/nighthawk_service_client_impl.h" #include "common/uri_impl.h" #include "client/options_impl.h" @@ -18,39 +19,23 @@ namespace Client { RemoteProcessImpl::RemoteProcessImpl(const Options& options, nighthawk::client::NighthawkService::Stub& stub) - : options_(options), stub_(stub) {} + : options_(options), service_client_(std::make_unique()), + stub_(stub) {} bool RemoteProcessImpl::run(OutputCollector& collector) { - nighthawk::client::ExecutionRequest request; - nighthawk::client::ExecutionResponse response; - grpc::ClientContext context; - auto execution_stream = stub_.ExecutionStream(&context); - - *request.mutable_start_request()->mutable_options() = *options_.toCommandLineOptions(); + Nighthawk::Client::CommandLineOptionsPtr options = options_.toCommandLineOptions(); // We don't forward the option that requests remote execution. 
Today, // nighthawk_service will ignore the option, but if someone ever changes that this // is probably desireable. - request.mutable_start_request()->mutable_options()->mutable_nighthawk_service()->Clear(); + options->mutable_nighthawk_service()->Clear(); - if (execution_stream->Write(request, {}) && execution_stream->Read(&response)) { - if (response.has_output()) { - collector.setOutput(response.output()); - } else { - ENVOY_LOG(error, "Remote execution failed"); - } - if (response.has_error_detail()) { - ENVOY_LOG(error, "have error detail: {}", response.error_detail().DebugString()); - } - if (!execution_stream->WritesDone()) { - ENVOY_LOG(warn, "writeDone() failed"); - } else { - auto status = execution_stream->Finish(); - return status.ok(); - } - } else { - ENVOY_LOG(error, "Failure while communicating with the remote service"); + const absl::StatusOr result = + service_client_->PerformNighthawkBenchmark(&stub_, *options); + if (result.ok()) { + collector.setOutput(result.value().output()); + return true; } - + ENVOY_LOG(error, "Remote execution failure: {}", result.status().message()); return false; } diff --git a/source/client/remote_process_impl.h b/source/client/remote_process_impl.h index 243121cfc..e689e1c0d 100644 --- a/source/client/remote_process_impl.h +++ b/source/client/remote_process_impl.h @@ -3,6 +3,7 @@ #include "nighthawk/client/options.h" #include "nighthawk/client/output_collector.h" #include "nighthawk/client/process.h" +#include "nighthawk/common/nighthawk_service_client.h" #include "external/envoy/source/common/common/logger.h" @@ -38,6 +39,7 @@ class RemoteProcessImpl : public Process, public Envoy::Logger::Loggable service_client_; nighthawk::client::NighthawkService::Stub& stub_; }; From 401f09ec0e6d057f35a08cc35d04932c0c2e56d1 Mon Sep 17 00:00:00 2001 From: eric846 <56563761+eric846@users.noreply.github.com> Date: Tue, 22 Sep 2020 16:13:31 -0400 Subject: [PATCH 16/63] Adaptive Load Controller main loop (#535) This is the main 
function of the Adaptive Load Controller library: - Check the input proto for errors - Apply default input values - Adjusting Stage loop: - Get the latest dynamically generated CommandLineOptions from the StepController - Run a short benchmark with the Nighthawk Service - Obtain values from MetricsPlugins - Score metrics using ScoringFunction plugins - Report scores back to the StepController, which recalculates the load for the next iteration - Check for convergence deadline exceeded - Check for StepController convergence - Check for StepController doom - Testing Stage: Run one long benchmark on the Nighthawk Service at the converged load Fixes #485. Part 9 of splitting PR #483. Signed-off-by: eric846 <56563761+eric846@users.noreply.github.com> --- .../adaptive_load/adaptive_load_controller.h | 51 +-- source/adaptive_load/BUILD | 24 ++ .../adaptive_load_controller_impl.cc | 205 ++++++++++++ .../adaptive_load_controller_impl.h | 83 +++++ test/adaptive_load/BUILD | 24 ++ .../adaptive_load_controller_test.cc | 303 ++++++++++++++++++ .../fake_step_controller.cc | 2 +- .../fake_step_controller.h | 2 +- .../fake_step_controller_test.cc | 7 +- 9 files changed, 675 insertions(+), 26 deletions(-) create mode 100644 source/adaptive_load/adaptive_load_controller_impl.cc create mode 100644 source/adaptive_load/adaptive_load_controller_impl.h create mode 100644 test/adaptive_load/adaptive_load_controller_test.cc diff --git a/include/nighthawk/adaptive_load/adaptive_load_controller.h b/include/nighthawk/adaptive_load/adaptive_load_controller.h index 512e8cd5f..e510ee3b6 100644 --- a/include/nighthawk/adaptive_load/adaptive_load_controller.h +++ b/include/nighthawk/adaptive_load/adaptive_load_controller.h @@ -1,6 +1,6 @@ #pragma once -#include "envoy/common/time.h" +#include "envoy/common/pure.h" #include "external/envoy/source/common/common/statusor.h" @@ -10,26 +10,35 @@ namespace Nighthawk { /** - * Performs an adaptive load session consisting of the Adjusting Stage and the - 
* Testing Stage. Adjusting Stage: Runs a series of short benchmarks, checks metrics according to - * MetricSpecs, and adjusts load up or down based on the result; returns an error if convergence is - * not detected before the deadline in the spec. Load adjustments and convergence detection are - * computed by a StepController plugin. Metric values are obtained through MetricsPlugins. Testing - * Stage: When the optimal load is found, runs one long benchmark to validate it. - * - * @param nighthawk_service_stub A Nighthawk Service gRPC stub. - * @param spec A proto that defines all aspects of the adaptive load session, including metrics, - * threshold, duration of adjusting stage benchmarks, and underlying Nighthawk traffic parameters. - * @param time_source An abstraction of the system clock. Normally, just construct an - * Envoy::Event::RealTimeSystem and pass it. NO_CHECK_FORMAT(real_time). If calling from an - * Envoy-based process, there may be an existing TimeSource or TimeSystem to use. If calling - * from a test, pass a fake TimeSource. - * - * @return StatusOr A proto logging the result of all traffic attempted - * and all corresponding metric values and scores, or an overall error status if the session failed. + * Contains the main loop of the adaptive load controller. Consults a StepController for load + * decisions, interacts with Nighthawk Service and MetricsPlugins. */ -absl::StatusOr PerformAdaptiveLoadSession( - nighthawk::client::NighthawkService::StubInterface* nighthawk_service_stub, - const nighthawk::adaptive_load::AdaptiveLoadSessionSpec& spec, Envoy::TimeSource& time_source); +class AdaptiveLoadController { +public: + virtual ~AdaptiveLoadController() = default; + /** + * Performs an adaptive load session consisting of the Adjusting Stage and the + * Testing Stage. + * + * Adjusting Stage: Runs a series of short benchmarks, checks metrics according to MetricSpecs, + * and adjusts load up or down based on the result. 
Returns an error if convergence is not + * detected before the deadline in the spec. Load adjustments and convergence detection are + * computed by a StepController plugin. Metric values are obtained through MetricsPlugins. + * + * Testing Stage: When the optimal load is found, runs one long benchmark to validate it. + * + * @param nighthawk_service_stub A Nighthawk Service gRPC stub. + * @param spec A proto that defines all aspects of the adaptive load session, including metrics, + * threshold, duration of adjusting stage benchmarks, and underlying Nighthawk traffic parameters. + * + * @return StatusOr A proto logging the result of all traffic attempted + * and all corresponding metric values and scores, or an overall error status if the session + * failed. + */ + virtual absl::StatusOr + PerformAdaptiveLoadSession( + nighthawk::client::NighthawkService::StubInterface* nighthawk_service_stub, + const nighthawk::adaptive_load::AdaptiveLoadSessionSpec& spec) PURE; +}; } // namespace Nighthawk diff --git a/source/adaptive_load/BUILD b/source/adaptive_load/BUILD index 3bc034c93..f6d8ccd8c 100644 --- a/source/adaptive_load/BUILD +++ b/source/adaptive_load/BUILD @@ -8,6 +8,30 @@ licenses(["notice"]) # Apache 2 envoy_package() +envoy_cc_library( + name = "adaptive_load_controller_impl", + srcs = [ + "adaptive_load_controller_impl.cc", + ], + hdrs = [ + "adaptive_load_controller_impl.h", + ], + repository = "@envoy", + visibility = ["//visibility:public"], + deps = [ + ":metrics_plugin_impl", + ":plugin_loader", + "//api/adaptive_load:adaptive_load_proto_cc_proto", + "//api/client:base_cc_proto", + "//include/nighthawk/adaptive_load:adaptive_load_controller", + "//include/nighthawk/adaptive_load:metrics_evaluator", + "//include/nighthawk/adaptive_load:scoring_function", + "//include/nighthawk/adaptive_load:session_spec_proto_helper", + "//include/nighthawk/adaptive_load:step_controller", + "//include/nighthawk/common:nighthawk_service_client", + ], +) + envoy_cc_library( 
name = "config_validator_impl", srcs = [ diff --git a/source/adaptive_load/adaptive_load_controller_impl.cc b/source/adaptive_load/adaptive_load_controller_impl.cc new file mode 100644 index 000000000..39d29c915 --- /dev/null +++ b/source/adaptive_load/adaptive_load_controller_impl.cc @@ -0,0 +1,205 @@ +#include "adaptive_load/adaptive_load_controller_impl.h" + +#include + +#include "envoy/common/exception.h" +#include "envoy/config/core/v3/base.pb.h" + +#include "nighthawk/adaptive_load/adaptive_load_controller.h" +#include "nighthawk/adaptive_load/metrics_plugin.h" +#include "nighthawk/adaptive_load/scoring_function.h" +#include "nighthawk/adaptive_load/step_controller.h" + +#include "external/envoy/source/common/common/logger.h" +#include "external/envoy/source/common/common/statusor.h" +#include "external/envoy/source/common/protobuf/protobuf.h" + +#include "api/adaptive_load/adaptive_load.pb.h" +#include "api/adaptive_load/benchmark_result.pb.h" +#include "api/adaptive_load/metric_spec.pb.h" +#include "api/client/options.pb.h" +#include "api/client/output.pb.h" +#include "api/client/service.grpc.pb.h" + +#include "absl/container/flat_hash_map.h" +#include "absl/status/status.h" +#include "absl/strings/str_format.h" +#include "absl/strings/str_join.h" +#include "adaptive_load/metrics_plugin_impl.h" +#include "adaptive_load/plugin_loader.h" + +namespace Nighthawk { + +namespace { + +using nighthawk::adaptive_load::AdaptiveLoadSessionOutput; +using nighthawk::adaptive_load::AdaptiveLoadSessionSpec; +using nighthawk::adaptive_load::BenchmarkResult; +using nighthawk::adaptive_load::MetricEvaluation; +using nighthawk::adaptive_load::MetricSpec; +using nighthawk::adaptive_load::MetricSpecWithThreshold; +using nighthawk::adaptive_load::ThresholdSpec; + +/** + * Loads and initializes MetricsPlugins requested in the session spec. Assumes the spec has already + * been validated; crashes the process otherwise. 
+ * + * @param spec Adaptive load session spec proto that has already been validated. + * + * @return Map from MetricsPlugin names to initialized plugins, to be used in the course of a single + * adaptive load session based on the session spec. + */ +absl::flat_hash_map +LoadMetricsPlugins(const AdaptiveLoadSessionSpec& spec) { + absl::flat_hash_map name_to_custom_metrics_plugin_map; + for (const envoy::config::core::v3::TypedExtensionConfig& config : + spec.metrics_plugin_configs()) { + absl::StatusOr metrics_plugin_or = LoadMetricsPlugin(config); + RELEASE_ASSERT( + metrics_plugin_or.ok(), + absl::StrCat( + "MetricsPlugin loading error should have been caught during input validation: ", + metrics_plugin_or.status().message())); + name_to_custom_metrics_plugin_map[config.name()] = std::move(metrics_plugin_or.value()); + } + return name_to_custom_metrics_plugin_map; +} + +/** + * Loads and initializes a StepController plugin requested in the session spec. Assumes + * the spec has already been validated; crashes the process otherwise. + * + * @param spec Adaptive load session spec proto that has already been validated. + * + * @return unique_ptr Initialized StepController plugin. 
+ */ +StepControllerPtr LoadStepControllerPluginFromSpec(const AdaptiveLoadSessionSpec& spec) { + absl::StatusOr step_controller_or = + LoadStepControllerPlugin(spec.step_controller_config(), spec.nighthawk_traffic_template()); + RELEASE_ASSERT( + step_controller_or.ok(), + absl::StrCat( + "StepController plugin loading error should have been caught during input validation: ", + step_controller_or.status().message())); + return std::move(step_controller_or.value()); +} + +} // namespace + +AdaptiveLoadControllerImpl::AdaptiveLoadControllerImpl( + const NighthawkServiceClient& nighthawk_service_client, + const MetricsEvaluator& metrics_evaluator, + const AdaptiveLoadSessionSpecProtoHelper& session_spec_proto_helper, + Envoy::TimeSource& time_source) + : nighthawk_service_client_{nighthawk_service_client}, metrics_evaluator_{metrics_evaluator}, + session_spec_proto_helper_{session_spec_proto_helper}, time_source_{time_source} {} + +absl::StatusOr AdaptiveLoadControllerImpl::PerformAndAnalyzeNighthawkBenchmark( + nighthawk::client::NighthawkService::StubInterface* nighthawk_service_stub, + const AdaptiveLoadSessionSpec& spec, + const absl::flat_hash_map& name_to_custom_plugin_map, + StepController& step_controller, Envoy::ProtobufWkt::Duration duration) { + absl::StatusOr command_line_options_or = + step_controller.GetCurrentCommandLineOptions(); + if (!command_line_options_or.ok()) { + ENVOY_LOG_MISC(error, "Error constructing Nighthawk input: {}: {}", + command_line_options_or.status().code(), + command_line_options_or.status().message()); + return command_line_options_or.status(); + } + nighthawk::client::CommandLineOptions command_line_options = command_line_options_or.value(); + // Overwrite the duration in the traffic template with the specified duration of the adjusting + // or testing stage. 
+ *command_line_options.mutable_duration() = std::move(duration); + + ENVOY_LOG_MISC(info, "Sending load: {}", command_line_options.DebugString()); + absl::StatusOr nighthawk_response_or = + nighthawk_service_client_.PerformNighthawkBenchmark(nighthawk_service_stub, + command_line_options); + if (!nighthawk_response_or.ok()) { + ENVOY_LOG_MISC(error, "Nighthawk Service error: {}: {}", nighthawk_response_or.status().code(), + nighthawk_response_or.status().message()); + return nighthawk_response_or.status(); + } + nighthawk::client::ExecutionResponse nighthawk_response = nighthawk_response_or.value(); + + absl::StatusOr benchmark_result_or = + metrics_evaluator_.AnalyzeNighthawkBenchmark(nighthawk_response, spec, + name_to_custom_plugin_map); + if (!benchmark_result_or.ok()) { + ENVOY_LOG_MISC(error, "Benchmark scoring error: {}: {}", benchmark_result_or.status().code(), + benchmark_result_or.status().message()); + return benchmark_result_or.status(); + } + BenchmarkResult benchmark_result = benchmark_result_or.value(); + for (const MetricEvaluation& evaluation : benchmark_result.metric_evaluations()) { + ENVOY_LOG_MISC(info, "Evaluation: {}", evaluation.DebugString()); + } + step_controller.UpdateAndRecompute(benchmark_result); + return benchmark_result; +} + +absl::StatusOr AdaptiveLoadControllerImpl::PerformAdaptiveLoadSession( + nighthawk::client::NighthawkService::StubInterface* nighthawk_service_stub, + const AdaptiveLoadSessionSpec& input_spec) { + AdaptiveLoadSessionSpec spec = session_spec_proto_helper_.SetSessionSpecDefaults(input_spec); + absl::Status validation_status = session_spec_proto_helper_.CheckSessionSpec(spec); + if (!validation_status.ok()) { + ENVOY_LOG_MISC(error, "Validation failed: {}", validation_status.message()); + return validation_status; + } + absl::flat_hash_map name_to_custom_metrics_plugin_map = + LoadMetricsPlugins(spec); + StepControllerPtr step_controller = LoadStepControllerPluginFromSpec(spec); + AdaptiveLoadSessionOutput 
output; + + // Threshold specs are reproduced in the output proto for convenience. + for (const nighthawk::adaptive_load::MetricSpecWithThreshold& threshold : + spec.metric_thresholds()) { + *output.mutable_metric_thresholds()->Add() = threshold; + } + + // Perform adjusting stage: + Envoy::MonotonicTime start_time = time_source_.monotonicTime(); + std::string doom_reason; + do { + absl::StatusOr result_or = PerformAndAnalyzeNighthawkBenchmark( + nighthawk_service_stub, spec, name_to_custom_metrics_plugin_map, *step_controller, + spec.measuring_period()); + if (!result_or.ok()) { + return result_or.status(); + } + BenchmarkResult result = result_or.value(); + *output.mutable_adjusting_stage_results()->Add() = result; + + const std::chrono::nanoseconds time_limit_ns( + Envoy::Protobuf::util::TimeUtil::DurationToNanoseconds(spec.convergence_deadline())); + const auto elapsed_time_ns = std::chrono::duration_cast( + time_source_.monotonicTime() - start_time); + if (elapsed_time_ns > time_limit_ns) { + std::string message = absl::StrFormat("Failed to converge before deadline of %.2f seconds.", + time_limit_ns.count() / 1e9); + ENVOY_LOG_MISC(error, message); + return absl::DeadlineExceededError(message); + } + } while (!step_controller->IsConverged() && !step_controller->IsDoomed(doom_reason)); + + if (step_controller->IsDoomed(doom_reason)) { + std::string message = + absl::StrCat("Step controller determined that it can never converge: ", doom_reason); + ENVOY_LOG_MISC(error, message); + return absl::AbortedError(message); + } + + // Perform testing stage: + absl::StatusOr result_or = PerformAndAnalyzeNighthawkBenchmark( + nighthawk_service_stub, spec, name_to_custom_metrics_plugin_map, *step_controller, + spec.testing_stage_duration()); + if (!result_or.ok()) { + return result_or.status(); + } + *output.mutable_testing_stage_result() = result_or.value(); + return output; +} + +} // namespace Nighthawk diff --git a/source/adaptive_load/adaptive_load_controller_impl.h 
b/source/adaptive_load/adaptive_load_controller_impl.h new file mode 100644 index 000000000..4dc073892 --- /dev/null +++ b/source/adaptive_load/adaptive_load_controller_impl.h @@ -0,0 +1,83 @@ +#include "envoy/common/time.h" + +#include "nighthawk/adaptive_load/adaptive_load_controller.h" +#include "nighthawk/adaptive_load/metrics_evaluator.h" +#include "nighthawk/adaptive_load/session_spec_proto_helper.h" +#include "nighthawk/adaptive_load/step_controller.h" +#include "nighthawk/common/nighthawk_service_client.h" + +namespace Nighthawk { + +class AdaptiveLoadControllerImpl : public AdaptiveLoadController { +public: + /** + * Constructs an implementation of the adaptive load controller main loop that relies on logic in + * several helper objects. Through helpers, it performs Nighthawk Service benchmarks, obtains + * metrics from MetricsPlugins, scores the results, and consults a StepController plugin to + * determine the next load and detect convergence. All plugins are specified through the + * AdaptiveLoadSessionSpec proto. + * + * This class is thread-safe, but Nighthawk Service itself does not support multiple simultaneous + * benchmarks. + * + * Usage: + * + * AdaptiveLoadControllerImpl controller( + * NighthawkServiceClientImpl(), + * MetricsEvaluatorImpl(), + * AdaptiveLoadSessionSpecProtoHelperImpl(), + * Envoy::Event::RealTimeSystem()); // NO_CHECK_FORMAT(real_time)) + * AdaptiveLoadSessionSpec spec; + * // (set spec fields here) + * StatusOr output = + * controller.PerformAdaptiveLoadSession(&nighthawk_service_stub, spec); + * + * @param nighthawk_service_client A helper that executes Nighthawk Service benchmarks given a + * gRPC stub. + * @param metrics_evaluator A helper that obtains metrics from MetricsPlugins and Nighthawk + * Service responses, then scores them. + * @param session_spec_proto_helper A helper that sets default values and performs validation in + * an AdaptiveLoadSessionSpec proto. 
+ * @param time_source An abstraction of the system clock. Normally, just construct an + * Envoy::Event::RealTimeSystem and pass it. NO_CHECK_FORMAT(real_time). If calling from an + * Envoy-based process, there may be an existing TimeSource or TimeSystem to use. If calling + * from a test, pass a fake TimeSource. + */ + AdaptiveLoadControllerImpl(const NighthawkServiceClient& nighthawk_service_client, + const MetricsEvaluator& metrics_evaluator, + const AdaptiveLoadSessionSpecProtoHelper& session_spec_proto_helper, + Envoy::TimeSource& time_source); + + absl::StatusOr PerformAdaptiveLoadSession( + nighthawk::client::NighthawkService::StubInterface* nighthawk_service_stub, + const nighthawk::adaptive_load::AdaptiveLoadSessionSpec& spec) override; + +private: + /** + * Gets the current load from the StepController, performs a benchmark via a Nighthawk Service, + * hands the result off for analysis, and reports the scores back to the StepController. + * + * @param nighthawk_service_stub Nighthawk Service gRPC stub. + * @param spec Proto describing the overall adaptive load session. + * @param name_to_custom_plugin_map Common map from plugin names to MetricsPlugins loaded and + * initialized once at the beginning of the session and passed to all calls of this function. + * @param step_controller The active StepController specified in the session spec proto. + * @param duration The duration of the benchmark to insert into the traffic template, different + * between adjusting and testing stages. + * + * @return BenchmarkResult Proto containing either an error status or raw Nighthawk Service + * results, metric values, and metric scores. 
+ */ + absl::StatusOr PerformAndAnalyzeNighthawkBenchmark( + nighthawk::client::NighthawkService::StubInterface* nighthawk_service_stub, + const nighthawk::adaptive_load::AdaptiveLoadSessionSpec& spec, + const absl::flat_hash_map& name_to_custom_plugin_map, + StepController& step_controller, Envoy::ProtobufWkt::Duration duration); + + const NighthawkServiceClient& nighthawk_service_client_; + const MetricsEvaluator& metrics_evaluator_; + const AdaptiveLoadSessionSpecProtoHelper& session_spec_proto_helper_; + Envoy::TimeSource& time_source_; +}; + +} // namespace Nighthawk diff --git a/test/adaptive_load/BUILD b/test/adaptive_load/BUILD index 78518f6cb..b831726cc 100644 --- a/test/adaptive_load/BUILD +++ b/test/adaptive_load/BUILD @@ -21,6 +21,30 @@ envoy_cc_test_library( ], ) +envoy_cc_test( + name = "adaptive_load_controller_test", + srcs = ["adaptive_load_controller_test.cc"], + repository = "@envoy", + deps = [ + ":minimal_output", + "//api/client:grpc_service_lib", + "//include/nighthawk/adaptive_load:input_variable_setter", + "//include/nighthawk/adaptive_load:step_controller", + "//source/adaptive_load:adaptive_load_controller_impl", + "//source/adaptive_load:scoring_function_impl", + "//source/adaptive_load:session_spec_proto_helper_impl", + "//test/adaptive_load/fake_plugins/fake_step_controller", + "//test/common:fake_time_source", + "//test/mocks/adaptive_load:mock_metrics_evaluator", + "//test/mocks/adaptive_load:mock_session_spec_proto_helper", + "//test/mocks/common:mock_nighthawk_service_client", + "@envoy//source/common/common:assert_lib_with_external_headers", + "@envoy//source/common/common:statusor_lib_with_external_headers", + "@envoy//source/common/config:utility_lib_with_external_headers", + "@envoy//source/common/protobuf:protobuf_with_external_headers", + ], +) + envoy_cc_test( name = "input_variable_setter_test", srcs = ["input_variable_setter_test.cc"], diff --git a/test/adaptive_load/adaptive_load_controller_test.cc 
b/test/adaptive_load/adaptive_load_controller_test.cc new file mode 100644 index 000000000..122a7f5e6 --- /dev/null +++ b/test/adaptive_load/adaptive_load_controller_test.cc @@ -0,0 +1,303 @@ +#include + +#include "envoy/config/core/v3/base.pb.h" +#include "envoy/registry/registry.h" + +#include "nighthawk/adaptive_load/adaptive_load_controller.h" +#include "nighthawk/adaptive_load/input_variable_setter.h" +#include "nighthawk/adaptive_load/metrics_evaluator.h" +#include "nighthawk/adaptive_load/metrics_plugin.h" +#include "nighthawk/adaptive_load/scoring_function.h" +#include "nighthawk/adaptive_load/step_controller.h" + +#include "external/envoy/source/common/common/statusor.h" +#include "external/envoy/source/common/config/utility.h" +#include "external/envoy/source/common/protobuf/protobuf.h" + +#include "api/adaptive_load/adaptive_load.pb.h" +#include "api/adaptive_load/benchmark_result.pb.h" +#include "api/adaptive_load/input_variable_setter_impl.pb.h" +#include "api/adaptive_load/metric_spec.pb.h" +#include "api/adaptive_load/metrics_plugin_impl.pb.h" +#include "api/adaptive_load/scoring_function_impl.pb.h" +#include "api/adaptive_load/step_controller_impl.pb.h" +#include "api/client/options.pb.h" +#include "api/client/output.pb.h" +#include "api/client/service.grpc.pb.h" +#include "api/client/service.pb.h" +#include "api/client/service_mock.grpc.pb.h" + +#include "test/adaptive_load/fake_plugins/fake_step_controller/fake_step_controller.h" +#include "test/common/fake_time_source.h" +#include "test/mocks/adaptive_load/mock_metrics_evaluator.h" +#include "test/mocks/adaptive_load/mock_session_spec_proto_helper.h" +#include "test/mocks/common/mock_nighthawk_service_client.h" + +#include "absl/container/flat_hash_map.h" +#include "absl/status/status.h" +#include "absl/strings/str_join.h" +#include "adaptive_load/adaptive_load_controller_impl.h" +#include "adaptive_load/metrics_plugin_impl.h" +#include "adaptive_load/plugin_loader.h" +#include 
"adaptive_load/scoring_function_impl.h" +#include "adaptive_load/session_spec_proto_helper_impl.h" +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Nighthawk { + +namespace { + +using ::Envoy::Protobuf::util::MessageDifferencer; +using ::nighthawk::adaptive_load::AdaptiveLoadSessionOutput; +using ::nighthawk::adaptive_load::AdaptiveLoadSessionSpec; +using ::nighthawk::adaptive_load::BenchmarkResult; +using ::nighthawk::adaptive_load::MetricEvaluation; +using ::nighthawk::adaptive_load::MetricSpec; +using ::nighthawk::adaptive_load::MetricSpecWithThreshold; +using ::nighthawk::adaptive_load::ThresholdSpec; +using ::nighthawk::client::MockNighthawkServiceStub; +using ::testing::_; +using ::testing::DoAll; +using ::testing::Eq; +using ::testing::HasSubstr; +using ::testing::NiceMock; +using ::testing::Return; +using ::testing::SaveArg; +using ::testing::SetArgPointee; + +/** + * Creates a valid BenchmarkResult proto with only the score set. Useful for controlling the + * FakeStepController, which returns convergence for score > 0 and doom for a score < 0. + * + * @param score Positive number for a converging BenchmarkResult, negative number for doomed, zero + * for neither. + * + * @return BenchmarkResult An incomplete BenchmarkResult useful only for determining + * FakeStepController convergence and doom. + */ +BenchmarkResult MakeBenchmarkResultWithScore(double score) { + BenchmarkResult benchmark_result; + MetricEvaluation* evaluation = benchmark_result.mutable_metric_evaluations()->Add(); + evaluation->set_threshold_score(score); + return benchmark_result; +} + +/** + * Creates a minimal AdaptiveLoadSessionSpec with a FakeStepController. + * + * @return AdaptiveLoadSessionSpec with a FakeStepController and enough fields set to pass + * validation. 
+ */ +AdaptiveLoadSessionSpec MakeValidAdaptiveLoadSessionSpec() { + AdaptiveLoadSessionSpec spec; + spec.mutable_convergence_deadline()->set_seconds(100); + *spec.mutable_step_controller_config() = MakeFakeStepControllerPluginConfigWithRps(10); + MetricSpecWithThreshold* expected_spec_with_threshold = spec.mutable_metric_thresholds()->Add(); + expected_spec_with_threshold->mutable_metric_spec()->set_metric_name("success-rate"); + expected_spec_with_threshold->mutable_threshold_spec()->mutable_scoring_function()->set_name( + "nighthawk.binary_scoring"); + expected_spec_with_threshold->mutable_threshold_spec() + ->mutable_scoring_function() + ->mutable_typed_config() + ->PackFrom(nighthawk::adaptive_load::BinaryScoringFunctionConfig()); + return spec; +} + +class AdaptiveLoadControllerImplFixture : public testing::Test { +public: + void SetUp() override { + ON_CALL(mock_nighthawk_service_client_, PerformNighthawkBenchmark) + .WillByDefault(Return(nighthawk::client::ExecutionResponse())); + } + +protected: + NiceMock mock_nighthawk_service_client_; + NiceMock mock_metrics_evaluator_; + FakeIncrementingMonotonicTimeSource fake_time_source_; + MockNighthawkServiceStub mock_nighthawk_service_stub_; + // Real spec helper is simpler to use because SetSessionSpecDefaults preserves values a test + // sets in the spec; the mock inconveniently discards the input and returns an empty spec. 
+ AdaptiveLoadSessionSpecProtoHelperImpl real_spec_proto_helper_; +}; + +TEST_F(AdaptiveLoadControllerImplFixture, SetsSpecDefaults) { + NiceMock mock_spec_proto_helper; + AdaptiveLoadSessionSpec spec = MakeValidAdaptiveLoadSessionSpec(); + EXPECT_CALL(mock_spec_proto_helper, SetSessionSpecDefaults(_)).WillOnce(Return(spec)); + + AdaptiveLoadControllerImpl controller(mock_nighthawk_service_client_, mock_metrics_evaluator_, + mock_spec_proto_helper, fake_time_source_); + + (void)controller.PerformAdaptiveLoadSession(&mock_nighthawk_service_stub_, spec); +} + +TEST_F(AdaptiveLoadControllerImplFixture, PropagatesSpecValidationError) { + NiceMock mock_spec_proto_helper; + EXPECT_CALL(mock_spec_proto_helper, CheckSessionSpec(_)) + .WillOnce(Return(absl::DataLossError("artificial spec error"))); + + AdaptiveLoadControllerImpl controller(mock_nighthawk_service_client_, mock_metrics_evaluator_, + mock_spec_proto_helper, fake_time_source_); + + absl::StatusOr output_or = controller.PerformAdaptiveLoadSession( + &mock_nighthawk_service_stub_, MakeValidAdaptiveLoadSessionSpec()); + ASSERT_FALSE(output_or.ok()); + EXPECT_EQ(output_or.status().code(), absl::StatusCode::kDataLoss); + EXPECT_EQ(output_or.status().message(), "artificial spec error"); +} + +TEST_F(AdaptiveLoadControllerImplFixture, CopiesThresholdSpecsIntoOutput) { + EXPECT_CALL(mock_metrics_evaluator_, AnalyzeNighthawkBenchmark(_, _, _)) + .WillRepeatedly(Return(MakeBenchmarkResultWithScore(1.0))); + + AdaptiveLoadSessionSpecProtoHelperImpl spec_helper; + AdaptiveLoadControllerImpl controller(mock_nighthawk_service_client_, mock_metrics_evaluator_, + spec_helper, fake_time_source_); + + AdaptiveLoadSessionSpec spec = + spec_helper.SetSessionSpecDefaults(MakeValidAdaptiveLoadSessionSpec()); + absl::StatusOr output_or = + controller.PerformAdaptiveLoadSession(&mock_nighthawk_service_stub_, spec); + ASSERT_TRUE(output_or.ok()); + ASSERT_GT(output_or.value().metric_thresholds_size(), 0); + MetricSpecWithThreshold 
actual_spec_with_threshold = output_or.value().metric_thresholds(0); + EXPECT_TRUE( + MessageDifferencer::Equivalent(actual_spec_with_threshold, spec.metric_thresholds(0))); + EXPECT_EQ(actual_spec_with_threshold.DebugString(), spec.metric_thresholds(0).DebugString()); +} + +TEST_F(AdaptiveLoadControllerImplFixture, TimesOutIfNeverConverged) { + EXPECT_CALL(mock_metrics_evaluator_, AnalyzeNighthawkBenchmark(_, _, _)) + .WillRepeatedly(Return(MakeBenchmarkResultWithScore(0.0))); + + AdaptiveLoadControllerImpl controller(mock_nighthawk_service_client_, mock_metrics_evaluator_, + real_spec_proto_helper_, fake_time_source_); + + AdaptiveLoadSessionSpec spec = MakeValidAdaptiveLoadSessionSpec(); + absl::StatusOr output_or = + controller.PerformAdaptiveLoadSession(&mock_nighthawk_service_stub_, spec); + ASSERT_FALSE(output_or.ok()); + EXPECT_EQ(output_or.status().code(), absl::StatusCode::kDeadlineExceeded); + EXPECT_THAT(output_or.status().message(), HasSubstr("Failed to converge")); +} + +TEST_F(AdaptiveLoadControllerImplFixture, ReturnsErrorWhenDoomed) { + EXPECT_CALL(mock_metrics_evaluator_, AnalyzeNighthawkBenchmark(_, _, _)) + .WillOnce(Return(MakeBenchmarkResultWithScore(-1.0))); + + AdaptiveLoadControllerImpl controller(mock_nighthawk_service_client_, mock_metrics_evaluator_, + real_spec_proto_helper_, fake_time_source_); + + absl::StatusOr output_or = controller.PerformAdaptiveLoadSession( + &mock_nighthawk_service_stub_, MakeValidAdaptiveLoadSessionSpec()); + ASSERT_FALSE(output_or.ok()); + EXPECT_EQ(output_or.status().code(), absl::StatusCode::kAborted); + EXPECT_THAT(output_or.status().message(), HasSubstr("can never converge")); +} + +TEST_F(AdaptiveLoadControllerImplFixture, + PropagatesErrorWhenInputValueSettingFailsInAdjustingStage) { + const std::string kExpectedErrorMessage = "artificial input setting error"; + EXPECT_CALL(mock_metrics_evaluator_, AnalyzeNighthawkBenchmark(_, _, _)) + .WillRepeatedly(Return(MakeBenchmarkResultWithScore(-1.0))); + + 
AdaptiveLoadControllerImpl controller(mock_nighthawk_service_client_, mock_metrics_evaluator_, + real_spec_proto_helper_, fake_time_source_); + + AdaptiveLoadSessionSpec spec = MakeValidAdaptiveLoadSessionSpec(); + *spec.mutable_step_controller_config() = MakeFakeStepControllerPluginConfigWithInputSettingError( + 10, absl::DataLossError(kExpectedErrorMessage), /*countdown=*/0); + absl::StatusOr output_or = + controller.PerformAdaptiveLoadSession(&mock_nighthawk_service_stub_, spec); + ASSERT_FALSE(output_or.ok()); + EXPECT_EQ(output_or.status().code(), absl::StatusCode::kDataLoss); + EXPECT_THAT(output_or.status().message(), HasSubstr(kExpectedErrorMessage)); +} + +TEST_F(AdaptiveLoadControllerImplFixture, PropagatesErrorWhenInputValueSettingFailsInTestingStage) { + const std::string kExpectedErrorMessage = "artificial input setting error"; + EXPECT_CALL(mock_metrics_evaluator_, AnalyzeNighthawkBenchmark(_, _, _)) + .WillRepeatedly(Return(MakeBenchmarkResultWithScore(1.0))); + + AdaptiveLoadControllerImpl controller(mock_nighthawk_service_client_, mock_metrics_evaluator_, + real_spec_proto_helper_, fake_time_source_); + + AdaptiveLoadSessionSpec spec = MakeValidAdaptiveLoadSessionSpec(); + *spec.mutable_step_controller_config() = MakeFakeStepControllerPluginConfigWithInputSettingError( + 10, absl::DataLossError(kExpectedErrorMessage), /*countdown=*/1); + absl::StatusOr output_or = + controller.PerformAdaptiveLoadSession(&mock_nighthawk_service_stub_, spec); + ASSERT_FALSE(output_or.ok()); + EXPECT_EQ(output_or.status().code(), absl::StatusCode::kDataLoss); + EXPECT_THAT(output_or.status().message(), HasSubstr(kExpectedErrorMessage)); +} + +TEST_F(AdaptiveLoadControllerImplFixture, PropagatesErrorFromNighthawkService) { + const std::string kExpectedErrorMessage = "artificial nighthawk service error"; + EXPECT_CALL(mock_nighthawk_service_client_, PerformNighthawkBenchmark(_, _)) + .WillOnce(Return(absl::DataLossError(kExpectedErrorMessage))); + + 
AdaptiveLoadControllerImpl controller(mock_nighthawk_service_client_, mock_metrics_evaluator_, + real_spec_proto_helper_, fake_time_source_); + + absl::StatusOr output_or = controller.PerformAdaptiveLoadSession( + &mock_nighthawk_service_stub_, MakeValidAdaptiveLoadSessionSpec()); + ASSERT_FALSE(output_or.ok()); + EXPECT_EQ(output_or.status().code(), absl::StatusCode::kDataLoss); + EXPECT_THAT(output_or.status().message(), HasSubstr(kExpectedErrorMessage)); +} + +TEST_F(AdaptiveLoadControllerImplFixture, PropagatesErrorFromMetricsEvaluator) { + const std::string kExpectedErrorMessage = "artificial metrics evaluator error"; + EXPECT_CALL(mock_metrics_evaluator_, AnalyzeNighthawkBenchmark(_, _, _)) + .WillOnce(Return(absl::DataLossError(kExpectedErrorMessage))); + + AdaptiveLoadControllerImpl controller(mock_nighthawk_service_client_, mock_metrics_evaluator_, + real_spec_proto_helper_, fake_time_source_); + + absl::StatusOr output_or = controller.PerformAdaptiveLoadSession( + &mock_nighthawk_service_stub_, MakeValidAdaptiveLoadSessionSpec()); + ASSERT_FALSE(output_or.ok()); + EXPECT_EQ(output_or.status().code(), absl::StatusCode::kDataLoss); + EXPECT_THAT(output_or.status().message(), HasSubstr(kExpectedErrorMessage)); +} + +TEST_F(AdaptiveLoadControllerImplFixture, StoresAdjustingStageResult) { + BenchmarkResult expected_benchmark_result = MakeBenchmarkResultWithScore(1.0); + EXPECT_CALL(mock_metrics_evaluator_, AnalyzeNighthawkBenchmark(_, _, _)) + .WillRepeatedly(Return(expected_benchmark_result)); + + AdaptiveLoadControllerImpl controller(mock_nighthawk_service_client_, mock_metrics_evaluator_, + real_spec_proto_helper_, fake_time_source_); + + AdaptiveLoadSessionSpec spec = MakeValidAdaptiveLoadSessionSpec(); + absl::StatusOr output_or = + controller.PerformAdaptiveLoadSession(&mock_nighthawk_service_stub_, spec); + ASSERT_TRUE(output_or.ok()); + ASSERT_EQ(output_or.value().adjusting_stage_results_size(), 1); + const BenchmarkResult& actual_benchmark_result = 
output_or.value().adjusting_stage_results(0); + EXPECT_TRUE(MessageDifferencer::Equivalent(actual_benchmark_result, expected_benchmark_result)); + EXPECT_EQ(actual_benchmark_result.DebugString(), expected_benchmark_result.DebugString()); +} + +TEST_F(AdaptiveLoadControllerImplFixture, StoresTestingStageResult) { + BenchmarkResult expected_benchmark_result = MakeBenchmarkResultWithScore(1.0); + EXPECT_CALL(mock_metrics_evaluator_, AnalyzeNighthawkBenchmark(_, _, _)) + .WillRepeatedly(Return(expected_benchmark_result)); + + AdaptiveLoadControllerImpl controller(mock_nighthawk_service_client_, mock_metrics_evaluator_, + real_spec_proto_helper_, fake_time_source_); + + AdaptiveLoadSessionSpec spec = MakeValidAdaptiveLoadSessionSpec(); + absl::StatusOr output_or = + controller.PerformAdaptiveLoadSession(&mock_nighthawk_service_stub_, spec); + ASSERT_TRUE(output_or.ok()); + const BenchmarkResult& actual_benchmark_result = output_or.value().testing_stage_result(); + EXPECT_TRUE(MessageDifferencer::Equivalent(actual_benchmark_result, expected_benchmark_result)); + EXPECT_EQ(actual_benchmark_result.DebugString(), expected_benchmark_result.DebugString()); +} + +} // namespace + +} // namespace Nighthawk diff --git a/test/adaptive_load/fake_plugins/fake_step_controller/fake_step_controller.cc b/test/adaptive_load/fake_plugins/fake_step_controller/fake_step_controller.cc index da47985e7..6bb0e0ba1 100644 --- a/test/adaptive_load/fake_plugins/fake_step_controller/fake_step_controller.cc +++ b/test/adaptive_load/fake_plugins/fake_step_controller/fake_step_controller.cc @@ -98,7 +98,7 @@ FakeStepControllerConfigFactory::ValidateConfig(const Envoy::Protobuf::Message& REGISTER_FACTORY(FakeStepControllerConfigFactory, StepControllerConfigFactory); envoy::config::core::v3::TypedExtensionConfig -MakeFakeStepControllerPluginConfig(int fixed_rps_value) { +MakeFakeStepControllerPluginConfigWithRps(int fixed_rps_value) { envoy::config::core::v3::TypedExtensionConfig outer_config; 
outer_config.set_name("nighthawk.fake_step_controller"); nighthawk::adaptive_load::FakeStepControllerConfig config; diff --git a/test/adaptive_load/fake_plugins/fake_step_controller/fake_step_controller.h b/test/adaptive_load/fake_plugins/fake_step_controller/fake_step_controller.h index 0c462929a..c22f72826 100644 --- a/test/adaptive_load/fake_plugins/fake_step_controller/fake_step_controller.h +++ b/test/adaptive_load/fake_plugins/fake_step_controller/fake_step_controller.h @@ -90,7 +90,7 @@ DECLARE_FACTORY(FakeStepControllerConfigFactory); * FakeStepControllerConfig proto wrapped in an Any. */ envoy::config::core::v3::TypedExtensionConfig -MakeFakeStepControllerPluginConfig(int fixed_rps_value); +MakeFakeStepControllerPluginConfigWithRps(int fixed_rps_value); /** * Creates a valid TypedExtensionConfig proto that activates a FakeStepController with a diff --git a/test/adaptive_load/fake_plugins/fake_step_controller/fake_step_controller_test.cc b/test/adaptive_load/fake_plugins/fake_step_controller/fake_step_controller_test.cc index c6952c743..905c9fa02 100644 --- a/test/adaptive_load/fake_plugins/fake_step_controller/fake_step_controller_test.cc +++ b/test/adaptive_load/fake_plugins/fake_step_controller/fake_step_controller_test.cc @@ -210,15 +210,16 @@ TEST(FakeStepController, IsDoomedReturnsTrueAndSetsDoomedReasonAfterNegativeBenc TEST(MakeFakeStepControllerPluginConfig, ActivatesFakeStepControllerPlugin) { absl::StatusOr plugin_or = LoadStepControllerPlugin( - MakeFakeStepControllerPluginConfig(0), nighthawk::client::CommandLineOptions{}); + MakeFakeStepControllerPluginConfigWithRps(0), nighthawk::client::CommandLineOptions{}); ASSERT_TRUE(plugin_or.ok()); EXPECT_NE(dynamic_cast(plugin_or.value().get()), nullptr); } TEST(MakeFakeStepControllerPluginConfig, ProducesFakeStepControllerPluginWithConfiguredValue) { const int kExpectedRps = 5; - absl::StatusOr plugin_or = LoadStepControllerPlugin( - MakeFakeStepControllerPluginConfig(kExpectedRps), 
nighthawk::client::CommandLineOptions{}); + absl::StatusOr plugin_or = + LoadStepControllerPlugin(MakeFakeStepControllerPluginConfigWithRps(kExpectedRps), + nighthawk::client::CommandLineOptions{}); ASSERT_TRUE(plugin_or.ok()); auto* plugin = dynamic_cast(plugin_or.value().get()); ASSERT_NE(plugin, nullptr); From 164d98b7cc8f64c23766693ffd3d3499848ce15b Mon Sep 17 00:00:00 2001 From: Otto van der Schaaf Date: Wed, 23 Sep 2020 22:00:21 +0200 Subject: [PATCH 17/63] Add generic c++ integration test for uniform extension behavior. (#533) Test basic behavioural properties of all test-server extensions. Any new extensions may piggy-back on this by enlisting themselves. In a follow up we'll purge code from the pre-existing tests that can now be deprecated. Split out from #512 Signed-off-by: Otto van der Schaaf --- test/server/BUILD | 16 ++++- test/server/http_filter_base_test.cc | 94 ++++++++++++++++++++++++++++ 2 files changed, 107 insertions(+), 3 deletions(-) create mode 100644 test/server/http_filter_base_test.cc diff --git a/test/server/BUILD b/test/server/BUILD index 71ae178ab..68d5bab08 100644 --- a/test/server/BUILD +++ b/test/server/BUILD @@ -20,15 +20,26 @@ envoy_cc_test_library( ], ) +envoy_cc_test( + name = "http_filter_base_test", + srcs = ["http_filter_base_test.cc"], + repository = "@envoy", + deps = [ + ":http_filter_integration_test_base_lib", + "//source/server:http_dynamic_delay_filter_config", + "//source/server:http_test_server_filter_config", + "//source/server:http_time_tracking_filter_config", + ], +) + envoy_cc_test( name = "http_test_server_filter_integration_test", srcs = ["http_test_server_filter_integration_test.cc"], repository = "@envoy", deps = [ + ":http_filter_integration_test_base_lib", "//source/server:http_test_server_filter_config", - "@envoy//include/envoy/upstream:cluster_manager_interface_with_external_headers", "@envoy//source/common/api:api_lib_with_external_headers", - "@envoy//test/integration:http_integration_lib", ], ) @@ 
-50,7 +61,6 @@ envoy_cc_test( deps = [ ":http_filter_integration_test_base_lib", "//source/server:http_time_tracking_filter_config", - "@envoy//include/envoy/upstream:cluster_manager_interface_with_external_headers", "@envoy//source/common/api:api_lib_with_external_headers", "@envoy//test/test_common:simulated_time_system_lib", ], diff --git a/test/server/http_filter_base_test.cc b/test/server/http_filter_base_test.cc new file mode 100644 index 000000000..212bd59b5 --- /dev/null +++ b/test/server/http_filter_base_test.cc @@ -0,0 +1,94 @@ +#include "server/http_dynamic_delay_filter.h" +#include "server/http_test_server_filter.h" +#include "server/http_time_tracking_filter.h" + +#include "test/server/http_filter_integration_test_base.h" + +#include "gtest/gtest.h" + +namespace Nighthawk { +namespace { + +using ::testing::HasSubstr; + +enum TestRequestMethod { GET, POST }; + +const std::string kBadConfigErrorSentinel = + "didn't understand the request: Error merging json config: Unable to parse " + "JSON as proto (INVALID_ARGUMENT:Unexpected"; + +class HttpFilterBaseIntegrationTest + : public HttpFilterIntegrationTestBase, + public testing::TestWithParam< + std::tuple> { +public: + HttpFilterBaseIntegrationTest() + : HttpFilterIntegrationTestBase(std::get<0>(GetParam())), config_(std::get<1>(GetParam())) { + initializeFilterConfiguration(config_); + if (std::get<2>(GetParam()) == TestRequestMethod::POST) { + switchToPostWithEntityBody(); + } + }; + + ResponseOrigin getHappyFlowResponseOrigin() { + // Modulo the test-server, extensions are expected to need an upstream to synthesize a reply + // when the effective configuration is valid. + return config_.find_first_of("name: test-server") == 0 ? 
ResponseOrigin::EXTENSION + : ResponseOrigin::UPSTREAM; + } + +protected: + const std::string config_; +}; + +INSTANTIATE_TEST_SUITE_P( + IpVersions, HttpFilterBaseIntegrationTest, + ::testing::Combine(testing::ValuesIn(Envoy::TestEnvironment::getIpVersionsForTest()), + testing::ValuesIn({absl::string_view(R"EOF( +name: time-tracking +typed_config: + "@type": type.googleapis.com/nighthawk.server.ResponseOptions + emit_previous_request_delta_in_response_header: "foo" +)EOF"), + absl::string_view(R"EOF( +name: dynamic-delay +typed_config: + "@type": type.googleapis.com/nighthawk.server.ResponseOptions + static_delay: 0.1s +)EOF"), + absl::string_view("name: test-server")}), + testing::ValuesIn({TestRequestMethod::GET, TestRequestMethod::POST}))); + +TEST_P(HttpFilterBaseIntegrationTest, NoRequestLevelConfigurationShouldSucceed) { + Envoy::IntegrationStreamDecoderPtr response = getResponse(getHappyFlowResponseOrigin()); + ASSERT_TRUE(response->complete()); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_TRUE(response->body().empty()); +} + +TEST_P(HttpFilterBaseIntegrationTest, EmptyJsonRequestLevelConfigurationShouldSucceed) { + setRequestLevelConfiguration("{}"); + Envoy::IntegrationStreamDecoderPtr response = getResponse(getHappyFlowResponseOrigin()); + ASSERT_TRUE(response->complete()); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_TRUE(response->body().empty()); +} + +TEST_P(HttpFilterBaseIntegrationTest, BadJsonAsRequestLevelConfigurationShouldFail) { + // When sending bad request-level configuration, the extension ought to reply directly. 
+ setRequestLevelConfiguration("bad_json"); + Envoy::IntegrationStreamDecoderPtr response = getResponse(ResponseOrigin::EXTENSION); + EXPECT_EQ(Envoy::Http::Utility::getResponseStatus(response->headers()), 500); + EXPECT_THAT(response->body(), HasSubstr(kBadConfigErrorSentinel)); +} + +TEST_P(HttpFilterBaseIntegrationTest, EmptyRequestLevelConfigurationShouldFail) { + // When sending empty request-level configuration, the extension ought to reply directly. + setRequestLevelConfiguration(""); + Envoy::IntegrationStreamDecoderPtr response = getResponse(ResponseOrigin::EXTENSION); + EXPECT_EQ(Envoy::Http::Utility::getResponseStatus(response->headers()), 500); + EXPECT_THAT(response->body(), HasSubstr(kBadConfigErrorSentinel)); +} + +} // namespace +} // namespace Nighthawk \ No newline at end of file From b189069e940e3f67e52c98ed62996bb9ec2a9213 Mon Sep 17 00:00:00 2001 From: Otto van der Schaaf Date: Thu, 24 Sep 2020 08:58:07 +0200 Subject: [PATCH 18/63] Use shared test facilities for time-tracking and dynamic-delay (#541) Switch tests for these extensions to use the recent shared test facilities. Eliminate tests we generalize in #533. 
Split out from #512 Signed-off-by: Otto van der Schaaf --- ...p_dynamic_delay_filter_integration_test.cc | 151 +++++++----------- ...p_time_tracking_filter_integration_test.cc | 105 +++--------- 2 files changed, 82 insertions(+), 174 deletions(-) diff --git a/test/server/http_dynamic_delay_filter_integration_test.cc b/test/server/http_dynamic_delay_filter_integration_test.cc index c4a38e429..3840a11ec 100644 --- a/test/server/http_dynamic_delay_filter_integration_test.cc +++ b/test/server/http_dynamic_delay_filter_integration_test.cc @@ -1,16 +1,18 @@ #include -#include "external/envoy/test/integration/http_integration.h" - #include "api/server/response_options.pb.h" #include "server/configuration.h" #include "server/http_dynamic_delay_filter.h" +#include "test/server/http_filter_integration_test_base.h" + #include "gtest/gtest.h" namespace Nighthawk { +const Envoy::Http::LowerCaseString kDelayHeaderString("x-envoy-fault-delay-request"); + /** * Support class for testing the dynamic delay filter. We rely on the fault filter for * inducing the actual delay, so this aims to prove that: @@ -20,57 +22,17 @@ namespace Nighthawk { * - Failure modes work. * - TODO(#393): An end to end test which proves that the interaction between this filter * and the fault filter work as expected. + * + * The Dynamic Delay filter communicates with the fault filter by adding kDelayHeaderString + * to the request headers. We use that in tests below to verify expectations. The fault filter + * accepts input values via request headers specified in milliseconds, so our expectations are + * also using milliseconds. 
*/ class HttpDynamicDelayIntegrationTest - : public Envoy::HttpIntegrationTest, + : public HttpFilterIntegrationTestBase, public testing::TestWithParam { -protected: - HttpDynamicDelayIntegrationTest() - : HttpIntegrationTest(Envoy::Http::CodecClient::Type::HTTP1, GetParam()), - request_headers_({{":method", "GET"}, {":path", "/"}, {":authority", "host"}}), - delay_header_string_(Envoy::Http::LowerCaseString("x-envoy-fault-delay-request")) {} - - // We don't override SetUp(): tests in this file will call setup() instead to avoid having to - // create a fixture per filter configuration. - void setup(const std::string& config) { - config_helper_.addFilter(config); - HttpIntegrationTest::initialize(); - } - - // Fetches a response with request-level configuration set in the request header. - Envoy::IntegrationStreamDecoderPtr getResponse(absl::string_view request_level_config, - bool setup_for_upstream_request = true) { - const Envoy::Http::LowerCaseString key("x-nighthawk-test-server-config"); - Envoy::Http::TestRequestHeaderMapImpl request_headers = request_headers_; - request_headers.setCopy(key, request_level_config); - return getResponse(request_headers, setup_for_upstream_request); - } - - // Fetches a response with the default request headers, expecting the fake upstream to supply - // the response. - Envoy::IntegrationStreamDecoderPtr getResponse() { return getResponse(request_headers_); } - - // Fetches a response using the provided request headers. When setup_for_upstream_request - // is true, the expectation will be that an upstream request will be needed to provide a - // response. If it is set to false, the extension is expected to supply the response, and - // no upstream request ought to occur. 
- Envoy::IntegrationStreamDecoderPtr - getResponse(const Envoy::Http::TestRequestHeaderMapImpl& request_headers, - bool setup_for_upstream_request = true) { - cleanupUpstreamAndDownstream(); - codec_client_ = makeHttpConnection(lookupPort("http")); - Envoy::IntegrationStreamDecoderPtr response; - if (setup_for_upstream_request) { - response = sendRequestAndWaitForResponse(request_headers, 0, default_response_headers_, 0); - } else { - response = codec_client_->makeHeaderOnlyRequest(request_headers); - response->waitForEndStream(); - } - return response; - } - - const Envoy::Http::TestRequestHeaderMapImpl request_headers_; - const Envoy::Http::LowerCaseString delay_header_string_; +public: + HttpDynamicDelayIntegrationTest() : HttpFilterIntegrationTestBase(GetParam()){}; }; INSTANTIATE_TEST_SUITE_P(IpVersions, HttpDynamicDelayIntegrationTest, @@ -78,69 +40,72 @@ INSTANTIATE_TEST_SUITE_P(IpVersions, HttpDynamicDelayIntegrationTest, // Verify expectations with an empty dynamic-delay configuration. TEST_P(HttpDynamicDelayIntegrationTest, NoStaticConfiguration) { - setup(R"( + initializeFilterConfiguration(R"( name: dynamic-delay typed_config: "@type": type.googleapis.com/nighthawk.server.ResponseOptions )"); - // Don't send any config request header - getResponse(); - EXPECT_EQ(upstream_request_->headers().get(delay_header_string_), nullptr); - // Send a config request header with an empty / default config. Should be a no-op. - getResponse("{}"); - EXPECT_EQ(upstream_request_->headers().get(delay_header_string_), nullptr); - // Send a config request header, this should become effective. - getResponse("{static_delay: \"1.6s\"}"); - EXPECT_EQ(upstream_request_->headers().get(delay_header_string_)->value().getStringView(), - "1600"); - - // Send a malformed config request header. This ought to shortcut and directly reply, - // hence we don't expect an upstream request. 
- auto response = getResponse("bad_json", false); - EXPECT_EQ(Envoy::Http::Utility::getResponseStatus(response->headers()), 500); - EXPECT_EQ( - response->body(), - "dynamic-delay didn't understand the request: Error merging json config: Unable to parse " - "JSON as proto (INVALID_ARGUMENT:Unexpected token.\nbad_json\n^): bad_json"); - // Send an empty config header, which ought to trigger failure mode as well. - response = getResponse("", false); - EXPECT_EQ(Envoy::Http::Utility::getResponseStatus(response->headers()), 500); - EXPECT_EQ( - response->body(), - "dynamic-delay didn't understand the request: Error merging json config: Unable to " - "parse JSON as proto (INVALID_ARGUMENT:Unexpected end of string. Expected a value.\n\n^): "); + // Don't send any config request header ... + getResponse(ResponseOrigin::UPSTREAM); + // ... we shouldn't observe any delay being requested via the upstream request headers. + EXPECT_EQ(upstream_request_->headers().get(kDelayHeaderString), nullptr); + + // Send a config request header with an empty / default configuration .... + setRequestLevelConfiguration("{}"); + getResponse(ResponseOrigin::UPSTREAM); + // ... we shouldn't observe any delay being requested via the upstream request headers. + EXPECT_EQ(upstream_request_->headers().get(kDelayHeaderString), nullptr); + + // Send a config request header requesting a 1.6s delay... + setRequestLevelConfiguration("{static_delay: \"1.6s\"}"); + getResponse(ResponseOrigin::UPSTREAM); + // ...we should observe a delay of 1.6s in the upstream request. + EXPECT_EQ(upstream_request_->headers().get(kDelayHeaderString)->value().getStringView(), "1600"); } // Verify expectations with static/file-based static_delay configuration. 
TEST_P(HttpDynamicDelayIntegrationTest, StaticConfigurationStaticDelay) { - setup(R"EOF( + initializeFilterConfiguration(R"EOF( name: dynamic-delay typed_config: "@type": type.googleapis.com/nighthawk.server.ResponseOptions static_delay: 1.33s )EOF"); - getResponse(); - EXPECT_EQ(upstream_request_->headers().get(delay_header_string_)->value().getStringView(), - "1330"); - getResponse("{}"); - EXPECT_EQ(upstream_request_->headers().get(delay_header_string_)->value().getStringView(), - "1330"); - getResponse("{static_delay: \"0.2s\"}"); + + // Without any request-level configuration, we expect the statically configured static delay to + // apply. + getResponse(ResponseOrigin::UPSTREAM); + EXPECT_EQ(upstream_request_->headers().get(kDelayHeaderString)->value().getStringView(), "1330"); + + // With an empty request-level configuration, we expect the statically configured static delay to + // apply. + setRequestLevelConfiguration("{}"); + getResponse(ResponseOrigin::UPSTREAM); + EXPECT_EQ(upstream_request_->headers().get(kDelayHeaderString)->value().getStringView(), "1330"); + + // Overriding the statically configured static delay via request-level configuration should be + // reflected in the output. + setRequestLevelConfiguration("{static_delay: \"0.2s\"}"); + getResponse(ResponseOrigin::UPSTREAM); // TODO(#392): This fails, because the duration is a two-field message: it would make here to see // both the number of seconds and nanoseconds to be overridden. // However, the seconds part is set to '0', which equates to the default of the underlying int // type, and the fact that we are using proto3, which doesn't merge default values. // Hence the following expectation will fail, as it yields 1200 instead of the expected 200. 
- // EXPECT_EQ(upstream_request_->headers().get(delay_header_string_)->value().getStringView(), + // EXPECT_EQ(upstream_request_->headers().get(kDelayHeaderString)->value().getStringView(), // "200"); - getResponse("{static_delay: \"2.2s\"}"); - EXPECT_EQ(upstream_request_->headers().get(delay_header_string_)->value().getStringView(), - "2200"); + + // Overriding the statically configured static delay via request-level configuration should be + // reflected in the output. + setRequestLevelConfiguration("{static_delay: \"2.2s\"}"); + getResponse(ResponseOrigin::UPSTREAM); + // 2.2 seconds -> 2200 ms. + EXPECT_EQ(upstream_request_->headers().get(kDelayHeaderString)->value().getStringView(), "2200"); } // Verify expectations with static/file-based concurrency_based_linear_delay configuration. TEST_P(HttpDynamicDelayIntegrationTest, StaticConfigurationConcurrentDelay) { - setup(R"EOF( + initializeFilterConfiguration(R"EOF( name: dynamic-delay typed_config: "@type": type.googleapis.com/nighthawk.server.ResponseOptions @@ -148,8 +113,10 @@ name: dynamic-delay minimal_delay: 0.05s concurrency_delay_factor: 0.01s )EOF"); - getResponse(); - EXPECT_EQ(upstream_request_->headers().get(delay_header_string_)->value().getStringView(), "60"); + getResponse(ResponseOrigin::UPSTREAM); + // Based on the algorithm of concurrency_based_linear_delay, for the first request we expect to + // observe the configured minimal_delay + concurrency_delay_factor = 0.06s -> 60ms. 
+ EXPECT_EQ(upstream_request_->headers().get(kDelayHeaderString)->value().getStringView(), "60"); } class ComputeTest : public testing::Test { diff --git a/test/server/http_time_tracking_filter_integration_test.cc b/test/server/http_time_tracking_filter_integration_test.cc index 55d08e9c9..5f1348c56 100644 --- a/test/server/http_time_tracking_filter_integration_test.cc +++ b/test/server/http_time_tracking_filter_integration_test.cc @@ -1,10 +1,5 @@ #include -#include "envoy/upstream/cluster_manager.h" -#include "envoy/upstream/upstream.h" - -#include "external/envoy/test/common/upstream/utility.h" -#include "external/envoy/test/integration/http_integration.h" #include "external/envoy/test/test_common/simulated_time_system.h" #include "api/server/response_options.pb.h" @@ -12,7 +7,8 @@ #include "server/configuration.h" #include "server/http_time_tracking_filter.h" -#include "server/well_known_headers.h" + +#include "test/server/http_filter_integration_test_base.h" #include "gtest/gtest.h" @@ -32,53 +28,10 @@ name: time-tracking )EOF"; class HttpTimeTrackingIntegrationTest - : public Envoy::HttpIntegrationTest, + : public HttpFilterIntegrationTestBase, public testing::TestWithParam { -protected: - HttpTimeTrackingIntegrationTest() - : HttpIntegrationTest(Envoy::Http::CodecClient::Type::HTTP1, GetParam()), - request_headers_({{":method", "GET"}, {":path", "/"}, {":authority", "host"}}) {} - - // We don't override SetUp(): tests in this file will call setup() instead to avoid having to - // create a fixture per filter configuration. - void setup(const std::string& config) { - config_helper_.addFilter(config); - HttpIntegrationTest::initialize(); - } - - // Fetches a response with request-level configuration set in the request header. 
- Envoy::IntegrationStreamDecoderPtr getResponse(absl::string_view request_level_config, - bool setup_for_upstream_request = true) { - Envoy::Http::TestRequestHeaderMapImpl request_headers = request_headers_; - request_headers.setCopy(Nighthawk::Server::TestServer::HeaderNames::get().TestServerConfig, - request_level_config); - return getResponse(request_headers, setup_for_upstream_request); - } - - // Fetches a response with the default request headers, expecting the fake upstream to supply - // the response. - Envoy::IntegrationStreamDecoderPtr getResponse() { return getResponse(request_headers_); } - - // Fetches a response using the provided request headers. When setup_for_upstream_request - // is true, the expectation will be that an upstream request will be needed to provide a - // response. If it is set to false, the extension is expected to supply the response, and - // no upstream request ought to occur. - Envoy::IntegrationStreamDecoderPtr - getResponse(const Envoy::Http::TestRequestHeaderMapImpl& request_headers, - bool setup_for_upstream_request = true) { - cleanupUpstreamAndDownstream(); - codec_client_ = makeHttpConnection(lookupPort("http")); - Envoy::IntegrationStreamDecoderPtr response; - if (setup_for_upstream_request) { - response = sendRequestAndWaitForResponse(request_headers, 0, default_response_headers_, 0); - } else { - response = codec_client_->makeHeaderOnlyRequest(request_headers); - response->waitForEndStream(); - } - return response; - } - - const Envoy::Http::TestRequestHeaderMapImpl request_headers_; +public: + HttpTimeTrackingIntegrationTest() : HttpFilterIntegrationTestBase(GetParam()){}; }; INSTANTIATE_TEST_SUITE_P(IpVersions, HttpTimeTrackingIntegrationTest, @@ -86,13 +39,17 @@ INSTANTIATE_TEST_SUITE_P(IpVersions, HttpTimeTrackingIntegrationTest, // Verify expectations with static/file-based time-tracking configuration. 
TEST_P(HttpTimeTrackingIntegrationTest, ReturnsPositiveLatencyForStaticConfiguration) { - setup(fmt::format(kProtoConfigTemplate, kDefaultProtoFragment)); - Envoy::IntegrationStreamDecoderPtr response = getResponse(); + initializeFilterConfiguration(fmt::format(kProtoConfigTemplate, kDefaultProtoFragment)); + + // As the first request doesn't have a prior one, we should not observe a delta. + Envoy::IntegrationStreamDecoderPtr response = getResponse(ResponseOrigin::UPSTREAM); int64_t latency; const Envoy::Http::HeaderEntry* latency_header_1 = response->headers().get(Envoy::Http::LowerCaseString(kLatencyResponseHeaderName)); EXPECT_EQ(latency_header_1, nullptr); - response = getResponse(); + + // On the second request we should observe a delta. + response = getResponse(ResponseOrigin::UPSTREAM); const Envoy::Http::HeaderEntry* latency_header_2 = response->headers().get(Envoy::Http::LowerCaseString(kLatencyResponseHeaderName)); ASSERT_NE(latency_header_2, nullptr); @@ -102,14 +59,17 @@ TEST_P(HttpTimeTrackingIntegrationTest, ReturnsPositiveLatencyForStaticConfigura // Verify expectations with an empty time-tracking configuration. TEST_P(HttpTimeTrackingIntegrationTest, ReturnsPositiveLatencyForPerRequestConfiguration) { - setup(fmt::format(kProtoConfigTemplate, "")); - // Don't send any config request header - getResponse(); - // Send a config request header with an empty / default config. Should be a no-op. - getResponse("{}"); - // Send a config request header, this should become effective. - Envoy::IntegrationStreamDecoderPtr response = - getResponse(fmt::format("{{{}}}", kDefaultProtoFragment)); + initializeFilterConfiguration(fmt::format(kProtoConfigTemplate, "")); + // As the first request doesn't have a prior one, we should not observe a delta. 
+ setRequestLevelConfiguration("{}"); + Envoy::IntegrationStreamDecoderPtr response = getResponse(ResponseOrigin::UPSTREAM); + EXPECT_EQ(response->headers().get(Envoy::Http::LowerCaseString(kLatencyResponseHeaderName)), + nullptr); + + // With request level configuration indicating that the timing header should be emitted, + // we should be able to observe it. + setRequestLevelConfiguration(fmt::format("{{{}}}", kDefaultProtoFragment)); + response = getResponse(ResponseOrigin::UPSTREAM); const Envoy::Http::HeaderEntry* latency_header = response->headers().get(Envoy::Http::LowerCaseString(kLatencyResponseHeaderName)); ASSERT_NE(latency_header, nullptr); @@ -120,25 +80,6 @@ TEST_P(HttpTimeTrackingIntegrationTest, ReturnsPositiveLatencyForPerRequestConfi EXPECT_GT(latency, 0); } -TEST_P(HttpTimeTrackingIntegrationTest, BehavesWellWithBadPerRequestConfiguration) { - setup(fmt::format(kProtoConfigTemplate, "")); - // Send a malformed config request header. This ought to shortcut and directly reply, - // hence we don't expect an upstream request. - Envoy::IntegrationStreamDecoderPtr response = getResponse("bad_json", false); - EXPECT_EQ(Envoy::Http::Utility::getResponseStatus(response->headers()), 500); - EXPECT_EQ( - response->body(), - "time-tracking didn't understand the request: Error merging json config: Unable to parse " - "JSON as proto (INVALID_ARGUMENT:Unexpected token.\nbad_json\n^): bad_json"); - // Send an empty config header, which ought to trigger failure mode as well. - response = getResponse("", false); - EXPECT_EQ(Envoy::Http::Utility::getResponseStatus(response->headers()), 500); - EXPECT_EQ( - response->body(), - "time-tracking didn't understand the request: Error merging json config: Unable to " - "parse JSON as proto (INVALID_ARGUMENT:Unexpected end of string. 
Expected a value.\n\n^): "); -} - class HttpTimeTrackingFilterConfigTest : public testing::Test, public Envoy::Event::TestUsingSimulatedTime {}; From e21ef4b1efd984fbbe14965cb6f1ca2b08dac4af Mon Sep 17 00:00:00 2001 From: Otto van der Schaaf Date: Mon, 28 Sep 2020 22:37:48 +0200 Subject: [PATCH 19/63] Fix jitter behavior for large values. (#552) By chance weird behaviour was observed for when specifying large jitter values. This warranted analysis, and upon investigation the root problem seems to be that when configuring `--uniform-jitter` to a value that would allow the adjusted value(s) to overlap with the pacing of the underlying rate limiter, the new overlapping value could overwrite the old one that was scheduled. This modifies behaviour and tests to fix and consolidate, by modifying `DistributionSamplingRateLimiter` so that it can queue adjusted release timings instead of storing a single one. This will allow it to accumulate multiple release timings that are adjusted for jitter. This ensures that the pacing of the rate limiter that modifies timings to add jitter gets properly disconnected from the pacing of the underlying rate limiter. NOTE: There are no known consumers running into problems / inaccuracies because of this. To tease the problem out, either one needs to: - configure `--jitter-uniform` in a way that it would adjust timings to overlap with the frequency imposed by `--rps`, or - configure any combination of `--jitter-uniform > 0s` and `--burst-size > 0`. 
Signed-off-by: Otto van der Schaaf --- source/common/rate_limiter_impl.cc | 25 ++-- source/common/rate_limiter_impl.h | 5 +- test/rate_limiter_test.cc | 182 ++++++++++++++++++++++------- 3 files changed, 160 insertions(+), 52 deletions(-) diff --git a/source/common/rate_limiter_impl.cc b/source/common/rate_limiter_impl.cc index 43c007d98..15f1fd71b 100644 --- a/source/common/rate_limiter_impl.cc +++ b/source/common/rate_limiter_impl.cc @@ -156,14 +156,19 @@ DelegatingRateLimiterImpl::DelegatingRateLimiterImpl( random_distribution_generator_(std::move(random_distribution_generator)) {} bool DelegatingRateLimiterImpl::tryAcquireOne() { - if (distributed_start_ == absl::nullopt) { - if (rate_limiter_->tryAcquireOne()) { - distributed_start_ = timeSource().monotonicTime() + random_distribution_generator_(); - } - } - - if (distributed_start_ != absl::nullopt && distributed_start_ <= timeSource().monotonicTime()) { - distributed_start_ = absl::nullopt; + const Envoy::MonotonicTime now = timeSource().monotonicTime(); + if (rate_limiter_->tryAcquireOne()) { + const Envoy::MonotonicTime adjusted = now + random_distribution_generator_(); + // We track a sorted list of timings, where the one at the front is the one that should + // be applied the soonest. 
+ distributed_timings_.insert( + std::upper_bound(distributed_timings_.begin(), distributed_timings_.end(), adjusted), + adjusted); + } + + if (!distributed_timings_.empty() && distributed_timings_.front() <= now) { + distributed_timings_.pop_front(); + sanity_check_pending_release_ = false; return true; } @@ -171,7 +176,9 @@ bool DelegatingRateLimiterImpl::tryAcquireOne() { } void DelegatingRateLimiterImpl::releaseOne() { - distributed_start_ = absl::nullopt; + RELEASE_ASSERT(!sanity_check_pending_release_, + "unexpected call to DelegatingRateLimiterImpl::releaseOne()"); + sanity_check_pending_release_ = true; rate_limiter_->releaseOne(); } diff --git a/source/common/rate_limiter_impl.h b/source/common/rate_limiter_impl.h index 1408ec09d..f4a5e8753 100644 --- a/source/common/rate_limiter_impl.h +++ b/source/common/rate_limiter_impl.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include "envoy/common/time.h" @@ -156,7 +157,9 @@ class DelegatingRateLimiterImpl : public ForwardingRateLimiterImpl, const RateLimiterDelegate random_distribution_generator_; private: - absl::optional distributed_start_; + std::list distributed_timings_; + // Used to enforce that releaseOne() is always paired with a successful tryAcquireOne().
+ bool sanity_check_pending_release_{true}; }; class UniformRandomDistributionSamplerImpl : public DiscreteNumericDistributionSampler { diff --git a/test/rate_limiter_test.cc b/test/rate_limiter_test.cc index 76d9b3e5d..d05ef9867 100644 --- a/test/rate_limiter_test.cc +++ b/test/rate_limiter_test.cc @@ -1,4 +1,5 @@ #include +#include #include "nighthawk/common/exception.h" @@ -71,7 +72,7 @@ TEST_F(RateLimiterTest, BurstingRateLimiterTest) { TEST_F(RateLimiterTest, ScheduledStartingRateLimiterTest) { Envoy::Event::SimulatedTimeSystem time_system; const auto schedule_delay = 10ms; - // We test regular flow, but also the flow where the first aquisition attempt comes after the + // We test regular flow, but also the flow where the first acquisition attempt comes after the // scheduled delay. This should be business as usual from a functional perspective, but internally // this rate limiter specializes on this case to log a warning message, and we want to cover that. for (const bool starting_late : std::vector{false, true}) { @@ -173,7 +174,9 @@ TEST_F(RateLimiterTest, DistributionSamplingRateLimiterImplTest) { std::move(sampler), std::move(mock_rate_limiter)); EXPECT_CALL(unsafe_mock_rate_limiter, tryAcquireOne).Times(tries).WillRepeatedly(Return(true)); - EXPECT_CALL(unsafe_mock_rate_limiter, releaseOne).Times(tries); + // 1 in a billion chance of failure to exceed max_expected_acquisitions. + const int max_expected_acquisitions = (tries / 2) + 30; + EXPECT_CALL(unsafe_mock_rate_limiter, releaseOne).Times(AtMost(max_expected_acquisitions)); int acquisitions = 0; // We used a 1ns upper bound. That means we can expect around 50% of acquisitions to succeed as @@ -181,53 +184,148 @@ TEST_F(RateLimiterTest, DistributionSamplingRateLimiterImplTest) { for (uint64_t i = 0; i < tries; i++) { if (rate_limiter->tryAcquireOne()) { acquisitions++; + // We test the release gets propagated to the mock rate limiter. 
+ // also, the release will force DelegatingRateLimiterImpl to propagate tryAcquireOne. + rate_limiter->releaseOne(); + } - // We test the release gets propagated to the mock rate limiter. - // also, the release will force DelegatingRateLimiterImpl to propagate tryAcquireOne. - rate_limiter->releaseOne(); } - // 1 in a billion chance of failure. - EXPECT_LT(acquisitions, (tries / 2) + 30); + EXPECT_LT(acquisitions, max_expected_acquisitions); } // A rate limiter determines when acquisition is allowed, but DistributionSamplingRateLimiterImpl -// may arbitrarily delay that. We test that principle here. -TEST_F(RateLimiterTest, DistributionSamplingRateLimiterImplSchedulingTest) { - auto mock_rate_limiter = std::make_unique>(); - MockRateLimiter& unsafe_mock_rate_limiter = *mock_rate_limiter; - Envoy::Event::SimulatedTimeSystem time_system; - auto* unsafe_discrete_numeric_distribution_sampler = new MockDiscreteNumericDistributionSampler(); - RateLimiterPtr rate_limiter = std::make_unique( - std::unique_ptr( - unsafe_discrete_numeric_distribution_sampler), - std::move(mock_rate_limiter)); - EXPECT_CALL(unsafe_mock_rate_limiter, timeSource) - .Times(AtLeast(1)) - .WillRepeatedly(ReturnRef(time_system)); +// may arbitrarily delay that. We test that principle with tests that use this fixture, which +// sets up a distribution sampling rate limiter instance to encapsulate a mock rate limiter, +// relying on simulated time and a mock discrete numeric distribution sampler.
+class DistributionSamplingRateLimiterTest : public RateLimiterTest { +public: + DistributionSamplingRateLimiterTest() + : tmp_mock_inner_rate_limiter_(std::make_unique>()), + mock_inner_rate_limiter_(*tmp_mock_inner_rate_limiter_), + tmp_mock_discrete_numeric_distribution_sampler_( + std::make_unique()), + mock_discrete_numeric_distribution_sampler_( + *tmp_mock_discrete_numeric_distribution_sampler_), + rate_limiter_(std::make_unique( + std::move(tmp_mock_discrete_numeric_distribution_sampler_), + std::move(tmp_mock_inner_rate_limiter_))) { + EXPECT_CALL(mock_inner_rate_limiter_, timeSource).WillRepeatedly(ReturnRef(time_system_)); + } - EXPECT_CALL(unsafe_mock_rate_limiter, tryAcquireOne) - .Times(AtLeast(1)) - .WillRepeatedly(Return(true)); - EXPECT_CALL(unsafe_mock_rate_limiter, releaseOne).Times(1); - EXPECT_CALL(*unsafe_discrete_numeric_distribution_sampler, getValue) - .Times(3) + Envoy::Event::SimulatedTimeSystem time_system_; + std::unique_ptr> tmp_mock_inner_rate_limiter_; + MockRateLimiter& mock_inner_rate_limiter_; + std::unique_ptr + tmp_mock_discrete_numeric_distribution_sampler_; + MockDiscreteNumericDistributionSampler& mock_discrete_numeric_distribution_sampler_; + RateLimiterPtr rate_limiter_; +}; + +TEST_F(DistributionSamplingRateLimiterTest, SingleAcquisition) { + EXPECT_CALL(mock_inner_rate_limiter_, tryAcquireOne) + .WillOnce(Return(true)) + .WillOnce(Return(false)) + .WillOnce(Return(false)); + // The distribution first yields a 1 ns offset. + EXPECT_CALL(mock_discrete_numeric_distribution_sampler_, getValue).WillOnce(Return(1)); + // We don't expect to be green lighted without moving time forward. 
+ EXPECT_FALSE(rate_limiter_->tryAcquireOne()); + time_system_.advanceTimeWait(1ns); + EXPECT_TRUE(rate_limiter_->tryAcquireOne()); + EXPECT_FALSE(rate_limiter_->tryAcquireOne()); +} + +TEST_F(DistributionSamplingRateLimiterTest, QueuedAcquisition) { + EXPECT_CALL(mock_inner_rate_limiter_, tryAcquireOne) + .WillOnce(Return(true)) + .WillOnce(Return(true)) + .WillOnce(Return(false)) + .WillOnce(Return(false)) + .WillOnce(Return(false)); + // The distribution yields a 1 ns offset two times. + EXPECT_CALL(mock_discrete_numeric_distribution_sampler_, getValue) .WillOnce(Return(1)) - .WillOnce(Return(0)) .WillOnce(Return(1)); + // We do not expect to observe releases because we did not move time forward. + EXPECT_FALSE(rate_limiter_->tryAcquireOne()); + EXPECT_FALSE(rate_limiter_->tryAcquireOne()); + time_system_.advanceTimeWait(1ns); + // We moved time forward, release timings that have been queued up earlier should now be observed. + EXPECT_TRUE(rate_limiter_->tryAcquireOne()); + EXPECT_TRUE(rate_limiter_->tryAcquireOne()); + // This should be all of it, so no further acquisitions are to be expected. + EXPECT_FALSE(rate_limiter_->tryAcquireOne()); +} - // The distribution first yields a 1 ns offset. So we don't expect to be green lighted. - EXPECT_FALSE(rate_limiter->tryAcquireOne()); - time_system.advanceTimeWait(1ns); - EXPECT_TRUE(rate_limiter->tryAcquireOne()); - // We expect releaseOne to be propagated. - rate_limiter->releaseOne(); - // The distribution will yield an offset of 0ns, we expect success. 
- EXPECT_TRUE(rate_limiter->tryAcquireOne()); +TEST_F(DistributionSamplingRateLimiterTest, ReleaseOneFunctionsWhenAcquired) { + EXPECT_CALL(mock_inner_rate_limiter_, tryAcquireOne).WillOnce(Return(true)); + EXPECT_CALL(mock_discrete_numeric_distribution_sampler_, getValue).WillOnce(Return(0)); + EXPECT_TRUE(rate_limiter_->tryAcquireOne()); + EXPECT_CALL(mock_inner_rate_limiter_, releaseOne).Times(1); + rate_limiter_->releaseOne(); +} - // We don't advanceTimeWait, and the distribution will yield a 1ns offset. No green light. - EXPECT_FALSE(rate_limiter->tryAcquireOne()); - time_system.advanceTimeWait(1ns); - EXPECT_TRUE(rate_limiter->tryAcquireOne()); +// Calling releaseOne() without a prior acquisition is invalid. +TEST_F(DistributionSamplingRateLimiterTest, ReleaseOneDiesWhenNotAcquired) { + EXPECT_DEATH(rate_limiter_->releaseOne(), + "unexpected call to DelegatingRateLimiterImpl::releaseOne"); + EXPECT_CALL(mock_inner_rate_limiter_, tryAcquireOne).WillOnce(Return(true)); + EXPECT_CALL(mock_discrete_numeric_distribution_sampler_, getValue).WillOnce(Return(0)); + EXPECT_TRUE(rate_limiter_->tryAcquireOne()); + rate_limiter_->releaseOne(); + EXPECT_DEATH(rate_limiter_->releaseOne(), + "unexpected call to DelegatingRateLimiterImpl::releaseOne"); +} + +// The DistributionSamplingRateLimiter may queue up timings for deferred release later on. Here we +// verify that those deferred release timings happen at the expected points in time. This is +// important, because the associated distribution sampler may give the +// DistributionSamplingRateLimiter random time offsets as inputs. +TEST_F(DistributionSamplingRateLimiterTest, QueuedAcquisitionCorrectReleaseOrdering) { + // The vector below defines the sequence of timing offsets that the mock distribution sampler will + // yield.
+ std::vector input_acquisition_timings_ms = {0, 0, 15000, 7, 3, 700, 2, + 2, 1, 800, 4, 7, 9}; + uint64_t i = 0; + uint64_t j = 0; + EXPECT_CALL(mock_inner_rate_limiter_, tryAcquireOne) + .WillRepeatedly([&i, input_acquisition_timings_ms]() { + return ++i <= input_acquisition_timings_ms.size() ? true : false; + }); + EXPECT_CALL(mock_discrete_numeric_distribution_sampler_, getValue) + .Times(input_acquisition_timings_ms.size()) + .WillRepeatedly( + [&j, input_acquisition_timings_ms]() { return input_acquisition_timings_ms[j++] * 1e6; }); + + // Here we are at T0. The mock rate limiter isn't time dependent when it comes to releasing. + // So here we iterate over the expected input acquisition timings, and the outer rate limiter + // will buffer those that indicate an offset > 0. Zero-valued offsets ought to be released + // immediately. + std::vector acquisition_timings; + for (uint64_t k : input_acquisition_timings_ms) { + if (k == 0) { + EXPECT_TRUE(rate_limiter_->tryAcquireOne()); + acquisition_timings.push_back(0); + } else { + EXPECT_FALSE(rate_limiter_->tryAcquireOne()); + } + } + + // Now we will start moving the clock, and see if the accrued deferred releases result in the + // correct timings. + const std::chrono::seconds duration = 15s; + auto total_ms_elapsed = 0ms; + const auto kClockTick = 1ms; + do { + while (rate_limiter_->tryAcquireOne()) { + acquisition_timings.push_back(total_ms_elapsed.count()); + } + time_system_.advanceTimeWait(kClockTick); + total_ms_elapsed += kClockTick; + } while (total_ms_elapsed <= duration); + + // The observed timings should equal the sorted offsets we had at the input. 
+ std::sort(input_acquisition_timings_ms.begin(), input_acquisition_timings_ms.end()); + EXPECT_EQ(acquisition_timings, input_acquisition_timings_ms); } class LinearRampingRateLimiterImplTest : public Test { @@ -410,18 +508,18 @@ TEST_F(ZipfRateLimiterImplTest, TimingVerificationTest) { std::make_unique(time_system, 10_Hz), q, v, ZipfRateLimiterImpl::ZipfBehavior::ZIPF_PSEUDO_RANDOM); const std::chrono::seconds duration = 15s; - std::vector aquisition_timings; + std::vector acquisition_timings; auto total_ms_elapsed = 0ms; auto clock_tick = 1ms; do { if (rate_limiter->tryAcquireOne()) { - aquisition_timings.push_back(total_ms_elapsed.count()); + acquisition_timings.push_back(total_ms_elapsed.count()); } time_system.advanceTimeWait(clock_tick); total_ms_elapsed += clock_tick; } while (total_ms_elapsed <= duration); - EXPECT_EQ(aquisition_timings, + EXPECT_EQ(acquisition_timings, std::vector({450, 750, 1250, 2350, 2850, 3850, 4150, 4350, 4450, 5750, 5950, 6350, 7850, 8350, 8550, 9850, 10150, 10450, 10550, 11950, 12250, 12550, 13250, 13550, 13650, 13750, 13850})); From 7aa7d3fe97c9db276f07ec0f0c59ec742f000c11 Mon Sep 17 00:00:00 2001 From: Otto van der Schaaf Date: Mon, 28 Sep 2020 23:52:05 +0200 Subject: [PATCH 20/63] Update Envoy to 5a87f1e59b42ad546698d389f6ccac9406534e17 (#554) Signed-off-by: Otto van der Schaaf --- bazel/repositories.bzl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/bazel/repositories.bzl b/bazel/repositories.bzl index 7c2264c93..51e9f0566 100644 --- a/bazel/repositories.bzl +++ b/bazel/repositories.bzl @@ -1,7 +1,7 @@ load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") -ENVOY_COMMIT = "9eeba8fd427d9bd0ef947ec14a3157083cc7bf0e" # September 17th, 2020 -ENVOY_SHA = "4537bde6652ea00db9b45c126c0519619909bc0d79c6ede02d68a8782f8c1c67" +ENVOY_COMMIT = "5a87f1e59b42ad546698d389f6ccac9406534e17" # September 25th, 2020 +ENVOY_SHA = "739c62249bae60f633f91dee846825f1d5ddcc469d45ef370e57f1a010c13258" 
HDR_HISTOGRAM_C_VERSION = "0.11.1" # September 17th, 2020 HDR_HISTOGRAM_C_SHA = "8550071d4ae5c8229448f9b68469d6d42c620cd25111b49c696d00185e5f8329" From 6aa03318440805719d4953fbbaa3c4d16c433b39 Mon Sep 17 00:00:00 2001 From: Otto van der Schaaf Date: Mon, 28 Sep 2020 23:55:23 +0200 Subject: [PATCH 21/63] Fix import in Zipkin integration tests. (#556) Reported via Slack; confirmed to fix a problem when executing tests on some systems. Signed-off-by: Otto van der Schaaf --- test/integration/test_integration_zipkin.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/integration/test_integration_zipkin.py b/test/integration/test_integration_zipkin.py index 2b815d2d1..b981db8b9 100644 --- a/test/integration/test_integration_zipkin.py +++ b/test/integration/test_integration_zipkin.py @@ -4,7 +4,7 @@ # server_config needs to be explicitly imported to avoid an error, as http_test_server_fixture # relies on it. -from integration_test_fixtures import (http_test_server_fixture, server_config) +from test.integration.integration_test_fixtures import (http_test_server_fixture, server_config) from test.integration import asserts From 3102a8cd5e0bfaa30a56bf248168a66f4567be88 Mon Sep 17 00:00:00 2001 From: Otto van der Schaaf Date: Wed, 30 Sep 2020 16:29:44 +0200 Subject: [PATCH 22/63] Use system time for scheduling. (#549) Small refactor that makes the client rely on system time instead of monotonic time for scheduling the start of worker executions. System clocks can be synchronized across machines, and this may come in handy when we start facilitating horizontal scaling. Note: `SequencerImpl` gets modified to re-use the execution duration that the `RateLimiter` it uses already tracks, in favour of its own tracking. This is a small clean up. Apart from the actual switching from monotonic time to wall clock time, this should be a mechanical change. 
This change will make things easier if we would like to add an option to schedule the time at which an execution will start. This in turn could be useful when directing clients running on multiple machines to start, as a means to have them start at approximately the same time. (the approximation would mostly depend on how well the wall clock time is synchronised across machines that are involved). Signed-off-by: Otto van der Schaaf --- include/nighthawk/common/factories.h | 4 ++-- source/client/client_worker_impl.cc | 2 +- source/client/client_worker_impl.h | 2 +- source/client/factories_impl.cc | 14 ++++++----- source/client/factories_impl.h | 4 ++-- source/client/process_impl.cc | 2 +- source/common/cached_time_source_impl.h | 4 ++-- source/common/rate_limiter_impl.cc | 8 +++---- source/common/rate_limiter_impl.h | 4 ++-- source/common/sequencer_impl.cc | 12 ++++------ source/common/sequencer_impl.h | 12 +++------- source/common/termination_predicate_impl.cc | 4 ++-- source/common/termination_predicate_impl.h | 4 ++-- test/client_worker_test.cc | 2 +- test/factories_test.cc | 2 +- test/mocks/common/mock_sequencer_factory.h | 2 +- .../mock_termination_predicate_factory.h | 2 +- test/rate_limiter_test.cc | 9 ++++--- test/sequencer_test.cc | 24 ++++++++++++------- test/termination_predicate_test.cc | 2 +- 20 files changed, 59 insertions(+), 60 deletions(-) diff --git a/include/nighthawk/common/factories.h b/include/nighthawk/common/factories.h index c93097b79..20d5eb6a6 100644 --- a/include/nighthawk/common/factories.h +++ b/include/nighthawk/common/factories.h @@ -24,7 +24,7 @@ class SequencerFactory { const SequencerTarget& sequencer_target, TerminationPredicatePtr&& termination_predicate, Envoy::Stats::Scope& scope, - const Envoy::MonotonicTime scheduled_starting_time) const PURE; + const Envoy::SystemTime scheduled_starting_time) const PURE; }; class StatisticFactory { @@ -46,7 +46,7 @@ class TerminationPredicateFactory { virtual ~TerminationPredicateFactory() = 
default; virtual TerminationPredicatePtr create(Envoy::TimeSource& time_source, Envoy::Stats::Scope& scope, - const Envoy::MonotonicTime scheduled_starting_time) const PURE; + const Envoy::SystemTime scheduled_starting_time) const PURE; }; /** diff --git a/source/client/client_worker_impl.cc b/source/client/client_worker_impl.cc index a5d456763..a6c23542c 100644 --- a/source/client/client_worker_impl.cc +++ b/source/client/client_worker_impl.cc @@ -19,7 +19,7 @@ ClientWorkerImpl::ClientWorkerImpl(Envoy::Api::Api& api, Envoy::ThreadLocal::Ins const SequencerFactory& sequencer_factory, const RequestSourceFactory& request_generator_factory, Envoy::Stats::Store& store, const int worker_number, - const Envoy::MonotonicTime starting_time, + const Envoy::SystemTime starting_time, Envoy::Tracing::HttpTracerSharedPtr& http_tracer, const HardCodedWarmupStyle hardcoded_warmup_style) : WorkerImpl(api, tls, store), diff --git a/source/client/client_worker_impl.h b/source/client/client_worker_impl.h index 41b2660bc..0decef819 100644 --- a/source/client/client_worker_impl.h +++ b/source/client/client_worker_impl.h @@ -33,7 +33,7 @@ class ClientWorkerImpl : public WorkerImpl, virtual public ClientWorker { const SequencerFactory& sequencer_factory, const RequestSourceFactory& request_generator_factory, Envoy::Stats::Store& store, const int worker_number, - const Envoy::MonotonicTime starting_time, + const Envoy::SystemTime starting_time, Envoy::Tracing::HttpTracerSharedPtr& http_tracer, const HardCodedWarmupStyle hardcoded_warmup_style); StatisticPtrMap statistics() const override; diff --git a/source/client/factories_impl.cc b/source/client/factories_impl.cc index 6a35458f7..0b3644f9d 100644 --- a/source/client/factories_impl.cc +++ b/source/client/factories_impl.cc @@ -63,10 +63,12 @@ BenchmarkClientPtr BenchmarkClientFactoryImpl::create( SequencerFactoryImpl::SequencerFactoryImpl(const Options& options) : OptionBasedFactoryImpl(options) {} -SequencerPtr 
SequencerFactoryImpl::create( - Envoy::TimeSource& time_source, Envoy::Event::Dispatcher& dispatcher, - const SequencerTarget& sequencer_target, TerminationPredicatePtr&& termination_predicate, - Envoy::Stats::Scope& scope, const Envoy::MonotonicTime scheduled_starting_time) const { +SequencerPtr SequencerFactoryImpl::create(Envoy::TimeSource& time_source, + Envoy::Event::Dispatcher& dispatcher, + const SequencerTarget& sequencer_target, + TerminationPredicatePtr&& termination_predicate, + Envoy::Stats::Scope& scope, + const Envoy::SystemTime scheduled_starting_time) const { StatisticFactoryImpl statistic_factory(options_); Frequency frequency(options_.requestsPerSecond()); RateLimiterPtr rate_limiter = std::make_unique( @@ -87,7 +89,7 @@ SequencerPtr SequencerFactoryImpl::create( return std::make_unique( platform_util_, dispatcher, time_source, std::move(rate_limiter), sequencer_target, statistic_factory.create(), statistic_factory.create(), options_.sequencerIdleStrategy(), - std::move(termination_predicate), scope, scheduled_starting_time); + std::move(termination_predicate), scope); } StatisticFactoryImpl::StatisticFactoryImpl(const Options& options) @@ -184,7 +186,7 @@ TerminationPredicateFactoryImpl::TerminationPredicateFactoryImpl(const Options& TerminationPredicatePtr TerminationPredicateFactoryImpl::create(Envoy::TimeSource& time_source, Envoy::Stats::Scope& scope, - const Envoy::MonotonicTime scheduled_starting_time) const { + const Envoy::SystemTime scheduled_starting_time) const { // We'll always link a predicate which checks for requests to cancel. 
TerminationPredicatePtr root_predicate = std::make_unique( diff --git a/source/client/factories_impl.h b/source/client/factories_impl.h index 5e508b65a..fa7e6c3cd 100644 --- a/source/client/factories_impl.h +++ b/source/client/factories_impl.h @@ -41,7 +41,7 @@ class SequencerFactoryImpl : public OptionBasedFactoryImpl, public SequencerFact SequencerPtr create(Envoy::TimeSource& time_source, Envoy::Event::Dispatcher& dispatcher, const SequencerTarget& sequencer_target, TerminationPredicatePtr&& termination_predicate, Envoy::Stats::Scope& scope, - const Envoy::MonotonicTime scheduled_starting_time) const override; + const Envoy::SystemTime scheduled_starting_time) const override; }; class StatisticFactoryImpl : public OptionBasedFactoryImpl, public StatisticFactory { @@ -73,7 +73,7 @@ class TerminationPredicateFactoryImpl : public OptionBasedFactoryImpl, public: TerminationPredicateFactoryImpl(const Options& options); TerminationPredicatePtr create(Envoy::TimeSource& time_source, Envoy::Stats::Scope& scope, - const Envoy::MonotonicTime scheduled_starting_time) const override; + const Envoy::SystemTime scheduled_starting_time) const override; TerminationPredicate* linkConfiguredPredicates( TerminationPredicate& last_predicate, const TerminationPredicateMap& predicates, const TerminationPredicate::Status termination_status, Envoy::Stats::Scope& scope) const; diff --git a/source/client/process_impl.cc b/source/client/process_impl.cc index cc314c036..28f11e610 100644 --- a/source/client/process_impl.cc +++ b/source/client/process_impl.cc @@ -177,7 +177,7 @@ void ProcessImpl::createWorkers(const uint32_t concurrency) { // TODO(oschaaf): Arguably, this ought to be the job of a rate limiter with awareness of the // global status quo, which we do not have right now. This has been noted in the // track-for-future issue. 
- const auto first_worker_start = time_system_.monotonicTime() + kMinimalWorkerDelay; + const auto first_worker_start = time_system_.systemTime() + kMinimalWorkerDelay; const double inter_worker_delay_usec = (1. / options_.requestsPerSecond()) * 1000000 / concurrency; int worker_number = 0; diff --git a/source/common/cached_time_source_impl.h b/source/common/cached_time_source_impl.h index 8675ec1cc..41ccd3e9d 100644 --- a/source/common/cached_time_source_impl.h +++ b/source/common/cached_time_source_impl.h @@ -21,9 +21,9 @@ class CachedTimeSourceImpl : public Envoy::TimeSource { CachedTimeSourceImpl(Envoy::Event::Dispatcher& dispatcher) : dispatcher_(dispatcher) {} /** - * Calling this will trigger an assert. + * @return Envoy::SystemTime current system time. */ - Envoy::SystemTime systemTime() override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; }; + Envoy::SystemTime systemTime() override { return dispatcher_.timeSource().systemTime(); }; /** * @return Envoy::MonotonicTime cached monotonic time. diff --git a/source/common/rate_limiter_impl.cc b/source/common/rate_limiter_impl.cc index 15f1fd71b..7d4aad4fc 100644 --- a/source/common/rate_limiter_impl.cc +++ b/source/common/rate_limiter_impl.cc @@ -53,16 +53,16 @@ void BurstingRateLimiter::releaseOne() { } ScheduledStartingRateLimiter::ScheduledStartingRateLimiter( - RateLimiterPtr&& rate_limiter, const Envoy::MonotonicTime scheduled_starting_time) + RateLimiterPtr&& rate_limiter, const Envoy::SystemTime scheduled_starting_time) : ForwardingRateLimiterImpl(std::move(rate_limiter)), scheduled_starting_time_(scheduled_starting_time) { - if (timeSource().monotonicTime() >= scheduled_starting_time_) { + if (timeSource().systemTime() >= scheduled_starting_time_) { ENVOY_LOG(error, "Scheduled starting time exceeded. 
This may cause unintended bursty traffic."); } } bool ScheduledStartingRateLimiter::tryAcquireOne() { - if (timeSource().monotonicTime() < scheduled_starting_time_) { + if (timeSource().systemTime() < scheduled_starting_time_) { aquisition_attempted_ = true; return false; } @@ -76,7 +76,7 @@ bool ScheduledStartingRateLimiter::tryAcquireOne() { } void ScheduledStartingRateLimiter::releaseOne() { - if (timeSource().monotonicTime() < scheduled_starting_time_) { + if (timeSource().systemTime() < scheduled_starting_time_) { throw NighthawkException("Unexpected call to releaseOne()"); } return rate_limiter_->releaseOne(); diff --git a/source/common/rate_limiter_impl.h b/source/common/rate_limiter_impl.h index f4a5e8753..70a42e0ae 100644 --- a/source/common/rate_limiter_impl.h +++ b/source/common/rate_limiter_impl.h @@ -125,12 +125,12 @@ class ScheduledStartingRateLimiter : public ForwardingRateLimiterImpl, * @param scheduled_starting_time The starting time */ ScheduledStartingRateLimiter(RateLimiterPtr&& rate_limiter, - const Envoy::MonotonicTime scheduled_starting_time); + const Envoy::SystemTime scheduled_starting_time); bool tryAcquireOne() override; void releaseOne() override; private: - const Envoy::MonotonicTime scheduled_starting_time_; + const Envoy::SystemTime scheduled_starting_time_; bool aquisition_attempted_{false}; }; diff --git a/source/common/sequencer_impl.cc b/source/common/sequencer_impl.cc index 880a44ccc..1dd9eec51 100644 --- a/source/common/sequencer_impl.cc +++ b/source/common/sequencer_impl.cc @@ -14,13 +14,12 @@ SequencerImpl::SequencerImpl( Envoy::TimeSource& time_source, RateLimiterPtr&& rate_limiter, SequencerTarget target, StatisticPtr&& latency_statistic, StatisticPtr&& blocked_statistic, nighthawk::client::SequencerIdleStrategy::SequencerIdleStrategyOptions idle_strategy, - TerminationPredicatePtr&& termination_predicate, Envoy::Stats::Scope& scope, - const Envoy::MonotonicTime scheduled_starting_time) + TerminationPredicatePtr&& 
termination_predicate, Envoy::Stats::Scope& scope) : target_(std::move(target)), platform_util_(platform_util), dispatcher_(dispatcher), time_source_(time_source), rate_limiter_(std::move(rate_limiter)), latency_statistic_(std::move(latency_statistic)), - blocked_statistic_(std::move(blocked_statistic)), start_time_(scheduled_starting_time), - idle_strategy_(idle_strategy), termination_predicate_(std::move(termination_predicate)), + blocked_statistic_(std::move(blocked_statistic)), idle_strategy_(idle_strategy), + termination_predicate_(std::move(termination_predicate)), last_termination_status_(TerminationPredicate::Status::PROCEED), scope_(scope.createScope("sequencer.")), sequencer_stats_({ALL_SEQUENCER_STATS(POOL_COUNTER(*scope_))}) { @@ -57,8 +56,7 @@ void SequencerImpl::stop(bool failed) { spin_timer_.reset(); dispatcher_.exit(); unblockAndUpdateStatisticIfNeeded(time_source_.monotonicTime()); - const auto ran_for = - std::chrono::duration_cast(last_event_time_ - start_time_); + const auto ran_for = std::chrono::duration_cast(executionDuration()); ENVOY_LOG(info, "Stopping after {} ms. Initiated: {} / Completed: {}. " "(Completion rate was {} per second.)", @@ -93,7 +91,7 @@ void SequencerImpl::run(bool from_periodic_timer) { // More importantly, it may help avoid a class of bugs that could be more serious, depending on // functionality (TOC/TOU). dispatcher_.updateApproximateMonotonicTime(); - const auto now = last_event_time_ = time_source_.monotonicTime(); + const auto now = time_source_.monotonicTime(); last_termination_status_ = last_termination_status_ == TerminationPredicate::Status::PROCEED ? 
termination_predicate_->evaluateChain() diff --git a/source/common/sequencer_impl.h b/source/common/sequencer_impl.h index ed3665fd4..ff226b6d3 100644 --- a/source/common/sequencer_impl.h +++ b/source/common/sequencer_impl.h @@ -47,8 +47,7 @@ class SequencerImpl : public Sequencer, public Envoy::Logger::Loggableelapsed(); } double completionsPerSecond() const override { const double usec = - std::chrono::duration_cast(last_event_time_ - start_time_) - .count(); + std::chrono::duration_cast(executionDuration()).count(); return usec == 0 ? 0 : ((targets_completed_ / usec) * 1000000); } @@ -120,8 +116,6 @@ class SequencerImpl : public Sequencer, public Envoy::Logger::Loggable duration_ ? TerminationPredicate::Status::TERMINATE - : TerminationPredicate::Status::PROCEED; + return time_source_.systemTime() - start_ > duration_ ? TerminationPredicate::Status::TERMINATE + : TerminationPredicate::Status::PROCEED; } TerminationPredicate::Status StatsCounterAbsoluteThresholdTerminationPredicateImpl::evaluate() { diff --git a/source/common/termination_predicate_impl.h b/source/common/termination_predicate_impl.h index c1c761345..9a23a8f02 100644 --- a/source/common/termination_predicate_impl.h +++ b/source/common/termination_predicate_impl.h @@ -35,13 +35,13 @@ class DurationTerminationPredicateImpl : public TerminationPredicateBaseImpl { public: DurationTerminationPredicateImpl(Envoy::TimeSource& time_source, std::chrono::microseconds duration, - const Envoy::MonotonicTime start) + const Envoy::SystemTime start) : time_source_(time_source), start_(start), duration_(duration) {} TerminationPredicate::Status evaluate() override; private: Envoy::TimeSource& time_source_; - const Envoy::MonotonicTime start_; + const Envoy::SystemTime start_; std::chrono::microseconds duration_; }; diff --git a/test/client_worker_test.cc b/test/client_worker_test.cc index 8ffdf6680..5f00cc723 100644 --- a/test/client_worker_test.cc +++ b/test/client_worker_test.cc @@ -118,7 +118,7 @@ 
TEST_F(ClientWorkerTest, BasicTest) { auto worker = std::make_unique( *api_, tls_, cluster_manager_ptr_, benchmark_client_factory_, termination_predicate_factory_, sequencer_factory_, request_generator_factory_, store_, worker_number, - time_system_.monotonicTime(), http_tracer_, ClientWorkerImpl::HardCodedWarmupStyle::ON); + time_system_.systemTime(), http_tracer_, ClientWorkerImpl::HardCodedWarmupStyle::ON); worker->start(); worker->waitForCompletion(); diff --git a/test/factories_test.cc b/test/factories_test.cc index fa4d6dbc7..1a410496a 100644 --- a/test/factories_test.cc +++ b/test/factories_test.cc @@ -92,7 +92,7 @@ class SequencerFactoryTest }; auto sequencer = factory.create(api_->timeSource(), dispatcher_, dummy_sequencer_target, std::make_unique(), stats_store_, - time_system.monotonicTime() + 10ms); + time_system.systemTime() + 10ms); EXPECT_NE(nullptr, sequencer.get()); } }; diff --git a/test/mocks/common/mock_sequencer_factory.h b/test/mocks/common/mock_sequencer_factory.h index 63c972f26..96983e24f 100644 --- a/test/mocks/common/mock_sequencer_factory.h +++ b/test/mocks/common/mock_sequencer_factory.h @@ -14,7 +14,7 @@ class MockSequencerFactory : public SequencerFactory { const SequencerTarget& sequencer_target, TerminationPredicatePtr&& termination_predicate, Envoy::Stats::Scope& scope, - const Envoy::MonotonicTime scheduled_starting_time)); + const Envoy::SystemTime scheduled_starting_time)); }; } // namespace Nighthawk \ No newline at end of file diff --git a/test/mocks/common/mock_termination_predicate_factory.h b/test/mocks/common/mock_termination_predicate_factory.h index e37e8f128..23aed4bf2 100644 --- a/test/mocks/common/mock_termination_predicate_factory.h +++ b/test/mocks/common/mock_termination_predicate_factory.h @@ -12,7 +12,7 @@ class MockTerminationPredicateFactory : public TerminationPredicateFactory { MOCK_CONST_METHOD3(create, TerminationPredicatePtr(Envoy::TimeSource& time_source, Envoy::Stats::Scope& scope, - const 
Envoy::MonotonicTime scheduled_starting_time)); + const Envoy::SystemTime scheduled_starting_time)); }; } // namespace Nighthawk \ No newline at end of file diff --git a/test/rate_limiter_test.cc b/test/rate_limiter_test.cc index d05ef9867..754bbd3bd 100644 --- a/test/rate_limiter_test.cc +++ b/test/rate_limiter_test.cc @@ -76,8 +76,7 @@ TEST_F(RateLimiterTest, ScheduledStartingRateLimiterTest) { // scheduled delay. This should be business as usual from a functional perspective, but internally // this rate limiter specializes on this case to log a warning message, and we want to cover that. for (const bool starting_late : std::vector{false, true}) { - const Envoy::MonotonicTime scheduled_starting_time = - time_system.monotonicTime() + schedule_delay; + const Envoy::SystemTime scheduled_starting_time = time_system.systemTime() + schedule_delay; std::unique_ptr mock_rate_limiter = std::make_unique(); MockRateLimiter& unsafe_mock_rate_limiter = *mock_rate_limiter; InSequence s; @@ -96,7 +95,7 @@ TEST_F(RateLimiterTest, ScheduledStartingRateLimiterTest) { } // We should expect zero releases until it is time to start. - while (time_system.monotonicTime() < scheduled_starting_time) { + while (time_system.systemTime() < scheduled_starting_time) { EXPECT_FALSE(rate_limiter->tryAcquireOne()); time_system.advanceTimeWait(1ms); } @@ -109,8 +108,8 @@ TEST_F(RateLimiterTest, ScheduledStartingRateLimiterTest) { TEST_F(RateLimiterTest, ScheduledStartingRateLimiterTestBadArgs) { Envoy::Event::SimulatedTimeSystem time_system; // Verify we enforce future-only scheduling. 
- for (const auto timing : std::vector{time_system.monotonicTime(), - time_system.monotonicTime() - 10ms}) { + for (const auto timing : + std::vector{time_system.systemTime(), time_system.systemTime() - 10ms}) { std::unique_ptr mock_rate_limiter = std::make_unique(); MockRateLimiter& unsafe_mock_rate_limiter = *mock_rate_limiter; EXPECT_CALL(unsafe_mock_rate_limiter, timeSource) diff --git a/test/sequencer_test.cc b/test/sequencer_test.cc index 83d30744d..82ad3f69e 100644 --- a/test/sequencer_test.cc +++ b/test/sequencer_test.cc @@ -146,6 +146,9 @@ class SequencerTestWithTimerEmulation : public SequencerTest { MockSequencerTarget* target() { return &target_; } TerminationPredicatePtr termination_predicate_; +protected: + Envoy::MonotonicTime simulation_start_; + private: NiceMock* timer1_; // not owned NiceMock* timer2_; // not owned @@ -155,7 +158,6 @@ class SequencerTestWithTimerEmulation : public SequencerTest { bool timer1_set_{}; bool timer2_set_{}; bool stopped_{}; - Envoy::MonotonicTime simulation_start_; }; // Basic rate limiter interaction test. @@ -165,13 +167,14 @@ TEST_F(SequencerTestWithTimerEmulation, RateLimiterInteraction) { SequencerImpl sequencer(platform_util_, *dispatcher_, time_system_, std::move(rate_limiter_), callback, std::make_unique(), std::make_unique(), SequencerIdleStrategy::SLEEP, - std::move(termination_predicate_), store_, time_system_.monotonicTime()); + std::move(termination_predicate_), store_); // Have the mock rate limiter gate two calls, and block everything else. 
EXPECT_CALL(rate_limiter_unsafe_ref_, tryAcquireOne()) .Times(AtLeast(3)) .WillOnce(Return(true)) .WillOnce(Return(true)) .WillRepeatedly(Return(false)); + EXPECT_CALL(rate_limiter_unsafe_ref_, elapsed()).Times(2); EXPECT_CALL(*target(), callback(_)).Times(2).WillOnce(Return(true)).WillOnce(Return(true)); expectDispatcherRun(); EXPECT_CALL(platform_util_, sleep(_)).Times(AtLeast(1)); @@ -186,13 +189,14 @@ TEST_F(SequencerTestWithTimerEmulation, RateLimiterSaturatedTargetInteraction) { SequencerImpl sequencer(platform_util_, *dispatcher_, time_system_, std::move(rate_limiter_), callback, std::make_unique(), std::make_unique(), SequencerIdleStrategy::SLEEP, - std::move(termination_predicate_), store_, time_system_.monotonicTime()); + std::move(termination_predicate_), store_); EXPECT_CALL(rate_limiter_unsafe_ref_, tryAcquireOne()) .Times(AtLeast(3)) .WillOnce(Return(true)) .WillOnce(Return(true)) .WillRepeatedly(Return(false)); + EXPECT_CALL(rate_limiter_unsafe_ref_, elapsed()).Times(2); EXPECT_CALL(*target(), callback(_)).Times(2).WillOnce(Return(true)).WillOnce(Return(false)); @@ -224,10 +228,10 @@ class SequencerIntegrationTest : public SequencerTestWithTimerEmulation { std::unique_ptr rate_limiter_; void testRegularFlow(SequencerIdleStrategy::SequencerIdleStrategyOptions idle_strategy) { - SequencerImpl sequencer( - platform_util_, *dispatcher_, time_system_, std::move(rate_limiter_), sequencer_target_, - std::make_unique(), std::make_unique(), - idle_strategy, std::move(termination_predicate_), store_, time_system_.monotonicTime()); + SequencerImpl sequencer(platform_util_, *dispatcher_, time_system_, std::move(rate_limiter_), + sequencer_target_, std::make_unique(), + std::make_unique(), idle_strategy, + std::move(termination_predicate_), store_); EXPECT_EQ(0, callback_test_count_); EXPECT_EQ(0, sequencer.latencyStatistic().count()); sequencer.start(); @@ -236,6 +240,8 @@ class SequencerIntegrationTest : public SequencerTestWithTimerEmulation { 
EXPECT_EQ(test_number_of_intervals_, sequencer.latencyStatistic().count()); EXPECT_EQ(0, sequencer.blockedStatistic().count()); EXPECT_EQ(2, sequencer.statistics().size()); + const auto execution_duration = time_system_.monotonicTime() - simulation_start_; + EXPECT_EQ(sequencer.executionDuration(), execution_duration); } }; @@ -265,7 +271,7 @@ TEST_F(SequencerIntegrationTest, AlwaysSaturatedTargetTest) { SequencerImpl sequencer(platform_util_, *dispatcher_, time_system_, std::move(rate_limiter_), callback, std::make_unique(), std::make_unique(), SequencerIdleStrategy::SLEEP, - std::move(termination_predicate_), store_, time_system_.monotonicTime()); + std::move(termination_predicate_), store_); EXPECT_CALL(platform_util_, sleep(_)).Times(AtLeast(1)); sequencer.start(); sequencer.waitForCompletion(); @@ -283,7 +289,7 @@ TEST_F(SequencerIntegrationTest, CallbacksDoNotInfluenceTestDuration) { SequencerImpl sequencer(platform_util_, *dispatcher_, time_system_, std::move(rate_limiter_), callback, std::make_unique(), std::make_unique(), SequencerIdleStrategy::SLEEP, - std::move(termination_predicate_), store_, time_system_.monotonicTime()); + std::move(termination_predicate_), store_); EXPECT_CALL(platform_util_, sleep(_)).Times(AtLeast(1)); auto pre_timeout = time_system_.monotonicTime(); sequencer.start(); diff --git a/test/termination_predicate_test.cc b/test/termination_predicate_test.cc index 4ab0bab53..387883152 100644 --- a/test/termination_predicate_test.cc +++ b/test/termination_predicate_test.cc @@ -25,7 +25,7 @@ class TerminationPredicateTest : public Test { TEST_F(TerminationPredicateTest, DurationTerminationPredicateImplTest) { const auto duration = 100us; - DurationTerminationPredicateImpl pred(time_system, duration, time_system.monotonicTime()); + DurationTerminationPredicateImpl pred(time_system, duration, time_system.systemTime()); EXPECT_EQ(pred.evaluate(), TerminationPredicate::Status::PROCEED); // move to the edge. 
time_system.advanceTimeWait(duration); From 61996df8b68d71bade3fb1ac8b94ee4f48bc0e27 Mon Sep 17 00:00:00 2001 From: "Jiajun Ye (Jason)" Date: Mon, 5 Oct 2020 09:54:59 -0400 Subject: [PATCH 23/63] Update overview doc to include user-specified logging (#550) Update overview doc to include user-specified logging. Signed-off-by: jiajunye --- docs/root/overview.md | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/docs/root/overview.md b/docs/root/overview.md index 58fa5f117..7d53c419f 100644 --- a/docs/root/overview.md +++ b/docs/root/overview.md @@ -189,3 +189,11 @@ other formats (e.g. human, fortio). It can be very useful to always store the json output format, yet be able to easily get to one of the other output formats. It’s like having the cake and eating it too! +## User-specified Nighthawk logging + +Users of Nighthawk can specify custom format and destination (logging sink +delegate) for all Nighthawk logging messages. Nighthawk utilizes the Envoy's +logging mechanism by performing all logging via the **ENVOY_LOG** macro. To +customize this mechanism, users need to perform two steps: +1. Create a logging sink delegate inherited from [Envoy SinkDelegate](https://github.com/envoyproxy/envoy/blob/master/source/common/common/logger.h). +2. Construct a ServiceImpl object with an [Envoy Logger Context](https://github.com/envoyproxy/envoy/blob/master/source/common/common/logger.h) which contains user-specified log level and format. 
From 7fb012c6109be6d8db62b1ad2bbd2e6748070d52 Mon Sep 17 00:00:00 2001 From: Otto van der Schaaf Date: Mon, 5 Oct 2020 22:14:35 +0200 Subject: [PATCH 24/63] Update Envoy to c318156496edc46c844822a3d1f107ee496fa449 (#557) Signed-off-by: Otto van der Schaaf --- .bazelrc | 7 ++----- .circleci/config.yml | 4 ++-- bazel/repositories.bzl | 4 ++-- source/client/process_impl.cc | 5 +++-- test/server/http_test_server_filter_integration_test.cc | 2 +- 5 files changed, 10 insertions(+), 12 deletions(-) diff --git a/.bazelrc b/.bazelrc index 12995e93c..0a6a7bd47 100644 --- a/.bazelrc +++ b/.bazelrc @@ -43,9 +43,6 @@ build --action_env=CXX build --action_env=LLVM_CONFIG build --action_env=PATH -# Skip system ICU linking. -build --@com_googlesource_googleurl//build_config:system_icu=0 - # Common flags for sanitizers build:sanitizer --define tcmalloc=disabled build:sanitizer --linkopt -ldl @@ -149,7 +146,7 @@ build:coverage --strategy=CoverageReport=sandboxed,local build:coverage --experimental_use_llvm_covmap build:coverage --collect_code_coverage build:coverage --test_tag_filters=-nocoverage -build:coverage --instrumentation_filter="//source(?!/extensions/quic_listeners/quiche/platform)[/:],//include[/:]" +build:coverage --instrumentation_filter="//source(?!/common/chromium_url|/extensions/quic_listeners/quiche/platform)[/:],//include[/:]" coverage:test-coverage --test_arg="-l trace" coverage:fuzz-coverage --config=plain-fuzzer coverage:fuzz-coverage --run_under=@envoy//bazel/coverage:fuzz_coverage_wrapper.sh @@ -238,7 +235,7 @@ build:remote-clang-cl --config=rbe-toolchain-clang-cl # Docker sandbox # NOTE: Update this from https://github.com/envoyproxy/envoy-build-tools/blob/master/toolchains/rbe_toolchains_config.bzl#L8 -build:docker-sandbox --experimental_docker_image=envoyproxy/envoy-build-ubuntu:e7ea4e81bbd5028abb9d3a2f2c0afe063d9b62c0 +build:docker-sandbox --experimental_docker_image=envoyproxy/envoy-build-ubuntu:1526786b8f5cfce7a40829a0c527b5a27570889c 
build:docker-sandbox --spawn_strategy=docker build:docker-sandbox --strategy=Javac=docker build:docker-sandbox --strategy=Closure=docker diff --git a/.circleci/config.yml b/.circleci/config.yml index e34bc1422..a5b8b130b 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -1,6 +1,6 @@ references: - envoy-build-image: &envoy-build-image # September 9th, 2020 - envoyproxy/envoy-build-ubuntu:e7ea4e81bbd5028abb9d3a2f2c0afe063d9b62c0 + envoy-build-image: &envoy-build-image # October 2nd, 2020 + envoyproxy/envoy-build-ubuntu:1526786b8f5cfce7a40829a0c527b5a27570889c version: 2 jobs: build: diff --git a/bazel/repositories.bzl b/bazel/repositories.bzl index 51e9f0566..7b8ff6d20 100644 --- a/bazel/repositories.bzl +++ b/bazel/repositories.bzl @@ -1,7 +1,7 @@ load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") -ENVOY_COMMIT = "5a87f1e59b42ad546698d389f6ccac9406534e17" # September 25th, 2020 -ENVOY_SHA = "739c62249bae60f633f91dee846825f1d5ddcc469d45ef370e57f1a010c13258" +ENVOY_COMMIT = "c318156496edc46c844822a3d1f107ee496fa449" # October 2nd, 2020 +ENVOY_SHA = "dc4ee70d317f0310b96cb803a5c02b42589f5d1dfbfb2989f4bf72800aaa799a" HDR_HISTOGRAM_C_VERSION = "0.11.1" # September 17th, 2020 HDR_HISTOGRAM_C_SHA = "8550071d4ae5c8229448f9b68469d6d42c620cd25111b49c696d00185e5f8329" diff --git a/source/client/process_impl.cc b/source/client/process_impl.cc index 28f11e610..e6bfcf406 100644 --- a/source/client/process_impl.cc +++ b/source/client/process_impl.cc @@ -72,7 +72,7 @@ class ClusterManagerFactory : public Envoy::Upstream::ProdClusterManagerFactory const Envoy::Network::TransportSocketOptionsSharedPtr& transport_socket_options) override { if (protocol == Envoy::Http::Protocol::Http11 || protocol == Envoy::Http::Protocol::Http10) { auto* h1_pool = - new Http1PoolImpl(dispatcher, host, priority, options, transport_socket_options); + new Http1PoolImpl(dispatcher, random_, host, priority, options, transport_socket_options); 
h1_pool->setConnectionReuseStrategy(connection_reuse_strategy_); h1_pool->setPrefetchConnections(prefetch_connections_); return Envoy::Http::ConnectionPool::InstancePtr{h1_pool}; @@ -100,7 +100,8 @@ ProcessImpl::ProcessImpl(const Options& options, Envoy::Event::TimeSystem& time_ : process_wide), time_system_(time_system), stats_allocator_(symbol_table_), store_root_(stats_allocator_), api_(std::make_unique(platform_impl_.threadFactory(), store_root_, - time_system_, platform_impl_.fileSystem())), + time_system_, platform_impl_.fileSystem(), + generator_)), dispatcher_(api_->allocateDispatcher("main_thread")), benchmark_client_factory_(options), termination_predicate_factory_(options), sequencer_factory_(options), request_generator_factory_(options), options_(options), init_manager_("nh_init_manager"), diff --git a/test/server/http_test_server_filter_integration_test.cc b/test/server/http_test_server_filter_integration_test.cc index 702b77c3c..3ae792d0c 100644 --- a/test/server/http_test_server_filter_integration_test.cc +++ b/test/server/http_test_server_filter_integration_test.cc @@ -52,7 +52,7 @@ class HttpTestServerIntegrationTestBase : public Envoy::HttpIntegrationTest, type, dispatcher->createClientConnection(addr, Envoy::Network::Address::InstanceConstSharedPtr(), Envoy::Network::Test::createRawBufferSocket(), nullptr), - host_description, *dispatcher); + host_description, *dispatcher, random_); Envoy::BufferingStreamDecoderPtr response( new Envoy::BufferingStreamDecoder([&client, &dispatcher]() -> void { client.close(); From a358469f2737fcf5d92360bc38f5825afa3ffe18 Mon Sep 17 00:00:00 2001 From: wjuan-AFK <66322422+wjuan-AFK@users.noreply.github.com> Date: Mon, 5 Oct 2020 16:16:40 -0400 Subject: [PATCH 25/63] Adding config factory (#522) Adding config factories for RequestSourcePlugins. Specifically starting with FileBasedRequestSourcePlugin. These will be provided as options to be used inside RequestSourceFactory. 
This is part of a series of PRs for providing the ability to use statically linked requestSources. Signed-off-by: William Juan <66322422+wjuan-AFK@users.noreply.github.com> --- api/client/options.proto | 6 + api/request_source/BUILD | 12 ++ .../request_source_plugin.proto | 36 ++++ include/nighthawk/request_source/BUILD | 24 +++ .../request_source_plugin_config_factory.h | 39 ++++ source/request_source/BUILD | 33 ++++ .../request_options_list_plugin_impl.cc | 89 +++++++++ .../request_options_list_plugin_impl.h | 84 ++++++++ test/request_source/BUILD | 48 +++++ .../request_source_plugin_test.cc | 179 ++++++++++++++++++ test/request_source/stub_plugin_impl.cc | 47 +++++ test/request_source/stub_plugin_impl.h | 56 ++++++ .../request_source/test_data/test-config.yaml | 13 ++ tools/check_format.sh | 4 +- 14 files changed, 668 insertions(+), 2 deletions(-) create mode 100644 api/request_source/request_source_plugin.proto create mode 100644 include/nighthawk/request_source/BUILD create mode 100644 include/nighthawk/request_source/request_source_plugin_config_factory.h create mode 100644 source/request_source/BUILD create mode 100644 source/request_source/request_options_list_plugin_impl.cc create mode 100644 source/request_source/request_options_list_plugin_impl.h create mode 100644 test/request_source/BUILD create mode 100644 test/request_source/request_source_plugin_test.cc create mode 100644 test/request_source/stub_plugin_impl.cc create mode 100644 test/request_source/stub_plugin_impl.h create mode 100644 test/request_source/test_data/test-config.yaml diff --git a/api/client/options.proto b/api/client/options.proto index 6e2aa1841..b5955a246 100644 --- a/api/client/options.proto +++ b/api/client/options.proto @@ -18,6 +18,12 @@ message RequestOptions { google.protobuf.UInt32Value request_body_size = 3 [(validate.rules).uint32 = {lte: 4194304}]; } +// Used for providing multiple request options, especially for RequestSourcePlugins. 
+message RequestOptionsList { + // Each option is used for a separate request to be sent by the requestSource. + repeated RequestOptions options = 1; +} + // Configures a remote gRPC source that will deliver to-be-replayed request data to Nighthawks // workers. message RequestSource { diff --git a/api/request_source/BUILD b/api/request_source/BUILD index 4e808f691..9cf50e7ff 100644 --- a/api/request_source/BUILD +++ b/api/request_source/BUILD @@ -16,6 +16,18 @@ api_cc_py_proto_library( ], ) +api_cc_py_proto_library( + name = "request_source_plugin", + srcs = [ + "request_source_plugin.proto", + ], + visibility = ["//visibility:public"], + deps = [ + "@envoy_api//envoy/config/core/v3:pkg", + "@nighthawk//api/client:base", + ], +) + cc_grpc_library( name = "grpc_request_source_service_lib", srcs = [ diff --git a/api/request_source/request_source_plugin.proto b/api/request_source/request_source_plugin.proto new file mode 100644 index 000000000..1af69a60a --- /dev/null +++ b/api/request_source/request_source_plugin.proto @@ -0,0 +1,36 @@ +// Config protos for the Request Source Plugin Config Factories. +syntax = "proto3"; + +package nighthawk.request_source; + +import "google/protobuf/wrappers.proto"; +import "validate/validate.proto"; +import "api/client/options.proto"; + +// Configuration for FileBasedPluginRequestSource (plugin name: +// "nighthawk.file-based-request-source-plugin") +// The factory will load the RequestOptionsList from the file, and then passes it to the +// requestSource it generates. The resulting request source will loop over the RequestOptionsList it +// is passed. +message FileBasedPluginConfig { + // The file_path is the path to a file that contains a RequestOptionList in json or yaml format. + string file_path = 1; + // The pluginfactory makes requestSources that will generate requests from the RequestOptionList + // up to num_requests number of times. 
If num_requests exceeds the number of RequestOptions in the + // RequestOptionList located in the file at file_path, it will loop. num_requests = 0 means no + // limit on the number of requests to be produced. + google.protobuf.UInt32Value num_requests = 2 [(validate.rules).uint32 = {gte: 0, lte: 1000000}]; + // The pluginfactory will load the file located in file_path as long as it is below max_file_size, + // if it's too large it will throw an error. + google.protobuf.UInt32Value max_file_size = 3 [(validate.rules).uint32 = {lte: 1000000}]; +} + +// Configuration for StubPluginRequestSource (plugin name: "nighthawk.stub-request-source-plugin") +// The plugin does nothing. This is for testing and comparison of the Request Source Plugin Factory +// mechanism using a minimal version of plugin that does not require a more complicated proto or +// file reading. +message StubPluginConfig { + // test input value which is the only output value in the headers produced from the + // requestGenerator for the StubRequestSource. 
+ google.protobuf.DoubleValue test_value = 1; +} diff --git a/include/nighthawk/request_source/BUILD b/include/nighthawk/request_source/BUILD new file mode 100644 index 000000000..7185a6dae --- /dev/null +++ b/include/nighthawk/request_source/BUILD @@ -0,0 +1,24 @@ +load( + "@envoy//bazel:envoy_build_system.bzl", + "envoy_basic_cc_library", + "envoy_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_package() + +envoy_basic_cc_library( + name = "request_source_plugin_config_factory_lib", + hdrs = [ + "request_source_plugin_config_factory.h", + ], + include_prefix = "nighthawk/request_source", + deps = [ + "//api/request_source:request_source_plugin_cc_proto", + "//include/nighthawk/common:request_source_lib", + "@envoy//include/envoy/common:base_includes", + "@envoy//include/envoy/config:typed_config_interface", + "@envoy//source/common/api:api_lib_with_external_headers", + ], +) diff --git a/include/nighthawk/request_source/request_source_plugin_config_factory.h b/include/nighthawk/request_source/request_source_plugin_config_factory.h new file mode 100644 index 000000000..3feccffc9 --- /dev/null +++ b/include/nighthawk/request_source/request_source_plugin_config_factory.h @@ -0,0 +1,39 @@ +#pragma once + +#include "envoy/api/api.h" +#include "envoy/common/pure.h" +#include "envoy/config/typed_config.h" + +#include "nighthawk/common/request_source.h" + +namespace Nighthawk { + +// A factory that must be implemented for each RequestSourcePlugin. It instantiates the specific +// RequestSourcePlugin class after unpacking the plugin-specific config proto. +class RequestSourcePluginConfigFactory : public Envoy::Config::TypedFactory { +public: + ~RequestSourcePluginConfigFactory() override = default; + // All request source plugins will be in this category. + std::string category() const override { return "nighthawk.request_source_plugin"; } + + // Instantiates the specific RequestSourcePlugin class. 
Casts |message| to Any, unpacks it to the + // plugin-specific proto, and passes the strongly typed proto to the plugin constructor. + // + // @param typed_config Any typed_config proto taken from the TypedExtensionConfig. This should be + // a type listed in request_source_plugin_config.proto + // + // @param api Api parameter that contains timesystem, filesystem, and threadfactory. + // + // @param header RequestHeaderMapPtr parameter that acts as a template header for the + // requestSource to modify when generating requests. + // + // @return RequestSourcePtr Pointer to the new instance of RequestSource. + // + // @throw Envoy::EnvoyException If the Any proto cannot be unpacked as the type expected by the + // plugin. + virtual RequestSourcePtr createRequestSourcePlugin(const Envoy::Protobuf::Message& typed_config, + Envoy::Api::Api& api, + Envoy::Http::RequestHeaderMapPtr header) PURE; +}; + +} // namespace Nighthawk diff --git a/source/request_source/BUILD b/source/request_source/BUILD new file mode 100644 index 000000000..9fdbf5151 --- /dev/null +++ b/source/request_source/BUILD @@ -0,0 +1,33 @@ +load( + "@envoy//bazel:envoy_build_system.bzl", + "envoy_cc_library", + "envoy_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_package() + +envoy_cc_library( + name = "request_options_list_plugin_impl", + srcs = [ + "request_options_list_plugin_impl.cc", + ], + hdrs = [ + "request_options_list_plugin_impl.h", + ], + repository = "@envoy", + visibility = ["//visibility:public"], + deps = [ + "//include/nighthawk/request_source:request_source_plugin_config_factory_lib", + "//source/common:nighthawk_common_lib", + "//source/common:request_impl_lib", + "//source/common:request_source_impl_lib", + "@envoy//source/common/common:thread_lib_with_external_headers", + "@envoy//source/common/protobuf:message_validator_lib_with_external_headers", + "@envoy//source/common/protobuf:protobuf_with_external_headers", + 
"@envoy//source/common/protobuf:utility_lib_with_external_headers", + "@envoy//source/exe:platform_header_lib_with_external_headers", + "@envoy//source/exe:platform_impl_lib", + ], +) diff --git a/source/request_source/request_options_list_plugin_impl.cc b/source/request_source/request_options_list_plugin_impl.cc new file mode 100644 index 000000000..7fd25f3aa --- /dev/null +++ b/source/request_source/request_options_list_plugin_impl.cc @@ -0,0 +1,89 @@ +#include "request_source/request_options_list_plugin_impl.h" + +#include "external/envoy/source/common/protobuf/message_validator_impl.h" +#include "external/envoy/source/common/protobuf/utility.h" +#include "external/envoy/source/exe/platform_impl.h" + +#include "api/client/options.pb.h" + +#include "common/request_impl.h" +#include "common/request_source_impl.h" + +namespace Nighthawk { +std::string OptionsListFromFileRequestSourceFactory::name() const { + return "nighthawk.file-based-request-source-plugin"; +} + +Envoy::ProtobufTypes::MessagePtr OptionsListFromFileRequestSourceFactory::createEmptyConfigProto() { + return std::make_unique(); +} + +RequestSourcePtr OptionsListFromFileRequestSourceFactory::createRequestSourcePlugin( + const Envoy::Protobuf::Message& message, Envoy::Api::Api& api, + Envoy::Http::RequestHeaderMapPtr header) { + const auto& any = dynamic_cast(message); + nighthawk::request_source::FileBasedPluginConfig config; + Envoy::MessageUtil util; + + util.unpackTo(any, config); + if (api.fileSystem().fileSize(config.file_path()) > config.max_file_size().value()) { + throw NighthawkException("file size must be less than max_file_size"); + } + + // Locking to avoid issues with multiple threads reading the same file. + { + Envoy::Thread::LockGuard lock_guard(file_lock_); + // Reading the file only the first time. 
+ if (options_list_.options_size() == 0) { + util.loadFromFile(config.file_path(), options_list_, + Envoy::ProtobufMessage::getStrictValidationVisitor(), api, true); + } + } + return std::make_unique(config.num_requests().value(), + std::move(header), options_list_); +} + +REGISTER_FACTORY(OptionsListFromFileRequestSourceFactory, RequestSourcePluginConfigFactory); + +RequestOptionsListRequestSource::RequestOptionsListRequestSource( + const uint32_t total_requests, Envoy::Http::RequestHeaderMapPtr header, + const nighthawk::client::RequestOptionsList& options_list) + : header_(std::move(header)), options_list_(options_list), total_requests_(total_requests) {} + +RequestGenerator RequestOptionsListRequestSource::get() { + request_count_.push_back(0); + uint32_t& lambda_counter = request_count_.back(); + RequestGenerator request_generator = [this, lambda_counter]() mutable -> RequestPtr { + // if request_max is 0, then we never stop generating requests. + if (lambda_counter >= total_requests_ && total_requests_ != 0) { + return nullptr; + } + + // Increment the counter and get the request_option from the list for the current iteration. + const uint32_t index = lambda_counter % options_list_.options_size(); + nighthawk::client::RequestOptions request_option = options_list_.options().at(index); + ++lambda_counter; + + // Initialize the header with the values from the default header. 
+ Envoy::Http::RequestHeaderMapPtr header = Envoy::Http::RequestHeaderMapImpl::create(); + Envoy::Http::HeaderMapImpl::copyFrom(*header, *header_); + + // Override the default values with the values from the request_option + header->setMethod(envoy::config::core::v3::RequestMethod_Name(request_option.request_method())); + const uint32_t content_length = request_option.request_body_size().value(); + if (content_length > 0) { + header->setContentLength(content_length); + } + for (const envoy::config::core::v3::HeaderValueOption& option_header : + request_option.request_headers()) { + auto lower_case_key = Envoy::Http::LowerCaseString(std::string(option_header.header().key())); + header->setCopy(lower_case_key, std::string(option_header.header().value())); + } + return std::make_unique(std::move(header)); + }; + return request_generator; +} + +void RequestOptionsListRequestSource::initOnThread() {} + +} // namespace Nighthawk \ No newline at end of file diff --git a/source/request_source/request_options_list_plugin_impl.h b/source/request_source/request_options_list_plugin_impl.h new file mode 100644 index 000000000..3fbf485ff --- /dev/null +++ b/source/request_source/request_options_list_plugin_impl.h @@ -0,0 +1,84 @@ +// Implementations of RequestSourceConfigFactories that make a RequestOptionsListRequestSource. +#pragma once + +#include "envoy/registry/registry.h" + +#include "nighthawk/request_source/request_source_plugin_config_factory.h" + +#include "external/envoy/source/common/common/lock_guard.h" +#include "external/envoy/source/common/common/thread.h" + +#include "api/client/options.pb.h" +#include "api/request_source/request_source_plugin.pb.h" + +#include "common/uri_impl.h" + +namespace Nighthawk { + +// Sample Request Source for small RequestOptionsLists. Loads a copy of the RequestOptionsList in +// memory and replays them. +// @param total_requests The number of requests the requestGenerator produced by get() will +// generate. 0 means it is unlimited. 
+// @param header the default header that will be overridden by values taken from the options_list, +// any values not overridden will be used. +// @param options_list This is const because it is intended to be shared by multiple threads. The +// RequestGenerator produced by get() will use options from the options_list to overwrite values in +// the default header, and create new requests. if total_requests is greater than the length of +// options_list, it will loop. This is not thread safe. +class RequestOptionsListRequestSource : public RequestSource { +public: + RequestOptionsListRequestSource(const uint32_t total_requests, + Envoy::Http::RequestHeaderMapPtr header, + const nighthawk::client::RequestOptionsList& options_list); + + // This get function is not thread safe, because multiple threads calling get simultaneously will + // result in a collision as it attempts to update its request_count_. + RequestGenerator get() override; + + // default implementation + void initOnThread() override; + +private: + Envoy::Http::RequestHeaderMapPtr header_; + const nighthawk::client::RequestOptionsList& options_list_; + std::vector request_count_; + const uint32_t total_requests_; +}; + +// Factory that creates a RequestOptionsListRequestSource from a FileBasedPluginConfig proto. +// Registered as an Envoy plugin. +// Implementation of RequestSourceConfigFactory which produces a RequestSource that keeps an +// RequestOptionsList in memory, and loads it with the RequestOptions taken from a file. All plugins +// configuration are specified in the request_source_plugin.proto. This class is not thread-safe, +// because it loads its RequestOptionlist in memory from a file when first called. +// Usage: assume you are passed an appropriate Any type object called config, an Api object called +// api, and a default header called header. 
auto& config_factory = +// Envoy::Config::Utility::getAndCheckFactoryByName( +// "nighthawk.file-based-request-source-plugin"); +// RequestSourcePtr plugin = +// config_factory.createRequestSourcePlugin(config, std::move(api), std::move(header)); +class OptionsListFromFileRequestSourceFactory : public virtual RequestSourcePluginConfigFactory { +public: + std::string name() const override; + + Envoy::ProtobufTypes::MessagePtr createEmptyConfigProto() override; + + // This implementation is not thread safe. Only the first call to createRequestSourcePlugin will + // load the file from memory and subsequent calls just make a copy of the options_list that was + // already loaded. The OptionsListFromFileRequestSourceFactory will not work with multiple + // different files for this reason. + // This method will also error if the file can not be loaded correctly, e.g. the file is too large + // or could not be found. + RequestSourcePtr createRequestSourcePlugin(const Envoy::Protobuf::Message& message, + Envoy::Api::Api& api, + Envoy::Http::RequestHeaderMapPtr header) override; + +private: + Envoy::Thread::MutexBasicLockable file_lock_; + nighthawk::client::RequestOptionsList options_list_; +}; + +// This factory will be activated through RequestSourceFactory in factories.h +DECLARE_FACTORY(OptionsListFromFileRequestSourceFactory); + +} // namespace Nighthawk \ No newline at end of file diff --git a/test/request_source/BUILD b/test/request_source/BUILD new file mode 100644 index 000000000..9b1e6bf18 --- /dev/null +++ b/test/request_source/BUILD @@ -0,0 +1,48 @@ +load( + "@envoy//bazel:envoy_build_system.bzl", + "envoy_cc_test", + "envoy_cc_test_library", + "envoy_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_package() + +envoy_cc_test_library( + name = "stub_plugin_impl", + srcs = [ + "stub_plugin_impl.cc", + ], + hdrs = [ + "stub_plugin_impl.h", + ], + repository = "@envoy", + deps = [ + 
"//include/nighthawk/request_source:request_source_plugin_config_factory_lib", + "//source/common:nighthawk_common_lib", + "//source/common:request_impl_lib", + "//source/common:request_source_impl_lib", + "@envoy//source/common/protobuf:message_validator_lib_with_external_headers", + "@envoy//source/common/protobuf:protobuf_with_external_headers", + "@envoy//source/common/protobuf:utility_lib_with_external_headers", + "@envoy//source/exe:platform_header_lib_with_external_headers", + "@envoy//source/exe:platform_impl_lib", + ], +) + +envoy_cc_test( + name = "request_source_plugin_test", + srcs = ["request_source_plugin_test.cc"], + data = [ + "test_data/test-config.yaml", + ], + repository = "@envoy", + deps = [ + "//source/request_source:request_options_list_plugin_impl", + "//test/request_source:stub_plugin_impl", + "//test/test_common:environment_lib", + "@envoy//source/common/config:utility_lib_with_external_headers", + "@envoy//test/mocks/api:api_mocks", + ], +) diff --git a/test/request_source/request_source_plugin_test.cc b/test/request_source/request_source_plugin_test.cc new file mode 100644 index 000000000..a7fa62cec --- /dev/null +++ b/test/request_source/request_source_plugin_test.cc @@ -0,0 +1,179 @@ +#include "envoy/common/exception.h" + +#include "external/envoy/source/common/config/utility.h" +#include "external/envoy/test/mocks/api/mocks.h" +#include "external/envoy/test/mocks/stats/mocks.h" +#include "external/envoy/test/test_common/file_system_for_test.h" +#include "external/envoy/test/test_common/utility.h" + +#include "request_source/request_options_list_plugin_impl.h" + +#include "test/request_source/stub_plugin_impl.h" +#include "test/test_common/environment.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Nighthawk { + +namespace { +using nighthawk::request_source::FileBasedPluginConfig; +using nighthawk::request_source::StubPluginConfig; +using ::testing::NiceMock; +using ::testing::Test; + +class 
StubRequestSourcePluginTest : public Test { +public: + StubRequestSourcePluginTest() : api_(Envoy::Api::createApiForTest(stats_store_)) {} + Envoy::Stats::MockIsolatedStatsStore stats_store_; + Envoy::Api::ApiPtr api_; +}; + +class FileBasedRequestSourcePluginTest : public Test { +public: + FileBasedRequestSourcePluginTest() : api_(Envoy::Api::createApiForTest(stats_store_)) {} + Envoy::Stats::MockIsolatedStatsStore stats_store_; + Envoy::Api::ApiPtr api_; + nighthawk::request_source::FileBasedPluginConfig + MakeFileBasedPluginConfigWithTestYaml(absl::string_view request_file) { + nighthawk::request_source::FileBasedPluginConfig config; + config.mutable_file_path()->assign(request_file); + config.mutable_max_file_size()->set_value(4000); + return config; + } +}; + +TEST_F(StubRequestSourcePluginTest, CreateEmptyConfigProtoCreatesCorrectType) { + auto& config_factory = + Envoy::Config::Utility::getAndCheckFactoryByName( + "nighthawk.stub-request-source-plugin"); + const Envoy::ProtobufTypes::MessagePtr empty_config = config_factory.createEmptyConfigProto(); + const nighthawk::request_source::StubPluginConfig expected_config; + EXPECT_EQ(empty_config->DebugString(), expected_config.DebugString()); + EXPECT_TRUE(Envoy::MessageUtil()(*empty_config, expected_config)); +} + +TEST_F(StubRequestSourcePluginTest, FactoryRegistrationUsesCorrectPluginName) { + nighthawk::request_source::StubPluginConfig config; + Envoy::ProtobufWkt::Any config_any; + config_any.PackFrom(config); + auto& config_factory = + Envoy::Config::Utility::getAndCheckFactoryByName( + "nighthawk.stub-request-source-plugin"); + EXPECT_EQ(config_factory.name(), "nighthawk.stub-request-source-plugin"); +} + +TEST_F(StubRequestSourcePluginTest, CreateRequestSourcePluginCreatesCorrectPluginType) { + nighthawk::request_source::StubPluginConfig config; + Envoy::ProtobufWkt::Any config_any; + config_any.PackFrom(config); + auto& config_factory = + Envoy::Config::Utility::getAndCheckFactoryByName( + 
"nighthawk.stub-request-source-plugin"); + auto header = Envoy::Http::RequestHeaderMapImpl::create(); + RequestSourcePtr plugin = + config_factory.createRequestSourcePlugin(config_any, *api_, std::move(header)); + EXPECT_NE(dynamic_cast(plugin.get()), nullptr); +} +TEST_F(StubRequestSourcePluginTest, CreateRequestSourcePluginCreatesWorkingPlugin) { + nighthawk::request_source::StubPluginConfig config; + double test_value = 2; + config.mutable_test_value()->set_value(test_value); + Envoy::ProtobufWkt::Any config_any; + config_any.PackFrom(config); + auto& config_factory = + Envoy::Config::Utility::getAndCheckFactoryByName( + "nighthawk.stub-request-source-plugin"); + auto template_header = Envoy::Http::RequestHeaderMapImpl::create(); + RequestSourcePtr plugin = + config_factory.createRequestSourcePlugin(config_any, *api_, std::move(template_header)); + Nighthawk::RequestGenerator generator = plugin->get(); + Nighthawk::RequestPtr request = generator(); + Nighthawk::HeaderMapPtr header = request->header(); + EXPECT_EQ(header->get(Envoy::Http::LowerCaseString("test_value"))->value().getStringView(), + absl::string_view(std::to_string(test_value))); +} +TEST_F(FileBasedRequestSourcePluginTest, CreateEmptyConfigProtoCreatesCorrectType) { + auto& config_factory = + Envoy::Config::Utility::getAndCheckFactoryByName( + "nighthawk.file-based-request-source-plugin"); + const Envoy::ProtobufTypes::MessagePtr empty_config = config_factory.createEmptyConfigProto(); + const nighthawk::request_source::FileBasedPluginConfig expected_config; + EXPECT_EQ(empty_config->DebugString(), expected_config.DebugString()); + EXPECT_TRUE(Envoy::MessageUtil()(*empty_config, expected_config)); +} + +TEST_F(FileBasedRequestSourcePluginTest, FactoryRegistrationUsesCorrectPluginName) { + nighthawk::request_source::FileBasedPluginConfig config; + Envoy::ProtobufWkt::Any config_any; + config_any.PackFrom(config); + auto& config_factory = + Envoy::Config::Utility::getAndCheckFactoryByName( + 
"nighthawk.file-based-request-source-plugin"); + EXPECT_EQ(config_factory.name(), "nighthawk.file-based-request-source-plugin"); +} + +TEST_F(FileBasedRequestSourcePluginTest, CreateRequestSourcePluginCreatesCorrectPluginType) { + nighthawk::request_source::FileBasedPluginConfig config = MakeFileBasedPluginConfigWithTestYaml( + TestEnvironment::runfilesPath("test/request_source/test_data/test-config.yaml")); + Envoy::ProtobufWkt::Any config_any; + config_any.PackFrom(config); + auto& config_factory = + Envoy::Config::Utility::getAndCheckFactoryByName( + "nighthawk.file-based-request-source-plugin"); + auto header = Envoy::Http::RequestHeaderMapImpl::create(); + RequestSourcePtr plugin = + config_factory.createRequestSourcePlugin(config_any, *api_, std::move(header)); + EXPECT_NE(dynamic_cast(plugin.get()), nullptr); +} + +TEST_F(FileBasedRequestSourcePluginTest, + CreateRequestSourcePluginGetsWorkingRequestGeneratorThatEndsAtNumRequest) { + nighthawk::request_source::FileBasedPluginConfig config = MakeFileBasedPluginConfigWithTestYaml( + TestEnvironment::runfilesPath("test/request_source/test_data/test-config.yaml")); + config.mutable_num_requests()->set_value(2); + Envoy::ProtobufWkt::Any config_any; + config_any.PackFrom(config); + auto& config_factory = + Envoy::Config::Utility::getAndCheckFactoryByName( + "nighthawk.file-based-request-source-plugin"); + auto header = Envoy::Http::RequestHeaderMapImpl::create(); + RequestSourcePtr file_based_request_source = + config_factory.createRequestSourcePlugin(config_any, *api_, std::move(header)); + Nighthawk::RequestGenerator generator = file_based_request_source->get(); + Nighthawk::RequestPtr request = generator(); + Nighthawk::RequestPtr request2 = generator(); + Nighthawk::RequestPtr request3 = generator(); + Nighthawk::HeaderMapPtr header1 = request->header(); + Nighthawk::HeaderMapPtr header2 = request2->header(); + EXPECT_EQ(header1->getPathValue(), "/a"); + EXPECT_EQ(header2->getPathValue(), "/b"); + 
EXPECT_EQ(request3, nullptr); +} + +TEST_F(FileBasedRequestSourcePluginTest, + CreateRequestSourcePluginWithMoreNumRequestsThanInFileGetsWorkingRequestGeneratorThatLoops) { + nighthawk::request_source::FileBasedPluginConfig config = MakeFileBasedPluginConfigWithTestYaml( + TestEnvironment::runfilesPath("test/request_source/test_data/test-config.yaml")); + config.mutable_num_requests()->set_value(4); + Envoy::ProtobufWkt::Any config_any; + config_any.PackFrom(config); + auto& config_factory = + Envoy::Config::Utility::getAndCheckFactoryByName( + "nighthawk.file-based-request-source-plugin"); + auto header = Envoy::Http::RequestHeaderMapImpl::create(); + RequestSourcePtr file_based_request_source = + config_factory.createRequestSourcePlugin(config_any, *api_, std::move(header)); + Nighthawk::RequestGenerator generator = file_based_request_source->get(); + Nighthawk::RequestPtr request = generator(); + Nighthawk::RequestPtr request2 = generator(); + Nighthawk::RequestPtr request3 = generator(); + Nighthawk::HeaderMapPtr header1 = request->header(); + Nighthawk::HeaderMapPtr header2 = request2->header(); + Nighthawk::HeaderMapPtr header3 = request3->header(); + EXPECT_EQ(header1->getPathValue(), "/a"); + EXPECT_EQ(header2->getPathValue(), "/b"); + EXPECT_EQ(header3->getPathValue(), "/a"); +} +} // namespace +} // namespace Nighthawk diff --git a/test/request_source/stub_plugin_impl.cc b/test/request_source/stub_plugin_impl.cc new file mode 100644 index 000000000..7ca882263 --- /dev/null +++ b/test/request_source/stub_plugin_impl.cc @@ -0,0 +1,47 @@ +#include "test/request_source/stub_plugin_impl.h" + +#include "external/envoy/source/common/protobuf/message_validator_impl.h" +#include "external/envoy/source/common/protobuf/utility.h" +#include "external/envoy/source/exe/platform_impl.h" + +#include "api/client/options.pb.h" + +#include "common/request_impl.h" +#include "common/request_source_impl.h" + +namespace Nighthawk { + +std::string 
StubRequestSourcePluginConfigFactory::name() const { + return "nighthawk.stub-request-source-plugin"; +} + +Envoy::ProtobufTypes::MessagePtr StubRequestSourcePluginConfigFactory::createEmptyConfigProto() { + return std::make_unique(); +} + +RequestSourcePtr StubRequestSourcePluginConfigFactory::createRequestSourcePlugin( + const Envoy::Protobuf::Message& message, Envoy::Api::Api&, Envoy::Http::RequestHeaderMapPtr) { + const auto& any = dynamic_cast(message); + nighthawk::request_source::StubPluginConfig config; + Envoy::MessageUtil::unpackTo(any, config); + return std::make_unique(config); +} + +REGISTER_FACTORY(StubRequestSourcePluginConfigFactory, RequestSourcePluginConfigFactory); + +StubRequestSource::StubRequestSource(const nighthawk::request_source::StubPluginConfig& config) + : test_value_{config.has_test_value() ? config.test_value().value() : 0} {} +RequestGenerator StubRequestSource::get() { + + RequestGenerator request_generator = [this]() { + Envoy::Http::RequestHeaderMapPtr header = Envoy::Http::RequestHeaderMapImpl::create(); + header->setCopy(Envoy::Http::LowerCaseString("test_value"), std::to_string(test_value_)); + auto returned_request_impl = std::make_unique(std::move(header)); + return returned_request_impl; + }; + return request_generator; +} + +void StubRequestSource::initOnThread() {} + +} // namespace Nighthawk \ No newline at end of file diff --git a/test/request_source/stub_plugin_impl.h b/test/request_source/stub_plugin_impl.h new file mode 100644 index 000000000..b45e64c9e --- /dev/null +++ b/test/request_source/stub_plugin_impl.h @@ -0,0 +1,56 @@ +// Test implementations of RequestSourceConfigFactory and RequestSource that perform minimum +// functionality for testing purposes. 
+#pragma once + +#include "envoy/registry/registry.h" + +#include "nighthawk/request_source/request_source_plugin_config_factory.h" + +#include "api/client/options.pb.h" +#include "api/request_source/request_source_plugin.pb.h" + +#include "common/uri_impl.h" + +namespace Nighthawk { + +// Stub Request Source implementation for comparison. +class StubRequestSource : public RequestSource { +public: + StubRequestSource(const nighthawk::request_source::StubPluginConfig& config); + // The generator function will return a header whose only value is the test_value taken from the + // config. The function is threadsafe. + RequestGenerator get() override; + + // default implementation + void initOnThread() override; + +private: + const double test_value_; +}; + +// Factory that creates a StubRequestSource from a StubRequestSourcePluginConfig proto. +// Registered as an Envoy plugin. +// Stub implementation of RequestSourceConfigFactory which produces a RequestSource. +// RequestSources are used to get RequestGenerators which generate requests for the benchmark +// client. All plugins configuration are specified in the request_source_plugin.proto This class is +// thread-safe, but it doesn't do anything. Usage: assume you are passed an appropriate Any type +// object called config, an Api object called api, and a default header called header. auto& +// config_factory = +// Envoy::Config::Utility::getAndCheckFactoryByName( +// "nighthawk.stub-request-source-plugin"); +// RequestSourcePtr plugin = +// config_factory.createRequestSourcePlugin(config, std::move(api), std::move(header)); + +class StubRequestSourcePluginConfigFactory : public virtual RequestSourcePluginConfigFactory { +public: + std::string name() const override; + Envoy::ProtobufTypes::MessagePtr createEmptyConfigProto() override; + // This implementation is thread safe, but the RequestSource it generates doesn't do much. 
+ RequestSourcePtr createRequestSourcePlugin(const Envoy::Protobuf::Message& message, + Envoy::Api::Api& api, + Envoy::Http::RequestHeaderMapPtr header) override; +}; + +// This factory will be activated through RequestSourceFactory in factories.h +DECLARE_FACTORY(StubRequestSourcePluginConfigFactory); +} // namespace Nighthawk \ No newline at end of file diff --git a/test/request_source/test_data/test-config.yaml b/test/request_source/test_data/test-config.yaml new file mode 100644 index 000000000..e8b4cdaac --- /dev/null +++ b/test/request_source/test_data/test-config.yaml @@ -0,0 +1,13 @@ +options: + - request_body_size: 10 + request_headers: + - { header: { key: ":path", value: "/a" } } + - { header: { key: "foo", value: "bar" } } + - { header: { key: "foo", value: "bar2" } } + - { header: { key: "x-nh", value: "1" } } + - request_body_size: 10 + request_headers: + - { header: { key: ":path", value: "/b" } } + - { header: { key: "bar", value: "foo" } } + - { header: { key: "bar", value: "foo2" } } + - { header: { key: "x-nh", value: "2" } } \ No newline at end of file diff --git a/tools/check_format.sh b/tools/check_format.sh index 1389fd4d5..847b28ddd 100755 --- a/tools/check_format.sh +++ b/tools/check_format.sh @@ -8,11 +8,11 @@ TO_CHECK="${2:-$PWD}" bazel run @envoy//tools:code_format/check_format.py -- \ --skip_envoy_build_rule_check --namespace_check Nighthawk \ --build_fixer_check_excluded_paths=$(realpath ".") \ - --include_dir_order envoy,nighthawk,external/source/envoy,external,api,common,source,exe,server,client,grpcpp,test_common,test \ + --include_dir_order envoy,nighthawk,external/source/envoy,external,api,common,source,exe,server,client,grpcpp,request_source,test_common,test \ $1 $TO_CHECK # The include checker doesn't support per-file checking, so we only # run it when a full check is requested. 
if [ $PWD == $TO_CHECK ]; then bazel run //tools:check_envoy_includes.py -fi +fi \ No newline at end of file From 9d7716114e91be8d0cab8cea3481dac0362ac52b Mon Sep 17 00:00:00 2001 From: Otto van der Schaaf Date: Tue, 6 Oct 2020 16:51:44 +0200 Subject: [PATCH 26/63] Test-server extension: use shared configuration base and test facilities (#512) - Make the test server use the shared configuration handling code. - Convert its tests to use the new facilities that are shared accross extensions. Last in a series of PRs to fix #498 This is on par with what we did for the dynamic delay and timing extensions, but this extension was slightly more complex to begin with, so saving the best for last ;-) Signed-off-by: Otto van der Schaaf --- ci/do_ci.sh | 4 +- source/server/http_test_server_filter.cc | 59 ++-- source/server/http_test_server_filter.h | 15 +- source/server/http_time_tracking_filter.cc | 3 +- ...ttp_test_server_filter_integration_test.cc | 264 +++++------------- 5 files changed, 111 insertions(+), 234 deletions(-) diff --git a/ci/do_ci.sh b/ci/do_ci.sh index 8d430b706..4abbcfe8e 100755 --- a/ci/do_ci.sh +++ b/ci/do_ci.sh @@ -39,7 +39,7 @@ function do_clang_tidy() { function do_unit_test_coverage() { export TEST_TARGETS="//test/... 
-//test:python_test" - export COVERAGE_THRESHOLD=93.2 + export COVERAGE_THRESHOLD=94.0 echo "bazel coverage build with tests ${TEST_TARGETS}" test/run_nighthawk_bazel_coverage.sh ${TEST_TARGETS} exit 0 @@ -47,7 +47,7 @@ function do_unit_test_coverage() { function do_integration_test_coverage() { export TEST_TARGETS="//test:python_test" - export COVERAGE_THRESHOLD=78.0 + export COVERAGE_THRESHOLD=78.6 echo "bazel coverage build with tests ${TEST_TARGETS}" test/run_nighthawk_bazel_coverage.sh ${TEST_TARGETS} exit 0 diff --git a/source/server/http_test_server_filter.cc b/source/server/http_test_server_filter.cc index cf01105cf..5dd51851a 100644 --- a/source/server/http_test_server_filter.cc +++ b/source/server/http_test_server_filter.cc @@ -13,8 +13,8 @@ namespace Nighthawk { namespace Server { HttpTestServerDecoderFilterConfig::HttpTestServerDecoderFilterConfig( - nighthawk::server::ResponseOptions proto_config) - : server_config_(std::move(proto_config)) {} + const nighthawk::server::ResponseOptions& proto_config) + : FilterConfigurationBase(proto_config, "test-server") {} HttpTestServerDecoderFilter::HttpTestServerDecoderFilter( HttpTestServerDecoderFilterConfigSharedPtr config) @@ -22,43 +22,34 @@ HttpTestServerDecoderFilter::HttpTestServerDecoderFilter( void HttpTestServerDecoderFilter::onDestroy() {} -void HttpTestServerDecoderFilter::sendReply() { - if (!json_merge_error_) { - std::string response_body(base_config_.response_body_size(), 'a'); - if (request_headers_dump_.has_value()) { - response_body += *request_headers_dump_; - } - decoder_callbacks_->sendLocalReply( - static_cast(200), response_body, - [this](Envoy::Http::ResponseHeaderMap& direct_response_headers) { - Configuration::applyConfigToResponseHeaders(direct_response_headers, base_config_); - }, - absl::nullopt, ""); - } else { - decoder_callbacks_->sendLocalReply( - static_cast(500), - fmt::format("test-server didn't understand the request: {}", error_message_), nullptr, - absl::nullopt, ""); 
+void HttpTestServerDecoderFilter::sendReply(const nighthawk::server::ResponseOptions& options) { + std::string response_body(options.response_body_size(), 'a'); + if (request_headers_dump_.has_value()) { + response_body += *request_headers_dump_; } + decoder_callbacks_->sendLocalReply( + static_cast(200), response_body, + [options](Envoy::Http::ResponseHeaderMap& direct_response_headers) { + Configuration::applyConfigToResponseHeaders(direct_response_headers, options); + }, + absl::nullopt, ""); } Envoy::Http::FilterHeadersStatus HttpTestServerDecoderFilter::decodeHeaders(Envoy::Http::RequestHeaderMap& headers, bool end_stream) { - // TODO(oschaaf): Add functionality to clear fields - base_config_ = config_->server_config(); - const auto* request_config_header = headers.get(TestServer::HeaderNames::get().TestServerConfig); - if (request_config_header) { - json_merge_error_ = !Configuration::mergeJsonConfig( - request_config_header->value().getStringView(), base_config_, error_message_); - } - if (base_config_.echo_request_headers()) { - std::stringstream headers_dump; - headers_dump << "\nRequest Headers:\n" << headers; - request_headers_dump_ = headers_dump.str(); - } + config_->computeEffectiveConfiguration(headers); if (end_stream) { - sendReply(); + if (!config_->maybeSendErrorReply(*decoder_callbacks_)) { + const absl::StatusOr effective_config = + config_->getEffectiveConfiguration(); + if (effective_config.value()->echo_request_headers()) { + std::stringstream headers_dump; + headers_dump << "\nRequest Headers:\n" << headers; + request_headers_dump_ = headers_dump.str(); + } + sendReply(*effective_config.value()); + } } return Envoy::Http::FilterHeadersStatus::StopIteration; } @@ -66,7 +57,9 @@ HttpTestServerDecoderFilter::decodeHeaders(Envoy::Http::RequestHeaderMap& header Envoy::Http::FilterDataStatus HttpTestServerDecoderFilter::decodeData(Envoy::Buffer::Instance&, bool end_stream) { if (end_stream) { - sendReply(); + if 
(!config_->maybeSendErrorReply(*decoder_callbacks_)) { + sendReply(*config_->getEffectiveConfiguration().value()); + } } return Envoy::Http::FilterDataStatus::StopIterationNoBuffer; } diff --git a/source/server/http_test_server_filter.h b/source/server/http_test_server_filter.h index 6f8d2ace1..16f3e378e 100644 --- a/source/server/http_test_server_filter.h +++ b/source/server/http_test_server_filter.h @@ -6,17 +6,15 @@ #include "api/server/response_options.pb.h" +#include "server/http_filter_config_base.h" + namespace Nighthawk { namespace Server { // Basically this is left in as a placeholder for further configuration. -class HttpTestServerDecoderFilterConfig { +class HttpTestServerDecoderFilterConfig : public FilterConfigurationBase { public: - HttpTestServerDecoderFilterConfig(nighthawk::server::ResponseOptions proto_config); - const nighthawk::server::ResponseOptions& server_config() { return server_config_; } - -private: - const nighthawk::server::ResponseOptions server_config_; + HttpTestServerDecoderFilterConfig(const nighthawk::server::ResponseOptions& proto_config); }; using HttpTestServerDecoderFilterConfigSharedPtr = @@ -36,12 +34,9 @@ class HttpTestServerDecoderFilter : public Envoy::Http::StreamDecoderFilter { void setDecoderFilterCallbacks(Envoy::Http::StreamDecoderFilterCallbacks&) override; private: - void sendReply(); + void sendReply(const nighthawk::server::ResponseOptions& options); const HttpTestServerDecoderFilterConfigSharedPtr config_; Envoy::Http::StreamDecoderFilterCallbacks* decoder_callbacks_; - nighthawk::server::ResponseOptions base_config_; - bool json_merge_error_{false}; - std::string error_message_; absl::optional request_headers_dump_; }; diff --git a/source/server/http_time_tracking_filter.cc b/source/server/http_time_tracking_filter.cc index 045192d0e..aba57d46f 100644 --- a/source/server/http_time_tracking_filter.cc +++ b/source/server/http_time_tracking_filter.cc @@ -47,7 +47,8 @@ Envoy::Http::FilterDataStatus 
HttpTimeTrackingFilter::decodeData(Envoy::Buffer:: Envoy::Http::FilterHeadersStatus HttpTimeTrackingFilter::encodeHeaders(Envoy::Http::ResponseHeaderMap& response_headers, bool) { - const auto effective_config = config_->getEffectiveConfiguration(); + const absl::StatusOr effective_config = + config_->getEffectiveConfiguration(); if (effective_config.ok()) { const std::string previous_request_delta_in_response_header = effective_config.value()->emit_previous_request_delta_in_response_header(); diff --git a/test/server/http_test_server_filter_integration_test.cc b/test/server/http_test_server_filter_integration_test.cc index 3ae792d0c..8b355101f 100644 --- a/test/server/http_test_server_filter_integration_test.cc +++ b/test/server/http_test_server_filter_integration_test.cc @@ -1,94 +1,39 @@ -#include "envoy/upstream/cluster_manager.h" -#include "envoy/upstream/upstream.h" - -#include "external/envoy/test/common/upstream/utility.h" -#include "external/envoy/test/integration/http_integration.h" - #include "api/server/response_options.pb.h" #include "api/server/response_options.pb.validate.h" #include "server/configuration.h" #include "server/http_test_server_filter.h" -#include "server/well_known_headers.h" + +#include "test/server/http_filter_integration_test_base.h" #include "gtest/gtest.h" namespace Nighthawk { +namespace { using namespace testing; -constexpr absl::string_view kBadJson = "bad_json"; -class HttpTestServerIntegrationTestBase : public Envoy::HttpIntegrationTest, - public TestWithParam { -public: - HttpTestServerIntegrationTestBase() - : HttpIntegrationTest(Envoy::Http::CodecClient::Type::HTTP1, GetParam(), realTime()) {} - - // TODO(oschaaf): Modify Envoy's Envoy::IntegrationUtil::makeSingleRequest() to allow for a way to - // manipulate the request headers before they get send. Then we can eliminate these copies. 
- Envoy::BufferingStreamDecoderPtr makeSingleRequest( - uint32_t port, absl::string_view method, absl::string_view url, absl::string_view body, - Envoy::Http::CodecClient::Type type, Envoy::Network::Address::IpVersion ip_version, - absl::string_view host, absl::string_view content_type, - const std::function& request_header_delegate) { - auto addr = Envoy::Network::Utility::resolveUrl(fmt::format( - "tcp://{}:{}", Envoy::Network::Test::getLoopbackAddressUrlString(ip_version), port)); - return makeSingleRequest(addr, method, url, body, type, host, content_type, - request_header_delegate); - } +constexpr absl::string_view kDefaultProto = R"EOF( +name: test-server +typed_config: + "@type": type.googleapis.com/nighthawk.server.ResponseOptions + response_body_size: 10 + response_headers: + - { header: { key: "x-supplied-by", value: "nighthawk-test-server"} } +)EOF"; - Envoy::BufferingStreamDecoderPtr makeSingleRequest( - const Envoy::Network::Address::InstanceConstSharedPtr& addr, absl::string_view method, - absl::string_view url, absl::string_view body, Envoy::Http::CodecClient::Type type, - absl::string_view host, absl::string_view content_type, - const std::function& request_header_delegate) { - Envoy::Api::ApiPtr api = Envoy::Api::createApiForTest(); - Envoy::Event::DispatcherPtr dispatcher(api->allocateDispatcher("test_thread")); - std::shared_ptr cluster{ - new NiceMock()}; - Envoy::Upstream::HostDescriptionConstSharedPtr host_description{ - Envoy::Upstream::makeTestHostDescription(cluster, "tcp://127.0.0.1:80")}; - Envoy::Http::CodecClientProd client( - type, - dispatcher->createClientConnection(addr, Envoy::Network::Address::InstanceConstSharedPtr(), - Envoy::Network::Test::createRawBufferSocket(), nullptr), - host_description, *dispatcher, random_); - Envoy::BufferingStreamDecoderPtr response( - new Envoy::BufferingStreamDecoder([&client, &dispatcher]() -> void { - client.close(); - dispatcher->exit(); - })); - Envoy::Http::RequestEncoder& encoder = 
client.newStream(*response); - encoder.getStream().addCallbacks(*response); - - auto headers = Envoy::Http::RequestHeaderMapImpl::create(); - headers->setMethod(method); - headers->setPath(url); - headers->setHost(host); - headers->setScheme(Envoy::Http::Headers::get().SchemeValues.Http); - if (!content_type.empty()) { - headers->setContentType(content_type); - } - request_header_delegate(*headers); - encoder.encodeHeaders(*headers, body.empty()); - if (!body.empty()) { - Envoy::Buffer::OwnedImpl body_buffer(body); - encoder.encodeData(body_buffer, true); - } +constexpr absl::string_view kNoConfigProto = R"EOF( +name: test-server +)EOF"; - dispatcher->run(Envoy::Event::Dispatcher::RunType::Block); - return response; - } +class HttpTestServerIntegrationTest : public HttpFilterIntegrationTestBase, + public TestWithParam { +public: + HttpTestServerIntegrationTest() : HttpFilterIntegrationTestBase(GetParam()) {} void testWithResponseSize(int response_body_size, bool expect_header = true) { - Envoy::BufferingStreamDecoderPtr response = makeSingleRequest( - lookupPort("http"), "GET", "/", "", downstream_protocol_, version_, "foo.com", "", - [response_body_size](Envoy::Http::RequestHeaderMapImpl& request_headers) { - const std::string header_config = - fmt::format("{{response_body_size:{}}}", response_body_size); - request_headers.addCopy( - Nighthawk::Server::TestServer::HeaderNames::get().TestServerConfig, header_config); - }); + setRequestLevelConfiguration(fmt::format("{{response_body_size:{}}}", response_body_size)); + Envoy::IntegrationStreamDecoderPtr response = getResponse(ResponseOrigin::EXTENSION); ASSERT_TRUE(response->complete()); EXPECT_EQ("200", response->headers().Status()->value().getStringView()); if (expect_header) { @@ -105,55 +50,26 @@ class HttpTestServerIntegrationTestBase : public Envoy::HttpIntegrationTest, } void testBadResponseSize(int response_body_size) { - Envoy::BufferingStreamDecoderPtr response = makeSingleRequest( - lookupPort("http"), 
"GET", "/", "", downstream_protocol_, version_, "foo.com", "", - [response_body_size](Envoy::Http::RequestHeaderMapImpl& request_headers) { - const std::string header_config = - fmt::format("{{response_body_size:{}}}", response_body_size); - request_headers.addCopy( - Nighthawk::Server::TestServer::HeaderNames::get().TestServerConfig, header_config); - }); + setRequestLevelConfiguration(fmt::format("{{response_body_size:{}}}", response_body_size)); + Envoy::IntegrationStreamDecoderPtr response = getResponse(ResponseOrigin::EXTENSION); ASSERT_TRUE(response->complete()); EXPECT_EQ("500", response->headers().Status()->value().getStringView()); } }; -class HttpTestServerIntegrationTest : public HttpTestServerIntegrationTestBase { -public: - void SetUp() override { initialize(); } - - void initialize() override { - config_helper_.addFilter(R"EOF( -name: test-server -typed_config: - "@type": type.googleapis.com/nighthawk.server.ResponseOptions - response_body_size: 10 - response_headers: - - { header: { key: "x-supplied-by", value: "nighthawk-test-server"} } -)EOF"); - HttpTestServerIntegrationTestBase::initialize(); - } - - void TearDown() override { - cleanupUpstreamAndDownstream(); - test_server_.reset(); - fake_upstreams_.clear(); - } -}; - INSTANTIATE_TEST_SUITE_P(IpVersions, HttpTestServerIntegrationTest, ValuesIn(Envoy::TestEnvironment::getIpVersionsForTest())); TEST_P(HttpTestServerIntegrationTest, TestNoHeaderConfig) { - Envoy::BufferingStreamDecoderPtr response = - makeSingleRequest(lookupPort("http"), "GET", "/", "", downstream_protocol_, version_, - "foo.com", "", [](Envoy::Http::RequestHeaderMapImpl&) {}); + initializeFilterConfiguration(kDefaultProto); + Envoy::IntegrationStreamDecoderPtr response = getResponse(ResponseOrigin::EXTENSION); ASSERT_TRUE(response->complete()); EXPECT_EQ("200", response->headers().Status()->value().getStringView()); EXPECT_EQ(std::string(10, 'a'), response->body()); } TEST_P(HttpTestServerIntegrationTest, TestBasics) { + 
initializeFilterConfiguration(kDefaultProto); testWithResponseSize(1); testWithResponseSize(10); testWithResponseSize(100); @@ -161,30 +77,34 @@ TEST_P(HttpTestServerIntegrationTest, TestBasics) { testWithResponseSize(10000); } -TEST_P(HttpTestServerIntegrationTest, TestNegative) { testBadResponseSize(-1); } +TEST_P(HttpTestServerIntegrationTest, TestNegative) { + initializeFilterConfiguration(kDefaultProto); + testBadResponseSize(-1); +} // TODO(oschaaf): We can't currently override with a default value ('0') in this case. -TEST_P(HttpTestServerIntegrationTest, DISABLED_TestZeroLengthRequest) { testWithResponseSize(0); } +TEST_P(HttpTestServerIntegrationTest, DISABLED_TestZeroLengthRequest) { + initializeFilterConfiguration(kDefaultProto); + testWithResponseSize(0); +} TEST_P(HttpTestServerIntegrationTest, TestMaxBoundaryLengthRequest) { + initializeFilterConfiguration(kDefaultProto); const int max = 1024 * 1024 * 4; testWithResponseSize(max); } TEST_P(HttpTestServerIntegrationTest, TestTooLarge) { + initializeFilterConfiguration(kDefaultProto); const int max = 1024 * 1024 * 4; testBadResponseSize(max + 1); } TEST_P(HttpTestServerIntegrationTest, TestHeaderConfig) { - Envoy::BufferingStreamDecoderPtr response = makeSingleRequest( - lookupPort("http"), "GET", "/", "", downstream_protocol_, version_, "foo.com", "", - [](Envoy::Http::RequestHeaderMapImpl& request_headers) { - const std::string header_config = - R"({response_headers: [ { header: { key: "foo", value: "bar2"}, append: true } ]})"; - request_headers.addCopy(Nighthawk::Server::TestServer::HeaderNames::get().TestServerConfig, - header_config); - }); + initializeFilterConfiguration(kDefaultProto); + setRequestLevelConfiguration( + R"({response_headers: [ { header: { key: "foo", value: "bar2"}, append: true } ]})"); + Envoy::IntegrationStreamDecoderPtr response = getResponse(ResponseOrigin::EXTENSION); ASSERT_TRUE(response->complete()); EXPECT_EQ("200", response->headers().Status()->value().getStringView()); 
EXPECT_EQ("bar2", @@ -193,17 +113,15 @@ TEST_P(HttpTestServerIntegrationTest, TestHeaderConfig) { } TEST_P(HttpTestServerIntegrationTest, TestEchoHeaders) { + initializeFilterConfiguration(kDefaultProto); + setRequestLevelConfiguration("{echo_request_headers: true}"); + setRequestHeader(Envoy::Http::LowerCaseString("gray"), "pidgeon"); + setRequestHeader(Envoy::Http::LowerCaseString("red"), "fox"); + setRequestHeader(Envoy::Http::LowerCaseString(":authority"), "foo.com"); + setRequestHeader(Envoy::Http::LowerCaseString(":path"), "/somepath"); for (auto unique_header : {"one", "two", "three"}) { - Envoy::BufferingStreamDecoderPtr response = makeSingleRequest( - lookupPort("http"), "GET", "/somepath", "", downstream_protocol_, version_, "foo.com", "", - [unique_header](Envoy::Http::RequestHeaderMapImpl& request_headers) { - request_headers.addCopy(Envoy::Http::LowerCaseString("gray"), "pidgeon"); - request_headers.addCopy(Envoy::Http::LowerCaseString("red"), "fox"); - request_headers.addCopy(Envoy::Http::LowerCaseString("unique_header"), unique_header); - request_headers.addCopy( - Nighthawk::Server::TestServer::HeaderNames::get().TestServerConfig, - "{echo_request_headers: true}"); - }); + setRequestHeader(Envoy::Http::LowerCaseString("unique_header"), unique_header); + Envoy::IntegrationStreamDecoderPtr response = getResponse(ResponseOrigin::EXTENSION); ASSERT_TRUE(response->complete()); EXPECT_EQ("200", response->headers().Status()->value().getStringView()); EXPECT_THAT(response->body(), HasSubstr(R"(':authority', 'foo.com')")); @@ -215,37 +133,16 @@ TEST_P(HttpTestServerIntegrationTest, TestEchoHeaders) { } } -class HttpTestServerIntegrationNoConfigTest : public HttpTestServerIntegrationTestBase { -public: - void SetUp() override { initialize(); } - - void TearDown() override { - cleanupUpstreamAndDownstream(); - test_server_.reset(); - fake_upstreams_.clear(); - } - - void initialize() override { - config_helper_.addFilter(R"EOF( -name: test-server -)EOF"); - 
HttpTestServerIntegrationTestBase::initialize(); - } -}; - -INSTANTIATE_TEST_SUITE_P(IpVersions, HttpTestServerIntegrationNoConfigTest, - ValuesIn(Envoy::TestEnvironment::getIpVersionsForTest())); - -TEST_P(HttpTestServerIntegrationNoConfigTest, TestNoHeaderConfig) { - Envoy::BufferingStreamDecoderPtr response = - makeSingleRequest(lookupPort("http"), "GET", "/", "", downstream_protocol_, version_, - "foo.com", "", [](Envoy::Http::RequestHeaderMapImpl&) {}); +TEST_P(HttpTestServerIntegrationTest, NoNoStaticConfigHeaderConfig) { + initializeFilterConfiguration(kNoConfigProto); + Envoy::IntegrationStreamDecoderPtr response = getResponse(ResponseOrigin::EXTENSION); ASSERT_TRUE(response->complete()); EXPECT_EQ("200", response->headers().Status()->value().getStringView()); EXPECT_EQ("", response->body()); } -TEST_P(HttpTestServerIntegrationNoConfigTest, TestBasics) { +TEST_P(HttpTestServerIntegrationTest, TestNoStaticConfigBasics) { + initializeFilterConfiguration(kNoConfigProto); testWithResponseSize(1, false); testWithResponseSize(10, false); testWithResponseSize(100, false); @@ -253,31 +150,34 @@ TEST_P(HttpTestServerIntegrationNoConfigTest, TestBasics) { testWithResponseSize(10000, false); } -TEST_P(HttpTestServerIntegrationNoConfigTest, TestNegative) { testBadResponseSize(-1); } +TEST_P(HttpTestServerIntegrationTest, TestNoStaticConfigNegative) { + initializeFilterConfiguration(kNoConfigProto); + testBadResponseSize(-1); +} -TEST_P(HttpTestServerIntegrationNoConfigTest, TestZeroLengthRequest) { +TEST_P(HttpTestServerIntegrationTest, TestNoStaticConfigZeroLengthRequest) { + initializeFilterConfiguration(kNoConfigProto); testWithResponseSize(0, false); } -TEST_P(HttpTestServerIntegrationNoConfigTest, TestMaxBoundaryLengthRequest) { +TEST_P(HttpTestServerIntegrationTest, TestNoStaticConfigMaxBoundaryLengthRequest) { + initializeFilterConfiguration(kNoConfigProto); const int max = 1024 * 1024 * 4; testWithResponseSize(max, false); } 
-TEST_P(HttpTestServerIntegrationNoConfigTest, TestTooLarge) { +TEST_P(HttpTestServerIntegrationTest, TestNoStaticConfigTooLarge) { + initializeFilterConfiguration(kNoConfigProto); const int max = 1024 * 1024 * 4; testBadResponseSize(max + 1); } -TEST_P(HttpTestServerIntegrationNoConfigTest, TestHeaderConfig) { - Envoy::BufferingStreamDecoderPtr response = makeSingleRequest( - lookupPort("http"), "GET", "/", "", downstream_protocol_, version_, "foo.com", "", - [](Envoy::Http::RequestHeaderMapImpl& request_headers) { - const std::string header_config = - R"({response_headers: [ { header: { key: "foo", value: "bar2"}, append: true } ]})"; - request_headers.addCopy(Nighthawk::Server::TestServer::HeaderNames::get().TestServerConfig, - header_config); - }); +TEST_P(HttpTestServerIntegrationTest, TestNoStaticConfigHeaderConfig) { + initializeFilterConfiguration(kNoConfigProto); + setRequestLevelConfiguration( + R"({response_headers: [ { header: { key: "foo", value: "bar2"}, append: true } ]})"); + Envoy::IntegrationStreamDecoderPtr response = getResponse(ResponseOrigin::EXTENSION); + ASSERT_TRUE(response->complete()); EXPECT_EQ("200", response->headers().Status()->value().getStringView()); EXPECT_EQ("bar2", @@ -285,24 +185,8 @@ TEST_P(HttpTestServerIntegrationNoConfigTest, TestHeaderConfig) { EXPECT_EQ("", response->body()); } -TEST_P(HttpTestServerIntegrationNoConfigTest, BadTestHeaderConfig) { - Envoy::BufferingStreamDecoderPtr response = makeSingleRequest( - lookupPort("http"), "GET", "/", "", downstream_protocol_, version_, "foo.com", "", - [](Envoy::Http::RequestHeaderMapImpl& request_headers) { - request_headers.addCopy(Nighthawk::Server::TestServer::HeaderNames::get().TestServerConfig, - kBadJson); - }); - ASSERT_TRUE(response->complete()); - EXPECT_EQ("500", response->headers().Status()->value().getStringView()); - EXPECT_EQ("test-server didn't understand the request: Error merging json config: Unable to parse " - "JSON as proto (INVALID_ARGUMENT:Unexpected 
token.\nbad_json\n^): bad_json", - response->body()); -} - -class HttpTestServerDecoderFilterTest : public Test {}; - // Here we test config-level merging as well as its application at the response-header level. -TEST_F(HttpTestServerDecoderFilterTest, HeaderMerge) { +TEST(HttpTestServerDecoderFilterTest, HeaderMerge) { nighthawk::server::ResponseOptions initial_options; auto response_header = initial_options.add_response_headers(); response_header->mutable_header()->set_key("foo"); @@ -312,9 +196,11 @@ TEST_F(HttpTestServerDecoderFilterTest, HeaderMerge) { Server::HttpTestServerDecoderFilterConfigSharedPtr config = std::make_shared(initial_options); Server::HttpTestServerDecoderFilter f(config); - std::string error_message; - nighthawk::server::ResponseOptions options = config->server_config(); + absl::StatusOr options_or = + config->getEffectiveConfiguration(); + ASSERT_TRUE(options_or.ok()); + nighthawk::server::ResponseOptions options = *options_or.value(); EXPECT_EQ(1, options.response_headers_size()); EXPECT_EQ("foo", options.response_headers(0).header().key()); @@ -326,6 +212,7 @@ TEST_F(HttpTestServerDecoderFilterTest, HeaderMerge) { EXPECT_TRUE(Envoy::TestUtility::headerMapEqualIgnoreOrder( header_map, Envoy::Http::TestResponseHeaderMapImpl{{":status", "200"}, {"foo", "bar1"}})); + std::string error_message; EXPECT_TRUE(Server::Configuration::mergeJsonConfig( R"({response_headers: [ { header: { key: "foo", value: "bar2"}, append: false } ]})", options, error_message)); @@ -355,11 +242,12 @@ TEST_F(HttpTestServerDecoderFilterTest, HeaderMerge) { header_map, Envoy::Http::TestResponseHeaderMapImpl{ {":status", "200"}, {"foo", "bar2"}, {"foo2", "bar3"}})); - EXPECT_FALSE(Server::Configuration::mergeJsonConfig(kBadJson, options, error_message)); + EXPECT_FALSE(Server::Configuration::mergeJsonConfig("bad_json", options, error_message)); EXPECT_EQ("Error merging json config: Unable to parse JSON as proto (INVALID_ARGUMENT:Unexpected " "token.\nbad_json\n^): 
bad_json", error_message); EXPECT_EQ(3, options.response_headers_size()); } +} // namespace } // namespace Nighthawk From 3eea4b54bcda9c2d5ca8edab4e1013790fac20d5 Mon Sep 17 00:00:00 2001 From: Otto van der Schaaf Date: Mon, 12 Oct 2020 21:32:48 +0200 Subject: [PATCH 27/63] Update Envoy & HdrHistogram_c (#561) - Update Envoy to 2097fe908f2abb718757dbd4087d793c861d7c5a - Update HdrHistogram_c to 0.11.2 Adds `--define tcmalloc=gperftools` to the opt build we use to push images, to (continue to) allow cpu profiling. We need this because of https://github.com/envoyproxy/envoy/commit/4c0d2d2a36996f2ec7d00b519ea90068629a2c55 Signed-off-by: Otto van der Schaaf --- .bazelrc | 36 +++++++++++--------- .circleci/config.yml | 4 +-- bazel/repositories.bzl | 8 ++--- benchmarks/README.md | 1 + ci/do_ci.sh | 5 +-- ci/run_clang_tidy.sh | 64 ++++++++++++++++++++++++++--------- source/client/process_impl.cc | 6 ++-- test/rate_limiter_test.cc | 2 +- 8 files changed, 82 insertions(+), 44 deletions(-) diff --git a/.bazelrc b/.bazelrc index 0a6a7bd47..ac352e308 100644 --- a/.bazelrc +++ b/.bazelrc @@ -27,7 +27,8 @@ build --host_javabase=@bazel_tools//tools/jdk:remote_jdk11 build --javabase=@bazel_tools//tools/jdk:remote_jdk11 build --enable_platform_specific_config -# Enable position independent code, this option is not supported on Windows and default on on macOS. 
+# Enable position independent code (this is the default on macOS and Windows) +# (Workaround for https://github.com/bazelbuild/rules_foreign_cc/issues/421) build:linux --copt=-fPIC build:linux --cxxopt=-std=c++17 build:linux --conlyopt=-fexceptions @@ -117,7 +118,8 @@ build:libc++ --config=clang build:libc++ --action_env=CXXFLAGS=-stdlib=libc++ build:libc++ --action_env=LDFLAGS=-stdlib=libc++ build:libc++ --action_env=BAZEL_CXXOPTS=-stdlib=libc++ -build:libc++ --action_env=BAZEL_LINKLIBS=-l%:libc++.a:-l%:libc++abi.a:-lm +build:libc++ --action_env=BAZEL_LINKLIBS=-l%:libc++.a:-l%:libc++abi.a +build:libc++ --action_env=BAZEL_LINKOPTS=-lm:-pthread build:libc++ --define force_libcpp=enabled # Optimize build for binary size reduction. @@ -199,6 +201,8 @@ build:remote --spawn_strategy=remote,sandboxed,local build:remote --strategy=Javac=remote,sandboxed,local build:remote --strategy=Closure=remote,sandboxed,local build:remote --strategy=Genrule=remote,sandboxed,local +# rules_rust is not remote runnable (yet) +build:remote --strategy=Rustc=sandboxed,local build:remote --remote_timeout=7200 build:remote --auth_enabled=true build:remote --remote_download_toplevel @@ -235,7 +239,7 @@ build:remote-clang-cl --config=rbe-toolchain-clang-cl # Docker sandbox # NOTE: Update this from https://github.com/envoyproxy/envoy-build-tools/blob/master/toolchains/rbe_toolchains_config.bzl#L8 -build:docker-sandbox --experimental_docker_image=envoyproxy/envoy-build-ubuntu:1526786b8f5cfce7a40829a0c527b5a27570889c +build:docker-sandbox --experimental_docker_image=envoyproxy/envoy-build-ubuntu:b480535e8423b5fd7c102fd30c92f4785519e33a build:docker-sandbox --spawn_strategy=docker build:docker-sandbox --strategy=Javac=docker build:docker-sandbox --strategy=Closure=docker @@ -292,27 +296,27 @@ build:windows --define signal_trace=disabled build:windows --define hot_restart=disabled build:windows --define tcmalloc=disabled build:windows --define manual_stamp=manual_stamp +build:windows 
--cxxopt="/std:c++17" -# Should not be required after upstream fix to bazel, -# and already a no-op to linux/macos builds -# see issue https://github.com/bazelbuild/rules_foreign_cc/issues/301 +# TODO(wrowe,sunjayBhatia): Resolve bugs upstream in curl and rules_foreign_cc +# See issue https://github.com/bazelbuild/rules_foreign_cc/issues/301 build:windows --copt="-DCARES_STATICLIB" build:windows --copt="-DNGHTTP2_STATICLIB" build:windows --copt="-DCURL_STATICLIB" -build:windows --cxxopt="/std:c++17" -# Required to work around build defects on Windows MSVC cl -# Unguarded gcc pragmas in quiche are not recognized by MSVC -build:msvc-cl --copt="/wd4068" -# Allows 'nodiscard' function return values to be discarded -build:msvc-cl --copt="/wd4834" -# Allows inline functions to be undefined -build:msvc-cl --copt="/wd4506" -build:msvc-cl --copt="-D_SILENCE_EXPERIMENTAL_FILESYSTEM_DEPRECATION_WARNING" +# Override any clang preference if building msvc-cl +# Drop the determinism feature (-DDATE etc are a no-op in msvc-cl) +build:msvc-cl --action_env=USE_CLANG_CL="" +build:msvc-cl --define clang_cl=0 +build:msvc-cl --features=-determinism + +# Windows build behaviors when using clang-cl +build:clang-cl --action_env=USE_CLANG_CL=1 +build:clang-cl --define clang_cl=1 # Required to work around Windows clang-cl build defects # Ignore conflicting definitions of _WIN32_WINNT -# Overriding __TIME__ etc is problematic (and is actually an invalid no-op) +# Override determinism flags (DATE etc) is valid on clang-cl compiler build:clang-cl --copt="-Wno-macro-redefined" build:clang-cl --copt="-Wno-builtin-macro-redefined" build:clang-cl --action_env=USE_CLANG_CL=1 diff --git a/.circleci/config.yml b/.circleci/config.yml index a5b8b130b..8559d09e4 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -1,6 +1,6 @@ references: - envoy-build-image: &envoy-build-image # October 2nd, 2020 - envoyproxy/envoy-build-ubuntu:1526786b8f5cfce7a40829a0c527b5a27570889c + envoy-build-image: 
&envoy-build-image # October 12th, 2020 + envoyproxy/envoy-build-ubuntu:b480535e8423b5fd7c102fd30c92f4785519e33a version: 2 jobs: build: diff --git a/bazel/repositories.bzl b/bazel/repositories.bzl index 7b8ff6d20..583aa2a84 100644 --- a/bazel/repositories.bzl +++ b/bazel/repositories.bzl @@ -1,10 +1,10 @@ load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") -ENVOY_COMMIT = "c318156496edc46c844822a3d1f107ee496fa449" # October 2nd, 2020 -ENVOY_SHA = "dc4ee70d317f0310b96cb803a5c02b42589f5d1dfbfb2989f4bf72800aaa799a" +ENVOY_COMMIT = "2097fe908f2abb718757dbd4087d793c861d7c5a" # October 12th, 2020 +ENVOY_SHA = "323360544ee355f0eddea742b31a80a94899090db1d64029cd22880083b311c0" -HDR_HISTOGRAM_C_VERSION = "0.11.1" # September 17th, 2020 -HDR_HISTOGRAM_C_SHA = "8550071d4ae5c8229448f9b68469d6d42c620cd25111b49c696d00185e5f8329" +HDR_HISTOGRAM_C_VERSION = "0.11.2" # October 12th, 2020 +HDR_HISTOGRAM_C_SHA = "637f28b5f64de2e268131e4e34e6eef0b91cf5ff99167db447d9b2825eae6bad" def nighthawk_dependencies(): http_archive( diff --git a/benchmarks/README.md b/benchmarks/README.md index 94a145be7..2732a378b 100644 --- a/benchmarks/README.md +++ b/benchmarks/README.md @@ -67,6 +67,7 @@ bazel test \ --compilation_mode=opt \ --cxxopt=-g \ --cxxopt=-ggdb3 \ + --define tcmalloc=gperftools \ //benchmarks:* ``` diff --git a/ci/do_ci.sh b/ci/do_ci.sh index 4abbcfe8e..0141ad83c 100755 --- a/ci/do_ci.sh +++ b/ci/do_ci.sh @@ -24,8 +24,8 @@ function do_build () { } function do_opt_build () { - bazel build $BAZEL_BUILD_OPTIONS -c opt //:nighthawk - bazel build $BAZEL_BUILD_OPTIONS -c opt //benchmarks:benchmarks + bazel build $BAZEL_BUILD_OPTIONS -c opt --define tcmalloc=gperftools //:nighthawk + bazel build $BAZEL_BUILD_OPTIONS -c opt --define tcmalloc=gperftools //benchmarks:benchmarks } function do_test() { @@ -127,6 +127,7 @@ function do_benchmark_with_own_binaries() { --compilation_mode=opt \ --cxxopt=-g \ --cxxopt=-ggdb3 \ + --define tcmalloc=gperftools \ //benchmarks:* } diff 
--git a/ci/run_clang_tidy.sh b/ci/run_clang_tidy.sh index c363d6877..040b5a46b 100755 --- a/ci/run_clang_tidy.sh +++ b/ci/run_clang_tidy.sh @@ -1,6 +1,7 @@ #!/bin/bash set -eo pipefail + # ENVOY_SRCDIR should point to where Envoy source lives, while SRCDIR could be a downstream build # (for example envoy-filter-example). [[ -z "${ENVOY_SRCDIR}" ]] && ENVOY_SRCDIR="${PWD}" @@ -36,41 +37,72 @@ function exclude_win32_impl() { # Do not run clang-tidy against macOS impl # TODO: We should run clang-tidy against macOS impl for completeness function exclude_macos_impl() { - grep -v source/common/filesystem/kqueue/ + grep -v source/common/filesystem/kqueue/ | grep -v source/common/network/apple_dns_impl | grep -v test/common/network/apple_dns_impl_test } # Do not run incremental clang-tidy on check_format testdata files. -function exclude_testdata() { +function exclude_check_format_testdata() { grep -v tools/testdata/check_format/ } +# Do not run clang-tidy on envoy_headersplit testdata files. +function exclude_headersplit_testdata() { + grep -v tools/envoy_headersplit/ +} + +# Do not run clang-tidy against Chromium URL import, this needs to largely +# reflect the upstream structure. +function exclude_chromium_url() { + grep -v source/common/chromium_url/ +} + # Exclude files in third_party which are temporary forks from other OSS projects. function exclude_third_party() { grep -v third_party/ } +# Exclude files which are part of the Wasm emscripten environment +function exclude_wasm_emscripten() { + grep -v source/extensions/common/wasm/ext +} + +# Exclude files which are part of the Wasm SDK +function exclude_wasm_sdk() { + grep -v proxy_wasm_cpp_sdk +} + +# Exclude files which are part of the Wasm Host environment +function exclude_wasm_host() { + grep -v proxy_wasm_cpp_host +} + +# Exclude proxy-wasm test_data. 
+function exclude_wasm_test_data() { + grep -v wasm/test_data +} + function filter_excludes() { - exclude_testdata | exclude_win32_impl | exclude_macos_impl | exclude_third_party + exclude_check_format_testdata | exclude_headersplit_testdata | exclude_chromium_url | exclude_win32_impl | exclude_macos_impl | exclude_third_party | exclude_wasm_emscripten | exclude_wasm_sdk | exclude_wasm_host | exclude_wasm_test_data } function run_clang_tidy() { python3 "${LLVM_PREFIX}/share/clang/run-clang-tidy.py" \ - -clang-tidy-binary=${CLANG_TIDY} -header-filter='-external' \ - -clang-apply-replacements-binary=${CLANG_APPLY_REPLACEMENTS} \ - -export-fixes=${FIX_YAML} -j ${NUM_CPUS:-0} -p ${SRCDIR} -quiet \ - ${APPLY_CLANG_TIDY_FIXES:+-fix} $@ + -clang-tidy-binary="${CLANG_TIDY}" \ + -clang-apply-replacements-binary="${CLANG_APPLY_REPLACEMENTS}" \ + -export-fixes=${FIX_YAML} -j "${NUM_CPUS:-0}" -p "${SRCDIR}" -quiet \ + ${APPLY_CLANG_TIDY_FIXES:+-fix} "$@" } function run_clang_tidy_diff() { - git diff $1 | filter_excludes | \ + git diff "$1" | filter_excludes | \ python3 "${LLVM_PREFIX}/share/clang/clang-tidy-diff.py" \ - -clang-tidy-binary=${CLANG_TIDY} \ - -export-fixes=${FIX_YAML} -j ${NUM_CPUS:-0} -p 1 -quiet + -clang-tidy-binary="${CLANG_TIDY}" \ + -export-fixes="${FIX_YAML}" -j "${NUM_CPUS:-0}" -p 1 -quiet } if [[ $# -gt 0 ]]; then - echo "Running clang-tidy on: $@" - run_clang_tidy $@ + echo "Running clang-tidy on: $*" + run_clang_tidy "$@" elif [[ "${RUN_FULL_CLANG_TIDY}" == 1 ]]; then echo "Running a full clang-tidy" run_clang_tidy @@ -81,15 +113,15 @@ else elif [[ "${BUILD_REASON}" == *CI ]]; then DIFF_REF="HEAD^" else - DIFF_REF=$(${ENVOY_SRCDIR}/tools/git/last_github_commit.sh) + DIFF_REF=$("${ENVOY_SRCDIR}"/tools/git/last_github_commit.sh) fi fi - echo "Running clang-tidy-diff against ${DIFF_REF} ($(git rev-parse ${DIFF_REF})), current HEAD ($(git rev-parse HEAD))" - run_clang_tidy_diff ${DIFF_REF} + echo "Running clang-tidy-diff against ${DIFF_REF} ($(git rev-parse 
"${DIFF_REF}")), current HEAD ($(git rev-parse HEAD))" + run_clang_tidy_diff "${DIFF_REF}" fi if [[ -s "${FIX_YAML}" ]]; then echo "clang-tidy check failed, potentially fixed by clang-apply-replacements:" - cat ${FIX_YAML} + cat "${FIX_YAML}" exit 1 fi diff --git a/source/client/process_impl.cc b/source/client/process_impl.cc index e6bfcf406..516a76f92 100644 --- a/source/client/process_impl.cc +++ b/source/client/process_impl.cc @@ -71,8 +71,8 @@ class ClusterManagerFactory : public Envoy::Upstream::ProdClusterManagerFactory const Envoy::Network::ConnectionSocket::OptionsSharedPtr& options, const Envoy::Network::TransportSocketOptionsSharedPtr& transport_socket_options) override { if (protocol == Envoy::Http::Protocol::Http11 || protocol == Envoy::Http::Protocol::Http10) { - auto* h1_pool = - new Http1PoolImpl(dispatcher, random_, host, priority, options, transport_socket_options); + auto* h1_pool = new Http1PoolImpl(dispatcher, api_.randomGenerator(), host, priority, options, + transport_socket_options); h1_pool->setConnectionReuseStrategy(connection_reuse_strategy_); h1_pool->setPrefetchConnections(prefetch_connections_); return Envoy::Http::ConnectionPool::InstancePtr{h1_pool}; @@ -471,7 +471,7 @@ bool ProcessImpl::runInternal(OutputCollector& collector, const std::vector( time_system_); cluster_manager_factory_ = std::make_unique( - admin_, Envoy::Runtime::LoaderSingleton::get(), store_root_, tls_, generator_, + admin_, Envoy::Runtime::LoaderSingleton::get(), store_root_, tls_, dispatcher_->createDnsResolver({}, false), *ssl_context_manager_, *dispatcher_, *local_info_, secret_manager_, validation_context_, *api_, http_context_, grpc_context_, access_log_manager_, *singleton_manager_); diff --git a/test/rate_limiter_test.cc b/test/rate_limiter_test.cc index 754bbd3bd..296f88373 100644 --- a/test/rate_limiter_test.cc +++ b/test/rate_limiter_test.cc @@ -108,7 +108,7 @@ TEST_F(RateLimiterTest, ScheduledStartingRateLimiterTest) { TEST_F(RateLimiterTest, 
ScheduledStartingRateLimiterTestBadArgs) { Envoy::Event::SimulatedTimeSystem time_system; // Verify we enforce future-only scheduling. - for (const auto timing : + for (const auto& timing : std::vector{time_system.systemTime(), time_system.systemTime() - 10ms}) { std::unique_ptr mock_rate_limiter = std::make_unique(); MockRateLimiter& unsafe_mock_rate_limiter = *mock_rate_limiter; From 20a9de24f644fbb9b50f429e6adbd7374c1e8c29 Mon Sep 17 00:00:00 2001 From: Otto van der Schaaf Date: Wed, 21 Oct 2020 18:11:10 +0200 Subject: [PATCH 28/63] Update Envoy to f95f5391b0b8683081ec786ea946026594955fc6 (#562) Notable changes: - Handle the new `Envoy::Http::StreamResetReason:: ConnectError` & amend related test - Handle that `Envoy::Http::HeaderMap::get()` now returns a `Envoy::Http::HeaderMap::GetResult`. - Squelch Envoy's format check error that scans for urls in `bazel/repositories.bzl` Signed-off-by: Otto van der Schaaf --- .bazelrc | 6 ++--- bazel/repositories.bzl | 8 ++++-- source/client/stream_decoder.cc | 9 ++++--- source/server/http_filter_config_base.cc | 12 ++++++--- .../request_source_plugin_test.cc | 3 ++- ...p_dynamic_delay_filter_integration_test.cc | 25 +++++++++++++------ test/server/http_filter_base_test.cc | 20 +++++++++++++++ .../http_filter_integration_test_base.cc | 11 ++++++++ .../http_filter_integration_test_base.h | 19 ++++++++++++++ ...ttp_test_server_filter_integration_test.cc | 16 +++++++----- ...p_time_tracking_filter_integration_test.cc | 21 ++++++++-------- test/stream_decoder_test.cc | 20 +++++++++++++++ 12 files changed, 132 insertions(+), 38 deletions(-) diff --git a/.bazelrc b/.bazelrc index ac352e308..b22616bce 100644 --- a/.bazelrc +++ b/.bazelrc @@ -11,10 +11,10 @@ build:macos --copt -UDEBUG # Bazel doesn't need more than 200MB of memory for local build based on memory profiling: # https://docs.bazel.build/versions/master/skylark/performance.html#memory-profiling # The default JVM max heapsize is 1/4 of physical memory up to 32GB which 
could be large -# enough to consume all memory constrained by cgroup in large host, which is the case in CircleCI. +# enough to consume all memory constrained by cgroup in large host. # Limiting JVM heapsize here to let it do GC more when approaching the limit to # leave room for compiler/linker. -# The number 2G is choosed heuristically to both support in CircleCI and large enough for RBE. +# The number 2G is chosen heuristically to both support large VM and small VM with RBE. # Startup options cannot be selected via config. startup --host_jvm_args=-Xmx2g @@ -201,8 +201,6 @@ build:remote --spawn_strategy=remote,sandboxed,local build:remote --strategy=Javac=remote,sandboxed,local build:remote --strategy=Closure=remote,sandboxed,local build:remote --strategy=Genrule=remote,sandboxed,local -# rules_rust is not remote runnable (yet) -build:remote --strategy=Rustc=sandboxed,local build:remote --remote_timeout=7200 build:remote --auth_enabled=true build:remote --remote_download_toplevel diff --git a/bazel/repositories.bzl b/bazel/repositories.bzl index 583aa2a84..7b38a6368 100644 --- a/bazel/repositories.bzl +++ b/bazel/repositories.bzl @@ -1,7 +1,7 @@ load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") -ENVOY_COMMIT = "2097fe908f2abb718757dbd4087d793c861d7c5a" # October 12th, 2020 -ENVOY_SHA = "323360544ee355f0eddea742b31a80a94899090db1d64029cd22880083b311c0" +ENVOY_COMMIT = "f95f5391b0b8683081ec786ea946026594955fc6" # October 21st, 2020 +ENVOY_SHA = "1129dcb0e18ec79ab56f59cbe5150f564e32d9221edd7ba1bd84b9b5377cbe35" HDR_HISTOGRAM_C_VERSION = "0.11.2" # October 12th, 2020 HDR_HISTOGRAM_C_SHA = "637f28b5f64de2e268131e4e34e6eef0b91cf5ff99167db447d9b2825eae6bad" @@ -11,7 +11,9 @@ def nighthawk_dependencies(): name = "envoy", sha256 = ENVOY_SHA, strip_prefix = "envoy-%s" % ENVOY_COMMIT, + # // clang-format off: Envoy's format check: Only repository_locations.bzl may contains URL references url = "https://github.com/envoyproxy/envoy/archive/%s.tar.gz" % 
ENVOY_COMMIT, + # // clang-format on ) http_archive( name = "dep_hdrhistogram_c", @@ -50,5 +52,7 @@ cc_library( """, sha256 = HDR_HISTOGRAM_C_SHA, strip_prefix = "HdrHistogram_c-%s" % HDR_HISTOGRAM_C_VERSION, + # // clang-format off url = "https://github.com/HdrHistogram/HdrHistogram_c/archive/%s.tar.gz" % HDR_HISTOGRAM_C_VERSION, + # // clang-format on ) diff --git a/source/client/stream_decoder.cc b/source/client/stream_decoder.cc index 21bf877f9..86f4c681d 100644 --- a/source/client/stream_decoder.cc +++ b/source/client/stream_decoder.cc @@ -21,9 +21,11 @@ void StreamDecoder::decodeHeaders(Envoy::Http::ResponseHeaderMapPtr&& headers, b stream_info_.response_code_ = static_cast(response_code); if (!latency_response_header_name_.empty()) { const auto timing_header_name = Envoy::Http::LowerCaseString(latency_response_header_name_); - const Envoy::Http::HeaderEntry* timing_header = response_headers_->get(timing_header_name); - if (timing_header != nullptr) { - absl::string_view timing_value = timing_header->value().getStringView(); + const Envoy::Http::HeaderMap::GetResult& timing_header = + response_headers_->get(timing_header_name); + if (!timing_header.empty()) { + absl::string_view timing_value = + timing_header.size() == 1 ? 
timing_header[0]->value().getStringView() : "multiple values"; int64_t origin_delta; if (absl::SimpleAtoi(timing_value, &origin_delta) && origin_delta >= 0) { origin_latency_statistic_.addValue(origin_delta); @@ -142,6 +144,7 @@ StreamDecoder::streamResetReasonToResponseFlag(Envoy::Http::StreamResetReason re return Envoy::StreamInfo::ResponseFlag::LocalReset; case Envoy::Http::StreamResetReason::Overflow: return Envoy::StreamInfo::ResponseFlag::UpstreamOverflow; + case Envoy::Http::StreamResetReason::ConnectError: case Envoy::Http::StreamResetReason::RemoteReset: case Envoy::Http::StreamResetReason::RemoteRefusedStreamReset: return Envoy::StreamInfo::ResponseFlag::UpstreamRemoteReset; diff --git a/source/server/http_filter_config_base.cc b/source/server/http_filter_config_base.cc index a8dc933e9..b76505023 100644 --- a/source/server/http_filter_config_base.cc +++ b/source/server/http_filter_config_base.cc @@ -13,17 +13,23 @@ FilterConfigurationBase::FilterConfigurationBase( void FilterConfigurationBase::computeEffectiveConfiguration( const Envoy::Http::RequestHeaderMap& headers) { - const auto* request_config_header = headers.get(TestServer::HeaderNames::get().TestServerConfig); - if (request_config_header) { + const auto& request_config_header = headers.get(TestServer::HeaderNames::get().TestServerConfig); + if (request_config_header.size() == 1) { + // We could be more flexible and look for the first request header that has a value, + // but without a proper understanding of a real use case for that, we are assuming that any + // existence of duplicate headers here is an error. 
nighthawk::server::ResponseOptions response_options = *server_config_; std::string error_message; - if (Configuration::mergeJsonConfig(request_config_header->value().getStringView(), + if (Configuration::mergeJsonConfig(request_config_header[0]->value().getStringView(), response_options, error_message)) { effective_config_ = std::make_shared(std::move(response_options)); } else { effective_config_ = absl::InvalidArgumentError(error_message); } + } else if (request_config_header.size() > 1) { + effective_config_ = absl::InvalidArgumentError( + "Received multiple configuration headers in the request, expected only one."); } } diff --git a/test/request_source/request_source_plugin_test.cc b/test/request_source/request_source_plugin_test.cc index a7fa62cec..fc20b64ea 100644 --- a/test/request_source/request_source_plugin_test.cc +++ b/test/request_source/request_source_plugin_test.cc @@ -90,7 +90,8 @@ TEST_F(StubRequestSourcePluginTest, CreateRequestSourcePluginCreatesWorkingPlugi Nighthawk::RequestGenerator generator = plugin->get(); Nighthawk::RequestPtr request = generator(); Nighthawk::HeaderMapPtr header = request->header(); - EXPECT_EQ(header->get(Envoy::Http::LowerCaseString("test_value"))->value().getStringView(), + ASSERT_EQ(header->get(Envoy::Http::LowerCaseString("test_value")).size(), 1); + EXPECT_EQ(header->get(Envoy::Http::LowerCaseString("test_value"))[0]->value().getStringView(), absl::string_view(std::to_string(test_value))); } TEST_F(FileBasedRequestSourcePluginTest, CreateEmptyConfigProtoCreatesCorrectType) { diff --git a/test/server/http_dynamic_delay_filter_integration_test.cc b/test/server/http_dynamic_delay_filter_integration_test.cc index 3840a11ec..0dfdb797b 100644 --- a/test/server/http_dynamic_delay_filter_integration_test.cc +++ b/test/server/http_dynamic_delay_filter_integration_test.cc @@ -48,19 +48,21 @@ name: dynamic-delay // Don't send any config request header ... getResponse(ResponseOrigin::UPSTREAM); // ... 
we shouldn't observe any delay being requested via the upstream request headers. - EXPECT_EQ(upstream_request_->headers().get(kDelayHeaderString), nullptr); + EXPECT_TRUE(upstream_request_->headers().get(kDelayHeaderString).empty()); // Send a config request header with an empty / default configuration .... setRequestLevelConfiguration("{}"); getResponse(ResponseOrigin::UPSTREAM); // ... we shouldn't observe any delay being requested via the upstream request headers. - EXPECT_EQ(upstream_request_->headers().get(kDelayHeaderString), nullptr); + EXPECT_TRUE(upstream_request_->headers().get(kDelayHeaderString).empty()); // Send a config request header requesting a 1.6s delay... setRequestLevelConfiguration("{static_delay: \"1.6s\"}"); getResponse(ResponseOrigin::UPSTREAM); // ...we should observe a delay of 1.6s in the upstream request. - EXPECT_EQ(upstream_request_->headers().get(kDelayHeaderString)->value().getStringView(), "1600"); + ASSERT_EQ(upstream_request_->headers().get(kDelayHeaderString).size(), 1); + EXPECT_EQ(upstream_request_->headers().get(kDelayHeaderString)[0]->value().getStringView(), + "1600"); } // Verify expectations with static/file-based static_delay configuration. @@ -75,13 +77,17 @@ name: dynamic-delay // Without any request-level configuration, we expect the statically configured static delay to // apply. getResponse(ResponseOrigin::UPSTREAM); - EXPECT_EQ(upstream_request_->headers().get(kDelayHeaderString)->value().getStringView(), "1330"); + ASSERT_EQ(upstream_request_->headers().get(kDelayHeaderString).size(), 1); + EXPECT_EQ(upstream_request_->headers().get(kDelayHeaderString)[0]->value().getStringView(), + "1330"); // With an empty request-level configuration, we expect the statically configured static delay to // apply. 
setRequestLevelConfiguration("{}"); getResponse(ResponseOrigin::UPSTREAM); - EXPECT_EQ(upstream_request_->headers().get(kDelayHeaderString)->value().getStringView(), "1330"); + ASSERT_EQ(upstream_request_->headers().get(kDelayHeaderString).size(), 1); + EXPECT_EQ(upstream_request_->headers().get(kDelayHeaderString)[0]->value().getStringView(), + "1330"); // Overriding the statically configured static delay via request-level configuration should be // reflected in the output. @@ -92,7 +98,7 @@ name: dynamic-delay // However, the seconds part is set to '0', which equates to the default of the underlying int // type, and the fact that we are using proto3, which doesn't merge default values. // Hence the following expectation will fail, as it yields 1200 instead of the expected 200. - // EXPECT_EQ(upstream_request_->headers().get(kDelayHeaderString)->value().getStringView(), + // EXPECT_EQ(upstream_request_->headers().get(kDelayHeaderString)[0]->value().getStringView(), // "200"); // Overriding the statically configured static delay via request-level configuration should be @@ -100,7 +106,9 @@ name: dynamic-delay setRequestLevelConfiguration("{static_delay: \"2.2s\"}"); getResponse(ResponseOrigin::UPSTREAM); // 2.2 seconds -> 2200 ms. - EXPECT_EQ(upstream_request_->headers().get(kDelayHeaderString)->value().getStringView(), "2200"); + ASSERT_EQ(upstream_request_->headers().get(kDelayHeaderString).size(), 1); + EXPECT_EQ(upstream_request_->headers().get(kDelayHeaderString)[0]->value().getStringView(), + "2200"); } // Verify expectations with static/file-based concurrency_based_linear_delay configuration. @@ -116,7 +124,8 @@ name: dynamic-delay getResponse(ResponseOrigin::UPSTREAM); // Based on the algorithm of concurrency_based_linear_delay, for the first request we expect to // observe the configured minimal_delay + concurrency_delay_factor = 0.06s -> 60ms. 
- EXPECT_EQ(upstream_request_->headers().get(kDelayHeaderString)->value().getStringView(), "60"); + ASSERT_EQ(upstream_request_->headers().get(kDelayHeaderString).size(), 1); + EXPECT_EQ(upstream_request_->headers().get(kDelayHeaderString)[0]->value().getStringView(), "60"); } class ComputeTest : public testing::Test { diff --git a/test/server/http_filter_base_test.cc b/test/server/http_filter_base_test.cc index 212bd59b5..adf90896f 100644 --- a/test/server/http_filter_base_test.cc +++ b/test/server/http_filter_base_test.cc @@ -90,5 +90,25 @@ TEST_P(HttpFilterBaseIntegrationTest, EmptyRequestLevelConfigurationShouldFail) EXPECT_THAT(response->body(), HasSubstr(kBadConfigErrorSentinel)); } +TEST_P(HttpFilterBaseIntegrationTest, MultipleValidConfigurationHeadersFails) { + // Make sure we fail when two valid configuration headers are send. + setRequestLevelConfiguration("{}"); + appendRequestLevelConfiguration("{}"); + Envoy::IntegrationStreamDecoderPtr response = getResponse(ResponseOrigin::EXTENSION); + ASSERT_TRUE(response->complete()); + EXPECT_THAT(response->body(), + HasSubstr("Received multiple configuration headers in the request")); +} + +TEST_P(HttpFilterBaseIntegrationTest, SingleValidPlusEmptyConfigurationHeadersFails) { + // Make sure we fail when both a valid configuration plus an empty configuration header is send. 
+ setRequestLevelConfiguration("{}"); + appendRequestLevelConfiguration(""); + Envoy::IntegrationStreamDecoderPtr response = getResponse(ResponseOrigin::EXTENSION); + ASSERT_TRUE(response->complete()); + EXPECT_THAT(response->body(), + HasSubstr("Received multiple configuration headers in the request")); +} + } // namespace } // namespace Nighthawk \ No newline at end of file diff --git a/test/server/http_filter_integration_test_base.cc b/test/server/http_filter_integration_test_base.cc index d204133ad..009ae2928 100644 --- a/test/server/http_filter_integration_test_base.cc +++ b/test/server/http_filter_integration_test_base.cc @@ -21,6 +21,12 @@ void HttpFilterIntegrationTestBase::setRequestLevelConfiguration( setRequestHeader(Server::TestServer::HeaderNames::get().TestServerConfig, request_level_config); } +void HttpFilterIntegrationTestBase::appendRequestLevelConfiguration( + absl::string_view request_level_config) { + appendRequestHeader(Server::TestServer::HeaderNames::get().TestServerConfig, + request_level_config); +} + void HttpFilterIntegrationTestBase::switchToPostWithEntityBody() { setRequestHeader(Envoy::Http::Headers::get().Method, Envoy::Http::Headers::get().MethodValues.Post); @@ -31,6 +37,11 @@ void HttpFilterIntegrationTestBase::setRequestHeader( request_headers_.setCopy(header_name, header_value); } +void HttpFilterIntegrationTestBase::appendRequestHeader( + const Envoy::Http::LowerCaseString& header_name, absl::string_view header_value) { + request_headers_.addCopy(header_name, header_value); +} + Envoy::IntegrationStreamDecoderPtr HttpFilterIntegrationTestBase::getResponse(ResponseOrigin expected_origin) { cleanupUpstreamAndDownstream(); diff --git a/test/server/http_filter_integration_test_base.h b/test/server/http_filter_integration_test_base.h index 8027753ec..53b5cc79e 100644 --- a/test/server/http_filter_integration_test_base.h +++ b/test/server/http_filter_integration_test_base.h @@ -52,6 +52,16 @@ class HttpFilterIntegrationTestBase : 
public Envoy::HttpIntegrationTest { */ void setRequestLevelConfiguration(absl::string_view request_level_config); + /** + * Make getResponse add request-level configuration. Test server extensions read that + * configuration and merge it with their static configuration to determine a final effective + * configuration. See TestServerConfig in well_known_headers.h for the up to date header name. + * + * @param request_level_config Configuration to be delivered by request-header in future calls to + * getResponse(). For example: "{response_body_size:1024}". + */ + void appendRequestLevelConfiguration(absl::string_view request_level_config); + /** * Switch getResponse() to use the POST request method with an entity body. * Doing so will make tests hit a different code paths in extensions. @@ -67,6 +77,15 @@ class HttpFilterIntegrationTestBase : public Envoy::HttpIntegrationTest { void setRequestHeader(const Envoy::Http::LowerCaseString& header_name, absl::string_view header_value); + /** + * Appends a request header value. + * + * @param header_name Name of the request header to set. + * @param header_value Value to set for the request header. + */ + void appendRequestHeader(const Envoy::Http::LowerCaseString& header_name, + absl::string_view header_value); + /** * Fetch a response, according to the options specified by the class methods. By default, * simulates a GET request with minimal headers. 
diff --git a/test/server/http_test_server_filter_integration_test.cc b/test/server/http_test_server_filter_integration_test.cc index 8b355101f..5850dab7e 100644 --- a/test/server/http_test_server_filter_integration_test.cc +++ b/test/server/http_test_server_filter_integration_test.cc @@ -38,8 +38,8 @@ class HttpTestServerIntegrationTest : public HttpFilterIntegrationTestBase, EXPECT_EQ("200", response->headers().Status()->value().getStringView()); if (expect_header) { auto inserted_header = response->headers().get(Envoy::Http::LowerCaseString("x-supplied-by")); - ASSERT_NE(nullptr, inserted_header); - EXPECT_EQ("nighthawk-test-server", inserted_header->value().getStringView()); + ASSERT_EQ(1, inserted_header.size()); + EXPECT_EQ("nighthawk-test-server", inserted_header[0]->value().getStringView()); } if (response_body_size == 0) { EXPECT_EQ(nullptr, response->headers().ContentType()); @@ -107,8 +107,10 @@ TEST_P(HttpTestServerIntegrationTest, TestHeaderConfig) { Envoy::IntegrationStreamDecoderPtr response = getResponse(ResponseOrigin::EXTENSION); ASSERT_TRUE(response->complete()); EXPECT_EQ("200", response->headers().Status()->value().getStringView()); - EXPECT_EQ("bar2", - response->headers().get(Envoy::Http::LowerCaseString("foo"))->value().getStringView()); + ASSERT_EQ(1, response->headers().get(Envoy::Http::LowerCaseString("foo")).size()); + EXPECT_EQ( + "bar2", + response->headers().get(Envoy::Http::LowerCaseString("foo"))[0]->value().getStringView()); EXPECT_EQ(std::string(10, 'a'), response->body()); } @@ -180,8 +182,10 @@ TEST_P(HttpTestServerIntegrationTest, TestNoStaticConfigHeaderConfig) { ASSERT_TRUE(response->complete()); EXPECT_EQ("200", response->headers().Status()->value().getStringView()); - EXPECT_EQ("bar2", - response->headers().get(Envoy::Http::LowerCaseString("foo"))->value().getStringView()); + ASSERT_EQ(1, response->headers().get(Envoy::Http::LowerCaseString("foo")).size()); + EXPECT_EQ( + "bar2", + 
response->headers().get(Envoy::Http::LowerCaseString("foo"))[0]->value().getStringView()); EXPECT_EQ("", response->body()); } diff --git a/test/server/http_time_tracking_filter_integration_test.cc b/test/server/http_time_tracking_filter_integration_test.cc index 5f1348c56..c61124392 100644 --- a/test/server/http_time_tracking_filter_integration_test.cc +++ b/test/server/http_time_tracking_filter_integration_test.cc @@ -44,16 +44,15 @@ TEST_P(HttpTimeTrackingIntegrationTest, ReturnsPositiveLatencyForStaticConfigura // As the first request doesn't have a prior one, we should not observe a delta. Envoy::IntegrationStreamDecoderPtr response = getResponse(ResponseOrigin::UPSTREAM); int64_t latency; - const Envoy::Http::HeaderEntry* latency_header_1 = - response->headers().get(Envoy::Http::LowerCaseString(kLatencyResponseHeaderName)); - EXPECT_EQ(latency_header_1, nullptr); + EXPECT_EQ( + response->headers().get(Envoy::Http::LowerCaseString(kLatencyResponseHeaderName)).size(), 0); // On the second request we should observe a delta. response = getResponse(ResponseOrigin::UPSTREAM); - const Envoy::Http::HeaderEntry* latency_header_2 = + const Envoy::Http::HeaderMap::GetResult& latency_header = response->headers().get(Envoy::Http::LowerCaseString(kLatencyResponseHeaderName)); - ASSERT_NE(latency_header_2, nullptr); - EXPECT_TRUE(absl::SimpleAtoi(latency_header_2->value().getStringView(), &latency)); + ASSERT_EQ(latency_header.size(), 1); + EXPECT_TRUE(absl::SimpleAtoi(latency_header[0]->value().getStringView(), &latency)); EXPECT_GT(latency, 0); } @@ -63,18 +62,18 @@ TEST_P(HttpTimeTrackingIntegrationTest, ReturnsPositiveLatencyForPerRequestConfi // As the first request doesn't have a prior one, we should not observe a delta. 
setRequestLevelConfiguration("{}"); Envoy::IntegrationStreamDecoderPtr response = getResponse(ResponseOrigin::UPSTREAM); - EXPECT_EQ(response->headers().get(Envoy::Http::LowerCaseString(kLatencyResponseHeaderName)), - nullptr); + EXPECT_TRUE( + response->headers().get(Envoy::Http::LowerCaseString(kLatencyResponseHeaderName)).empty()); // With request level configuration indicating that the timing header should be emitted, // we should be able to observe it. setRequestLevelConfiguration(fmt::format("{{{}}}", kDefaultProtoFragment)); response = getResponse(ResponseOrigin::UPSTREAM); - const Envoy::Http::HeaderEntry* latency_header = + const Envoy::Http::HeaderMap::GetResult& latency_header = response->headers().get(Envoy::Http::LowerCaseString(kLatencyResponseHeaderName)); - ASSERT_NE(latency_header, nullptr); + ASSERT_EQ(latency_header.size(), 1); int64_t latency; - EXPECT_TRUE(absl::SimpleAtoi(latency_header->value().getStringView(), &latency)); + EXPECT_TRUE(absl::SimpleAtoi(latency_header[0]->value().getStringView(), &latency)); // TODO(oschaaf): figure out if we can use simtime here, and verify actual timing matches // what we'd expect using that. 
EXPECT_GT(latency, 0); diff --git a/test/stream_decoder_test.cc b/test/stream_decoder_test.cc index 8c426bbd8..4603c0e65 100644 --- a/test/stream_decoder_test.cc +++ b/test/stream_decoder_test.cc @@ -211,6 +211,9 @@ TEST_F(StreamDecoderTest, StreamResetReasonToResponseFlag) { ASSERT_EQ(StreamDecoder::streamResetReasonToResponseFlag( Envoy::Http::StreamResetReason::RemoteRefusedStreamReset), Envoy::StreamInfo::ResponseFlag::UpstreamRemoteReset); + ASSERT_EQ( + StreamDecoder::streamResetReasonToResponseFlag(Envoy::Http::StreamResetReason::ConnectError), + Envoy::StreamInfo::ResponseFlag::UpstreamRemoteReset); } // This test parameterization structure carries the response header name that ought to be treated @@ -245,5 +248,22 @@ TEST_P(LatencyTrackingViaResponseHeaderTest, LatencyTrackingViaResponseHeader) { EXPECT_EQ(origin_latency_statistic_.count(), expected_count); } +// Test that a single response carrying multiple valid latency response headers does not +// get tracked. This will also yield a burst of warnings, which we unfortunately cannot +// easily verify here. 
+TEST_F(StreamDecoderTest, LatencyTrackingWithMultipleResponseHeadersFails) { + const std::string kLatencyTrackingResponseHeader = "latency-in-response-header"; + auto decoder = new StreamDecoder( + *dispatcher_, time_system_, *this, [](bool, bool) {}, connect_statistic_, latency_statistic_, + response_header_size_statistic_, response_body_size_statistic_, origin_latency_statistic_, + request_headers_, false, 0, random_generator_, http_tracer_, kLatencyTrackingResponseHeader); + Envoy::Http::ResponseHeaderMapPtr headers{ + new Envoy::Http::TestResponseHeaderMapImpl{{":status", "200"}, + {kLatencyTrackingResponseHeader, "1"}, + {kLatencyTrackingResponseHeader, "2"}}}; + decoder->decodeHeaders(std::move(headers), true); + EXPECT_EQ(origin_latency_statistic_.count(), 0); +} + } // namespace Client } // namespace Nighthawk From 578c88347632d1568070663cc106e6cd31b5fe97 Mon Sep 17 00:00:00 2001 From: wjuan-AFK <66322422+wjuan-AFK@users.noreply.github.com> Date: Thu, 22 Oct 2020 16:02:21 -0400 Subject: [PATCH 29/63] Adding proto only Request Source Config factory (#560) This is a more usable version of the Request Source Config Factory similar to the FIle Based Config Factory which is easier to pass through the CLI. 
Signed-off-by: William Juan <66322422+wjuan-AFK@users.noreply.github.com> --- .../request_source_plugin.proto | 33 +++- .../request_options_list_plugin_impl.cc | 65 +++++-- .../request_options_list_plugin_impl.h | 80 +++++--- .../request_source_plugin_test.cc | 183 +++++++++++++++--- 4 files changed, 290 insertions(+), 71 deletions(-) diff --git a/api/request_source/request_source_plugin.proto b/api/request_source/request_source_plugin.proto index 1af69a60a..5d56802d9 100644 --- a/api/request_source/request_source_plugin.proto +++ b/api/request_source/request_source_plugin.proto @@ -7,24 +7,41 @@ import "google/protobuf/wrappers.proto"; import "validate/validate.proto"; import "api/client/options.proto"; -// Configuration for FileBasedPluginRequestSource (plugin name: -// "nighthawk.file-based-request-source-plugin") +// Configuration for OptionsListFromFileRequestSourceFactory (plugin name: +// "nighthawk.file-options-list-request-source-plugin") // The factory will load the RequestOptionsList from the file, and then passes it to the // requestSource it generates. The resulting request source will loop over the RequestOptionsList it // is passed. -message FileBasedPluginConfig { +message FileBasedOptionsListRequestSourceConfig { // The file_path is the path to a file that contains a RequestOptionList in json or yaml format. + // This field is required. string file_path = 1; // The pluginfactory makes requestSources that will generate requests from the RequestOptionList // up to num_requests number of times. If num_requests exceeds the number of RequestOptions in the - // RequestOptionList located in the file at file_path, it will loop. num_requests = 0 means no - // limit on the number of requests to be produced. - google.protobuf.UInt32Value num_requests = 2 [(validate.rules).uint32 = {gte: 0, lte: 1000000}]; - // The pluginfactory will load the file located in file_path as long as it is below max_file_size, - // if it's too large it will throw an error. 
+ // RequestOptionList located in the file at file_path, it will loop. num_requests = 0 means it + // will loop indefinitely, though it will still terminate by normal mechanisms. + uint32 num_requests = 2; + // The pluginfactory will load the file located in file_path as long as it is below max_file_size + // in bytes, if it's too large it will throw an error. This field is optional with a default of + // 1000000. google.protobuf.UInt32Value max_file_size = 3 [(validate.rules).uint32 = {lte: 1000000}]; } +// Configuration for OptionsListFromProtoRequestSourceFactory (plugin name: +// "nighthawk.in-line-options-list-request-source-plugin") +// The resulting request source will loop over the RequestOptionsList it +// is passed. +message InLineOptionsListRequestSourceConfig { + // The options_list will be used to generate Requests in the RequestSource. This field is + // required. + nighthawk.client.RequestOptionsList options_list = 1; + // The pluginfactory makes requestSources that will generate requests from the RequestOptionList + // up to num_requests number of times. If num_requests exceeds the number of RequestOptions in the + // options_list, it will loop. num_requests = 0 means it will loop indefinitely, though it will + // still terminate by normal mechanisms. + uint32 num_requests = 2; +} + // Configuration for StubPluginRequestSource (plugin name: "nighthawk.stub-request-source-plugin") // The plugin does nothing. 
This is for testing and comparison of the Request Source Plugin Factory // mechanism using a minimal version of plugin that does not require a more complicated proto or diff --git a/source/request_source/request_options_list_plugin_impl.cc b/source/request_source/request_options_list_plugin_impl.cc index 7fd25f3aa..bdb9a3fbc 100644 --- a/source/request_source/request_options_list_plugin_impl.cc +++ b/source/request_source/request_options_list_plugin_impl.cc @@ -10,23 +10,24 @@ #include "common/request_source_impl.h" namespace Nighthawk { -std::string OptionsListFromFileRequestSourceFactory::name() const { +std::string FileBasedOptionsListRequestSourceFactory::name() const { return "nighthawk.file-based-request-source-plugin"; } -Envoy::ProtobufTypes::MessagePtr OptionsListFromFileRequestSourceFactory::createEmptyConfigProto() { - return std::make_unique(); +Envoy::ProtobufTypes::MessagePtr +FileBasedOptionsListRequestSourceFactory::createEmptyConfigProto() { + return std::make_unique(); } -RequestSourcePtr OptionsListFromFileRequestSourceFactory::createRequestSourcePlugin( +RequestSourcePtr FileBasedOptionsListRequestSourceFactory::createRequestSourcePlugin( const Envoy::Protobuf::Message& message, Envoy::Api::Api& api, Envoy::Http::RequestHeaderMapPtr header) { const auto& any = dynamic_cast(message); - nighthawk::request_source::FileBasedPluginConfig config; + nighthawk::request_source::FileBasedOptionsListRequestSourceConfig config; Envoy::MessageUtil util; - + uint32_t max_file_size = config.has_max_file_size() ? 
config.max_file_size().value() : 1000000; util.unpackTo(any, config); - if (api.fileSystem().fileSize(config.file_path()) > config.max_file_size().value()) { + if (api.fileSystem().fileSize(config.file_path()) > max_file_size) { throw NighthawkException("file size must be less than max_file_size"); } @@ -34,23 +35,54 @@ RequestSourcePtr OptionsListFromFileRequestSourceFactory::createRequestSourcePlu { Envoy::Thread::LockGuard lock_guard(file_lock_); // Reading the file only the first time. - if (options_list_.options_size() == 0) { - util.loadFromFile(config.file_path(), options_list_, + if (!options_list_.has_value()) { + nighthawk::client::RequestOptionsList loaded_list; + util.loadFromFile(config.file_path(), loaded_list, Envoy::ProtobufMessage::getStrictValidationVisitor(), api, true); + options_list_ = loaded_list; + } + } + return std::make_unique(config.num_requests(), std::move(header), + options_list_.value()); +} + +REGISTER_FACTORY(FileBasedOptionsListRequestSourceFactory, RequestSourcePluginConfigFactory); + +std::string InLineOptionsListRequestSourceFactory::name() const { + return "nighthawk.in-line-options-list-request-source-plugin"; +} + +Envoy::ProtobufTypes::MessagePtr InLineOptionsListRequestSourceFactory::createEmptyConfigProto() { + return std::make_unique(); +} + +RequestSourcePtr InLineOptionsListRequestSourceFactory::createRequestSourcePlugin( + const Envoy::Protobuf::Message& message, Envoy::Api::Api&, + Envoy::Http::RequestHeaderMapPtr header) { + const auto& any = dynamic_cast(message); + nighthawk::request_source::InLineOptionsListRequestSourceConfig config; + Envoy::MessageUtil::unpackTo(any, config); + // Locking to avoid issues with multiple threads calling this at the same time and trying to set + // the options_list_ + { + Envoy::Thread::LockGuard lock_guard(options_list_lock_); + // Only loading the config into memory the first time. 
+ if (!options_list_.has_value()) { + options_list_ = config.options_list(); } } - return std::make_unique(config.num_requests().value(), - std::move(header), options_list_); + return std::make_unique(config.num_requests(), std::move(header), + options_list_.value()); } -REGISTER_FACTORY(OptionsListFromFileRequestSourceFactory, RequestSourcePluginConfigFactory); +REGISTER_FACTORY(InLineOptionsListRequestSourceFactory, RequestSourcePluginConfigFactory); -RequestOptionsListRequestSource::RequestOptionsListRequestSource( +OptionsListRequestSource::OptionsListRequestSource( const uint32_t total_requests, Envoy::Http::RequestHeaderMapPtr header, const nighthawk::client::RequestOptionsList& options_list) : header_(std::move(header)), options_list_(options_list), total_requests_(total_requests) {} -RequestGenerator RequestOptionsListRequestSource::get() { +RequestGenerator OptionsListRequestSource::get() { request_count_.push_back(0); uint32_t& lambda_counter = request_count_.back(); RequestGenerator request_generator = [this, lambda_counter]() mutable -> RequestPtr { @@ -72,7 +104,8 @@ RequestGenerator RequestOptionsListRequestSource::get() { header->setMethod(envoy::config::core::v3::RequestMethod_Name(request_option.request_method())); const uint32_t content_length = request_option.request_body_size().value(); if (content_length > 0) { - header->setContentLength(content_length); + header->setContentLength( + content_length); // Content length is used later in stream_decoder to populate the body } for (const envoy::config::core::v3::HeaderValueOption& option_header : request_option.request_headers()) { @@ -84,6 +117,6 @@ RequestGenerator RequestOptionsListRequestSource::get() { return request_generator; } -void RequestOptionsListRequestSource::initOnThread() {} +void OptionsListRequestSource::initOnThread() {} } // namespace Nighthawk \ No newline at end of file diff --git a/source/request_source/request_options_list_plugin_impl.h 
b/source/request_source/request_options_list_plugin_impl.h index 3fbf485ff..8de3d7ef9 100644 --- a/source/request_source/request_options_list_plugin_impl.h +++ b/source/request_source/request_options_list_plugin_impl.h @@ -1,4 +1,4 @@ -// Implementations of RequestSourceConfigFactories that make a RequestOptionsListRequestSource. +// Implementations of RequestSourceConfigFactories that make a OptionsListRequestSource. #pragma once #include "envoy/registry/registry.h" @@ -25,14 +25,13 @@ namespace Nighthawk { // RequestGenerator produced by get() will use options from the options_list to overwrite values in // the default header, and create new requests. if total_requests is greater than the length of // options_list, it will loop. This is not thread safe. -class RequestOptionsListRequestSource : public RequestSource { +class OptionsListRequestSource : public RequestSource { public: - RequestOptionsListRequestSource(const uint32_t total_requests, - Envoy::Http::RequestHeaderMapPtr header, - const nighthawk::client::RequestOptionsList& options_list); + OptionsListRequestSource(const uint32_t total_requests, Envoy::Http::RequestHeaderMapPtr header, + const nighthawk::client::RequestOptionsList& options_list); // This get function is not thread safe, because multiple threads calling get simultaneously will - // result in a collision as it attempts to update its request_count_. + // result in a collision. RequestGenerator get() override; // default implementation @@ -45,40 +44,75 @@ class RequestOptionsListRequestSource : public RequestSource { const uint32_t total_requests_; }; -// Factory that creates a RequestOptionsListRequestSource from a FileBasedPluginConfig proto. -// Registered as an Envoy plugin. -// Implementation of RequestSourceConfigFactory which produces a RequestSource that keeps an -// RequestOptionsList in memory, and loads it with the RequestOptions taken from a file. All plugins -// configuration are specified in the request_source_plugin.proto. 
This class is not thread-safe, -// because it loads its RequestOptionlist in memory from a file when first called. -// Usage: assume you are passed an appropriate Any type object called config, an Api object called -// api, and a default header called header. auto& config_factory = +// Factory that creates a OptionsListRequestSource from a FileBasedOptionsListRequestSourceConfig +// proto. Registered as an Envoy plugin. Implementation of RequestSourceConfigFactory which produces +// a RequestSource that keeps an RequestOptionsList in memory, and loads it with the RequestOptions +// taken from a file. All plugins configuration are specified in the request_source_plugin.proto. +// This class is thread-safe, +// Usage: assume you are passed an appropriate Any type object called config, an Api +// object called api, and a default header called header. auto& config_factory = // Envoy::Config::Utility::getAndCheckFactoryByName( // "nighthawk.file-based-request-source-plugin"); // RequestSourcePtr plugin = // config_factory.createRequestSourcePlugin(config, std::move(api), std::move(header)); -class OptionsListFromFileRequestSourceFactory : public virtual RequestSourcePluginConfigFactory { +class FileBasedOptionsListRequestSourceFactory : public virtual RequestSourcePluginConfigFactory { public: std::string name() const override; Envoy::ProtobufTypes::MessagePtr createEmptyConfigProto() override; - // This implementation is not thread safe. Only the first call to createRequestSourcePlugin will - // load the file from memory and subsequent calls just make a copy of the options_list that was - // already loaded. The OptionsListFromFileRequestSourceFactory will not work with multiple - // different files for this reason. - // This method will also error if the file can not be loaded correctly, e.g. the file is too large - // or could not be found. + // This implementation is thread safe. 
There is currently a behaviour such that only the first + // call to createRequestSourcePlugin will load the options list into memory and subsequent calls + // just make a copy of the options_list that was already loaded. The + // FileBasedOptionsListRequestSourceFactory will not work with multiple different files for this + // reason. + // TODO: This memory saving is likely a premature optimization, and should be removed. + // This method will also error if the + // file can not be loaded correctly, e.g. the file is too large or could not be found. RequestSourcePtr createRequestSourcePlugin(const Envoy::Protobuf::Message& message, Envoy::Api::Api& api, Envoy::Http::RequestHeaderMapPtr header) override; private: Envoy::Thread::MutexBasicLockable file_lock_; - nighthawk::client::RequestOptionsList options_list_; + absl::optional options_list_; }; // This factory will be activated through RequestSourceFactory in factories.h -DECLARE_FACTORY(OptionsListFromFileRequestSourceFactory); +DECLARE_FACTORY(FileBasedOptionsListRequestSourceFactory); + +// Factory that creates a OptionsListRequestSource from a InLineOptionsListRequestSourceConfig +// proto. Registered as an Envoy plugin. Implementation of RequestSourceConfigFactory which produces +// a RequestSource that keeps an RequestOptionsList in memory, and loads it with the RequestOptions +// passed to it from the config. All plugins configuration are specified in the +// request_source_plugin.proto. +// This class is thread-safe, +// Usage: assume you are passed an appropriate Any type object called +// config, an Api object called api, and a default header called header. 
auto& config_factory = +// Envoy::Config::Utility::getAndCheckFactoryByName( +// "nighthawk.in-line-options-list-request-source-plugin"); +// RequestSourcePtr plugin = +// config_factory.createRequestSourcePlugin(config, std::move(api), std::move(header)); + +class InLineOptionsListRequestSourceFactory : public virtual RequestSourcePluginConfigFactory { +public: + std::string name() const override; + Envoy::ProtobufTypes::MessagePtr createEmptyConfigProto() override; + + // This implementation is thread safe. There is currently a behaviour such that only the first + // call to createRequestSourcePlugin will load the options list into memory and subsequent calls + // just make a copy of the options_list that was already loaded. + // TODO: This memory saving is likely a premature optimization, and should be removed. + RequestSourcePtr createRequestSourcePlugin(const Envoy::Protobuf::Message& message, + Envoy::Api::Api& api, + Envoy::Http::RequestHeaderMapPtr header) override; + +private: + Envoy::Thread::MutexBasicLockable options_list_lock_; + absl::optional options_list_; +}; + +// This factory will be activated through RequestSourceFactory in factories.h +DECLARE_FACTORY(InLineOptionsListRequestSourceFactory); } // namespace Nighthawk \ No newline at end of file diff --git a/test/request_source/request_source_plugin_test.cc b/test/request_source/request_source_plugin_test.cc index fc20b64ea..eee187522 100644 --- a/test/request_source/request_source_plugin_test.cc +++ b/test/request_source/request_source_plugin_test.cc @@ -17,10 +17,25 @@ namespace Nighthawk { namespace { -using nighthawk::request_source::FileBasedPluginConfig; +using nighthawk::request_source::FileBasedOptionsListRequestSourceConfig; +using nighthawk::request_source::InLineOptionsListRequestSourceConfig; using nighthawk::request_source::StubPluginConfig; using ::testing::NiceMock; using ::testing::Test; +nighthawk::request_source::FileBasedOptionsListRequestSourceConfig 
+MakeFileBasedPluginConfigWithTestYaml(absl::string_view request_file) { + nighthawk::request_source::FileBasedOptionsListRequestSourceConfig config; + config.mutable_file_path()->assign(request_file); + config.mutable_max_file_size()->set_value(4000); + return config; +} +nighthawk::request_source::InLineOptionsListRequestSourceConfig +MakeInLinePluginConfig(nighthawk::client::RequestOptionsList options_list, int num_requests) { + nighthawk::request_source::InLineOptionsListRequestSourceConfig config; + *config.mutable_options_list() = std::move(options_list); + config.set_num_requests(num_requests); + return config; +} class StubRequestSourcePluginTest : public Test { public: @@ -34,15 +49,14 @@ class FileBasedRequestSourcePluginTest : public Test { FileBasedRequestSourcePluginTest() : api_(Envoy::Api::createApiForTest(stats_store_)) {} Envoy::Stats::MockIsolatedStatsStore stats_store_; Envoy::Api::ApiPtr api_; - nighthawk::request_source::FileBasedPluginConfig - MakeFileBasedPluginConfigWithTestYaml(absl::string_view request_file) { - nighthawk::request_source::FileBasedPluginConfig config; - config.mutable_file_path()->assign(request_file); - config.mutable_max_file_size()->set_value(4000); - return config; - } }; +class InLineRequestSourcePluginTest : public Test { +public: + InLineRequestSourcePluginTest() : api_(Envoy::Api::createApiForTest(stats_store_)) {} + Envoy::Stats::MockIsolatedStatsStore stats_store_; + Envoy::Api::ApiPtr api_; +}; TEST_F(StubRequestSourcePluginTest, CreateEmptyConfigProtoCreatesCorrectType) { auto& config_factory = Envoy::Config::Utility::getAndCheckFactoryByName( @@ -99,13 +113,13 @@ TEST_F(FileBasedRequestSourcePluginTest, CreateEmptyConfigProtoCreatesCorrectTyp Envoy::Config::Utility::getAndCheckFactoryByName( "nighthawk.file-based-request-source-plugin"); const Envoy::ProtobufTypes::MessagePtr empty_config = config_factory.createEmptyConfigProto(); - const nighthawk::request_source::FileBasedPluginConfig expected_config; + 
const nighthawk::request_source::FileBasedOptionsListRequestSourceConfig expected_config; EXPECT_EQ(empty_config->DebugString(), expected_config.DebugString()); EXPECT_TRUE(Envoy::MessageUtil()(*empty_config, expected_config)); } TEST_F(FileBasedRequestSourcePluginTest, FactoryRegistrationUsesCorrectPluginName) { - nighthawk::request_source::FileBasedPluginConfig config; + nighthawk::request_source::FileBasedOptionsListRequestSourceConfig config; Envoy::ProtobufWkt::Any config_any; config_any.PackFrom(config); auto& config_factory = @@ -115,8 +129,9 @@ TEST_F(FileBasedRequestSourcePluginTest, FactoryRegistrationUsesCorrectPluginNam } TEST_F(FileBasedRequestSourcePluginTest, CreateRequestSourcePluginCreatesCorrectPluginType) { - nighthawk::request_source::FileBasedPluginConfig config = MakeFileBasedPluginConfigWithTestYaml( - TestEnvironment::runfilesPath("test/request_source/test_data/test-config.yaml")); + nighthawk::request_source::FileBasedOptionsListRequestSourceConfig config = + MakeFileBasedPluginConfigWithTestYaml( + TestEnvironment::runfilesPath("test/request_source/test_data/test-config.yaml")); Envoy::ProtobufWkt::Any config_any; config_any.PackFrom(config); auto& config_factory = @@ -125,14 +140,15 @@ TEST_F(FileBasedRequestSourcePluginTest, CreateRequestSourcePluginCreatesCorrect auto header = Envoy::Http::RequestHeaderMapImpl::create(); RequestSourcePtr plugin = config_factory.createRequestSourcePlugin(config_any, *api_, std::move(header)); - EXPECT_NE(dynamic_cast(plugin.get()), nullptr); + EXPECT_NE(dynamic_cast(plugin.get()), nullptr); } TEST_F(FileBasedRequestSourcePluginTest, CreateRequestSourcePluginGetsWorkingRequestGeneratorThatEndsAtNumRequest) { - nighthawk::request_source::FileBasedPluginConfig config = MakeFileBasedPluginConfigWithTestYaml( - TestEnvironment::runfilesPath("test/request_source/test_data/test-config.yaml")); - config.mutable_num_requests()->set_value(2); + nighthawk::request_source::FileBasedOptionsListRequestSourceConfig 
config = + MakeFileBasedPluginConfigWithTestYaml( + TestEnvironment::runfilesPath("test/request_source/test_data/test-config.yaml")); + config.set_num_requests(2); Envoy::ProtobufWkt::Any config_any; config_any.PackFrom(config); auto& config_factory = @@ -142,10 +158,13 @@ TEST_F(FileBasedRequestSourcePluginTest, RequestSourcePtr file_based_request_source = config_factory.createRequestSourcePlugin(config_any, *api_, std::move(header)); Nighthawk::RequestGenerator generator = file_based_request_source->get(); - Nighthawk::RequestPtr request = generator(); + Nighthawk::RequestPtr request1 = generator(); Nighthawk::RequestPtr request2 = generator(); Nighthawk::RequestPtr request3 = generator(); - Nighthawk::HeaderMapPtr header1 = request->header(); + ASSERT_NE(request1, nullptr); + ASSERT_NE(request2, nullptr); + + Nighthawk::HeaderMapPtr header1 = request1->header(); Nighthawk::HeaderMapPtr header2 = request2->header(); EXPECT_EQ(header1->getPathValue(), "/a"); EXPECT_EQ(header2->getPathValue(), "/b"); @@ -153,10 +172,10 @@ TEST_F(FileBasedRequestSourcePluginTest, } TEST_F(FileBasedRequestSourcePluginTest, - CreateRequestSourcePluginWithMoreNumRequestsThanInFileGetsWorkingRequestGeneratorThatLoops) { - nighthawk::request_source::FileBasedPluginConfig config = MakeFileBasedPluginConfigWithTestYaml( - TestEnvironment::runfilesPath("test/request_source/test_data/test-config.yaml")); - config.mutable_num_requests()->set_value(4); + CreateRequestSourcePluginWithMoreNumRequestsThanInFileGetsRequestGeneratorThatLoops) { + nighthawk::request_source::FileBasedOptionsListRequestSourceConfig config = + MakeFileBasedPluginConfigWithTestYaml( + TestEnvironment::runfilesPath("test/request_source/test_data/test-config.yaml")); Envoy::ProtobufWkt::Any config_any; config_any.PackFrom(config); auto& config_factory = @@ -166,10 +185,126 @@ TEST_F(FileBasedRequestSourcePluginTest, RequestSourcePtr file_based_request_source = config_factory.createRequestSourcePlugin(config_any, *api_, 
std::move(header)); Nighthawk::RequestGenerator generator = file_based_request_source->get(); - Nighthawk::RequestPtr request = generator(); + Nighthawk::RequestPtr request1 = generator(); + Nighthawk::RequestPtr request2 = generator(); + Nighthawk::RequestPtr request3 = generator(); + ASSERT_NE(request1, nullptr); + ASSERT_NE(request2, nullptr); + ASSERT_NE(request3, nullptr); + + Nighthawk::HeaderMapPtr header1 = request1->header(); + Nighthawk::HeaderMapPtr header2 = request2->header(); + Nighthawk::HeaderMapPtr header3 = request3->header(); + EXPECT_EQ(header1->getPathValue(), "/a"); + EXPECT_EQ(header2->getPathValue(), "/b"); + EXPECT_EQ(header3->getPathValue(), "/a"); +} + +TEST_F(InLineRequestSourcePluginTest, CreateEmptyConfigProtoCreatesCorrectType) { + auto& config_factory = + Envoy::Config::Utility::getAndCheckFactoryByName( + "nighthawk.in-line-options-list-request-source-plugin"); + const Envoy::ProtobufTypes::MessagePtr empty_config = config_factory.createEmptyConfigProto(); + const nighthawk::request_source::InLineOptionsListRequestSourceConfig expected_config; + EXPECT_EQ(empty_config->DebugString(), expected_config.DebugString()); + EXPECT_TRUE(Envoy::MessageUtil()(*empty_config, expected_config)); +} + +TEST_F(InLineRequestSourcePluginTest, FactoryRegistrationUsesCorrectPluginName) { + nighthawk::request_source::InLineOptionsListRequestSourceConfig config; + Envoy::ProtobufWkt::Any config_any; + config_any.PackFrom(config); + auto& config_factory = + Envoy::Config::Utility::getAndCheckFactoryByName( + "nighthawk.in-line-options-list-request-source-plugin"); + EXPECT_EQ(config_factory.name(), "nighthawk.in-line-options-list-request-source-plugin"); +} + +TEST_F(InLineRequestSourcePluginTest, CreateRequestSourcePluginCreatesCorrectPluginType) { + Envoy::MessageUtil util; + nighthawk::client::RequestOptionsList options_list; + util.loadFromFile(/*file to load*/ TestEnvironment::runfilesPath( + "test/request_source/test_data/test-config.yaml"), + 
/*out parameter*/ options_list, + /*validation visitor*/ Envoy::ProtobufMessage::getStrictValidationVisitor(), + /*Api*/ *api_, + /*use api boosting*/ true); + nighthawk::request_source::InLineOptionsListRequestSourceConfig config = + MakeInLinePluginConfig(options_list, /*num_requests*/ 2); + Envoy::ProtobufWkt::Any config_any; + config_any.PackFrom(config); + auto& config_factory = + Envoy::Config::Utility::getAndCheckFactoryByName( + "nighthawk.in-line-options-list-request-source-plugin"); + auto header = Envoy::Http::RequestHeaderMapImpl::create(); + RequestSourcePtr plugin = + config_factory.createRequestSourcePlugin(config_any, *api_, std::move(header)); + EXPECT_NE(dynamic_cast(plugin.get()), nullptr); +} + +TEST_F(InLineRequestSourcePluginTest, + CreateRequestSourcePluginGetsWorkingRequestGeneratorThatEndsAtNumRequest) { + Envoy::MessageUtil util; + nighthawk::client::RequestOptionsList options_list; + util.loadFromFile(/*file to load*/ TestEnvironment::runfilesPath( + "test/request_source/test_data/test-config.yaml"), + /*out parameter*/ options_list, + /*validation visitor*/ Envoy::ProtobufMessage::getStrictValidationVisitor(), + /*Api*/ *api_, + /*use api boosting*/ true); + nighthawk::request_source::InLineOptionsListRequestSourceConfig config = + MakeInLinePluginConfig(options_list, /*num_requests*/ 2); + Envoy::ProtobufWkt::Any config_any; + config_any.PackFrom(config); + auto& config_factory = + Envoy::Config::Utility::getAndCheckFactoryByName( + "nighthawk.in-line-options-list-request-source-plugin"); + auto header = Envoy::Http::RequestHeaderMapImpl::create(); + RequestSourcePtr plugin = + config_factory.createRequestSourcePlugin(config_any, *api_, std::move(header)); + Nighthawk::RequestGenerator generator = plugin->get(); + Nighthawk::RequestPtr request1 = generator(); + Nighthawk::RequestPtr request2 = generator(); + Nighthawk::RequestPtr request3 = generator(); + ASSERT_NE(request1, nullptr); + ASSERT_NE(request2, nullptr); + + 
Nighthawk::HeaderMapPtr header1 = request1->header(); + Nighthawk::HeaderMapPtr header2 = request2->header(); + EXPECT_EQ(header1->getPathValue(), "/a"); + EXPECT_EQ(header2->getPathValue(), "/b"); + EXPECT_EQ(request3, nullptr); +} + +TEST_F(InLineRequestSourcePluginTest, + CreateRequestSourcePluginWithMoreNumRequestsThanInListGetsRequestGeneratorThatLoops) { + Envoy::MessageUtil util; + nighthawk::client::RequestOptionsList options_list; + util.loadFromFile(/*file to load*/ TestEnvironment::runfilesPath( + "test/request_source/test_data/test-config.yaml"), + /*out parameter*/ options_list, + /*validation visitor*/ Envoy::ProtobufMessage::getStrictValidationVisitor(), + /*Api*/ *api_, + /*use api boosting*/ true); + nighthawk::request_source::InLineOptionsListRequestSourceConfig config = + MakeInLinePluginConfig(options_list, /*num_requests*/ 4); + Envoy::ProtobufWkt::Any config_any; + config_any.PackFrom(config); + auto& config_factory = + Envoy::Config::Utility::getAndCheckFactoryByName( + "nighthawk.in-line-options-list-request-source-plugin"); + auto header = Envoy::Http::RequestHeaderMapImpl::create(); + RequestSourcePtr plugin = + config_factory.createRequestSourcePlugin(config_any, *api_, std::move(header)); + Nighthawk::RequestGenerator generator = plugin->get(); + Nighthawk::RequestPtr request1 = generator(); Nighthawk::RequestPtr request2 = generator(); Nighthawk::RequestPtr request3 = generator(); - Nighthawk::HeaderMapPtr header1 = request->header(); + ASSERT_NE(request1, nullptr); + ASSERT_NE(request2, nullptr); + ASSERT_NE(request3, nullptr); + + Nighthawk::HeaderMapPtr header1 = request1->header(); Nighthawk::HeaderMapPtr header2 = request2->header(); Nighthawk::HeaderMapPtr header3 = request3->header(); EXPECT_EQ(header1->getPathValue(), "/a"); From 5e82e51cdf3535d997622af986231393f27c985d Mon Sep 17 00:00:00 2001 From: Otto van der Schaaf Date: Thu, 29 Oct 2020 18:55:26 +0100 Subject: [PATCH 30/63] Update Envoy to 
b9ed0a991d90363ad04da2191891f54790482dcf (#565) Signed-off-by: Otto van der Schaaf --- .bazelrc | 17 +++++++++++++++++ bazel/repositories.bzl | 4 ++-- 2 files changed, 19 insertions(+), 2 deletions(-) diff --git a/.bazelrc b/.bazelrc index b22616bce..0f66d819b 100644 --- a/.bazelrc +++ b/.bazelrc @@ -70,6 +70,9 @@ build:asan --copt -D__SANITIZE_ADDRESS__ build:asan --test_env=ASAN_OPTIONS=handle_abort=1:allow_addr2line=true:check_initialization_order=true:strict_init_order=true:detect_odr_violation=1 build:asan --test_env=UBSAN_OPTIONS=halt_on_error=true:print_stacktrace=1 build:asan --test_env=ASAN_SYMBOLIZER_PATH +# ASAN needs -O1 to get reasonable performance. +build:asan --copt -O1 +build:asan --copt -fno-optimize-sibling-calls # Clang ASAN/UBSAN build:clang-asan --config=asan @@ -173,6 +176,16 @@ build:rbe-toolchain-clang-libc++ --action_env=CXXFLAGS=-stdlib=libc++ build:rbe-toolchain-clang-libc++ --action_env=LDFLAGS=-stdlib=libc++ build:rbe-toolchain-clang-libc++ --define force_libcpp=enabled +# Do not inherit from "clang-asan" to avoid picking up flags from local clang.bazelrc. 
+build:rbe-toolchain-asan --config=asan +build:rbe-toolchain-asan --linkopt -fuse-ld=lld +build:rbe-toolchain-asan --action_env=ENVOY_UBSAN_VPTR=1 +build:rbe-toolchain-asan --copt=-fsanitize=vptr,function +build:rbe-toolchain-asan --linkopt=-fsanitize=vptr,function +build:rbe-toolchain-asan --linkopt=-L/opt/llvm/lib/clang/10.0.0/lib/linux +build:rbe-toolchain-asan --linkopt=-l:libclang_rt.ubsan_standalone-x86_64.a +build:rbe-toolchain-asan --linkopt=-l:libclang_rt.ubsan_standalone_cxx-x86_64.a + build:rbe-toolchain-msan --linkopt=-L/opt/libcxx_msan/lib build:rbe-toolchain-msan --linkopt=-Wl,-rpath,/opt/libcxx_msan/lib build:rbe-toolchain-msan --config=clang-msan @@ -223,6 +236,10 @@ build:remote-clang-libc++ --config=rbe-toolchain-clang-libc++ build:remote-gcc --config=remote build:remote-gcc --config=rbe-toolchain-gcc +build:remote-asan --config=remote +build:remote-asan --config=rbe-toolchain-clang-libc++ +build:remote-asan --config=rbe-toolchain-asan + build:remote-msan --config=remote build:remote-msan --config=rbe-toolchain-clang-libc++ build:remote-msan --config=rbe-toolchain-msan diff --git a/bazel/repositories.bzl b/bazel/repositories.bzl index 7b38a6368..f80b65023 100644 --- a/bazel/repositories.bzl +++ b/bazel/repositories.bzl @@ -1,7 +1,7 @@ load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") -ENVOY_COMMIT = "f95f5391b0b8683081ec786ea946026594955fc6" # October 21st, 2020 -ENVOY_SHA = "1129dcb0e18ec79ab56f59cbe5150f564e32d9221edd7ba1bd84b9b5377cbe35" +ENVOY_COMMIT = "b9ed0a991d90363ad04da2191891f54790482dcf" # October 28th, 2020 +ENVOY_SHA = "8917c7eaf25f56a5966930bafc659e8a1898c9d027a02389e50808c3b5f26c94" HDR_HISTOGRAM_C_VERSION = "0.11.2" # October 12th, 2020 HDR_HISTOGRAM_C_SHA = "637f28b5f64de2e268131e4e34e6eef0b91cf5ff99167db447d9b2825eae6bad" From fc2332021a8871f8801b66e56af63b3d29b6c272 Mon Sep 17 00:00:00 2001 From: Otto van der Schaaf Date: Mon, 2 Nov 2020 16:56:11 +0100 Subject: [PATCH 31/63] Update Envoy to 
0226d0e084d832ce24ee6303f5cb2fc01ec4970b (#567) Signed-off-by: Otto van der Schaaf --- .bazelversion | 2 +- bazel/repositories.bzl | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.bazelversion b/.bazelversion index 47b322c97..40c341bdc 100644 --- a/.bazelversion +++ b/.bazelversion @@ -1 +1 @@ -3.4.1 +3.6.0 diff --git a/bazel/repositories.bzl b/bazel/repositories.bzl index f80b65023..c4fb1f405 100644 --- a/bazel/repositories.bzl +++ b/bazel/repositories.bzl @@ -1,7 +1,7 @@ load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") -ENVOY_COMMIT = "b9ed0a991d90363ad04da2191891f54790482dcf" # October 28th, 2020 -ENVOY_SHA = "8917c7eaf25f56a5966930bafc659e8a1898c9d027a02389e50808c3b5f26c94" +ENVOY_COMMIT = "0226d0e084d832ce24ee6303f5cb2fc01ec4970b" # November 2nd, 2020 +ENVOY_SHA = "ed91cf0946155aac81c4601d55d81ba10d9189628259846c62f84fbe5a9059c0" HDR_HISTOGRAM_C_VERSION = "0.11.2" # October 12th, 2020 HDR_HISTOGRAM_C_SHA = "637f28b5f64de2e268131e4e34e6eef0b91cf5ff99167db447d9b2825eae6bad" From 525c7965b1eee1728c112b590e0a5fdcdffbd2d0 Mon Sep 17 00:00:00 2001 From: wjuan-AFK <66322422+wjuan-AFK@users.noreply.github.com> Date: Thu, 5 Nov 2020 15:09:44 -0500 Subject: [PATCH 32/63] Cli change for file based request source (#563) Commit to add RequestSourcePlugin option to the CLI. base PR: https://github.com/envoyproxy/nighthawk/pull/560. This is necessary to make use of the previous PRs adding the requestsourcepluginfactory via the CLI. 
Signed-off-by: William Juan <66322422+wjuan-AFK@users.noreply.github.com> --- README.md | 13 +- api/client/options.proto | 5 + ci/do_ci.sh | 3 +- include/nighthawk/client/options.h | 2 + source/client/BUILD | 2 + source/client/factories_impl.cc | 6 +- source/client/options_impl.cc | 35 ++++- source/client/options_impl.h | 7 + .../request_options_list_plugin_impl.cc | 2 +- test/mocks/client/mock_options.h | 2 + test/options_test.cc | 121 +++++++++++++++++- .../request_source_plugin_test.cc | 26 ++++ 12 files changed, 212 insertions(+), 12 deletions(-) diff --git a/README.md b/README.md index e5bdb24ba..4c3a2791f 100644 --- a/README.md +++ b/README.md @@ -47,6 +47,7 @@ bazel-bin/nighthawk_client [--latency-response-header-name ] [--stats-flush-interval ] [--stats-sinks ] ... [--no-duration] [--simple-warmup] +[--request-source-plugin-config ] [--request-source ] [--label ] ... [--multi-target-use-https] [--multi-target-path ] @@ -109,10 +110,20 @@ Perform a simple single warmup request (per worker) before starting execution. Note that this will be reflected in the counters that Nighthawk writes to the output. Default is false. +--request-source-plugin-config +[Request +Source](https://github.com/envoyproxy/nighthawk/blob/master/docs/root/ +overview.md#requestsource) plugin configuration in json or compact +yaml. Mutually exclusive with --request-source. Example (json): +{name:"nighthawk.stub-request-source-plugin" +,typed_config:{"@type":"type.googleapis.com/nighthawk.request_source.S +tubPluginConfig",test_value:"3"}} + --request-source Remote gRPC source that will deliver to-be-replayed traffic. Each worker will separately connect to this source. For example -grpc://127.0.0.1:8443/. +grpc://127.0.0.1:8443/. Mutually exclusive with +--request_source_plugin_config. --label (accepted multiple times) Label. 
Allows specifying multiple labels which will be persisted in diff --git a/api/client/options.proto b/api/client/options.proto index b5955a246..13d7f1819 100644 --- a/api/client/options.proto +++ b/api/client/options.proto @@ -7,6 +7,7 @@ import "google/protobuf/wrappers.proto"; import "envoy/config/core/v3/base.proto"; import "envoy/config/metrics/v3/stats.proto"; import "envoy/extensions/transport_sockets/tls/v3/cert.proto"; +import "envoy/config/core/v3/extension.proto"; import "validate/validate.proto"; // Allows for static configuration of requests that should be send by the load generator. @@ -104,6 +105,7 @@ message H1ConnectionReuseStrategy { // TODO(oschaaf): Ultimately this will be a load test specification. The fact that it // can arrive via CLI is just a concrete detail. Change this to reflect that. +// highest unused number is 38 message CommandLineOptions { // The target requests-per-second rate. Default: 5. google.protobuf.UInt32Value requests_per_second = 1 @@ -148,6 +150,9 @@ message CommandLineOptions { // Remote gRPC source that will deliver to-be-replayed traffic. Each worker will separately // connect to this source. RequestSource request_source = 26; + // A plugin config that is to be parsed by a RequestSourcePluginConfigFactory and used to create + // an in memory request source. + envoy.config.core.v3.TypedExtensionConfig request_source_plugin_config = 37; } // DEPRECATED, use --transport-socket instead. Tls context configuration in json or compact yaml. // Mutually exclusive with --transport-socket. 
diff --git a/ci/do_ci.sh b/ci/do_ci.sh index 0141ad83c..e402eccd8 100755 --- a/ci/do_ci.sh +++ b/ci/do_ci.sh @@ -47,7 +47,8 @@ function do_unit_test_coverage() { function do_integration_test_coverage() { export TEST_TARGETS="//test:python_test" - export COVERAGE_THRESHOLD=78.6 + #TODO(#564): Revert this to 78.6 + export COVERAGE_THRESHOLD=75.0 echo "bazel coverage build with tests ${TEST_TARGETS}" test/run_nighthawk_bazel_coverage.sh ${TEST_TARGETS} exit 0 diff --git a/include/nighthawk/client/options.h b/include/nighthawk/client/options.h index 6e73637df..c87b03c5f 100644 --- a/include/nighthawk/client/options.h +++ b/include/nighthawk/client/options.h @@ -54,6 +54,8 @@ class Options { virtual nighthawk::client::SequencerIdleStrategy::SequencerIdleStrategyOptions sequencerIdleStrategy() const PURE; virtual std::string requestSource() const PURE; + virtual const absl::optional& + requestSourcePluginConfig() const PURE; virtual std::string trace() const PURE; virtual nighthawk::client::H1ConnectionReuseStrategy::H1ConnectionReuseStrategyOptions h1ConnectionReuseStrategy() const PURE; diff --git a/source/client/BUILD b/source/client/BUILD index 4723f1ac7..b89c90bd7 100644 --- a/source/client/BUILD +++ b/source/client/BUILD @@ -49,6 +49,7 @@ envoy_cc_library( "//source/common:request_source_impl_lib", "//source/common:nighthawk_common_lib", "//source/common:nighthawk_service_client_impl", + "//source/request_source:request_options_list_plugin_impl", "@envoy//source/common/common:random_generator_lib_with_external_headers", "@envoy//source/common/access_log:access_log_manager_lib_with_external_headers", "@envoy//source/common/api:api_lib_with_external_headers", @@ -90,6 +91,7 @@ envoy_cc_library( "@envoy//source/server:server_lib_with_external_headers", "@envoy//source/server/config_validation:admin_lib_with_external_headers", "@envoy//include/envoy/http:protocol_interface_with_external_headers", + "@envoy//source/common/common:statusor_lib_with_external_headers", ] + 
select({ "//bazel:zipkin_disabled": [], "//conditions:default": [ diff --git a/source/client/factories_impl.cc b/source/client/factories_impl.cc index 0b3644f9d..63ae22da6 100644 --- a/source/client/factories_impl.cc +++ b/source/client/factories_impl.cc @@ -169,15 +169,15 @@ RequestSourceFactoryImpl::create(const Envoy::Upstream::ClusterManagerPtr& clust setRequestHeader(*header, option_header.header().key(), option_header.header().value()); } - if (options_.requestSource() == "") { - return std::make_unique(std::move(header)); - } else { + if (!options_.requestSource().empty()) { RELEASE_ASSERT(!service_cluster_name.empty(), "expected cluster name to be set"); // We pass in options_.requestsPerSecond() as the header buffer length so the grpc client // will shoot for maintaining an amount of headers of at least one second. return std::make_unique(cluster_manager, dispatcher, scope, service_cluster_name, std::move(header), options_.requestsPerSecond()); + } else { + return std::make_unique(std::move(header)); } } diff --git a/source/client/options_impl.cc b/source/client/options_impl.cc index 9dec9f379..acac38d80 100644 --- a/source/client/options_impl.cc +++ b/source/client/options_impl.cc @@ -263,9 +263,19 @@ OptionsImpl::OptionsImpl(int argc, const char* const* argv) { TCLAP::ValueArg request_source( "", "request-source", "Remote gRPC source that will deliver to-be-replayed traffic. Each worker will separately " - "connect to this source. For example grpc://127.0.0.1:8443/.", + "connect to this source. For example grpc://127.0.0.1:8443/. " + "Mutually exclusive with --request_source_plugin_config.", false, "", "uri format", cmd); - + TCLAP::ValueArg request_source_plugin_config( + "", "request-source-plugin-config", + "[Request " + "Source](https://github.com/envoyproxy/nighthawk/blob/master/docs/root/" + "overview.md#requestsource) plugin configuration in json or compact yaml. " + "Mutually exclusive with --request-source. 
Example (json): " + "{name:\"nighthawk.stub-request-source-plugin\",typed_config:{" + "\"@type\":\"type.googleapis.com/nighthawk.request_source.StubPluginConfig\"," + "test_value:\"3\"}}", + false, "", "string", cmd); TCLAP::SwitchArg simple_warmup( "", "simple-warmup", "Perform a simple single warmup request (per worker) before starting execution. Note that " @@ -496,6 +506,21 @@ OptionsImpl::OptionsImpl(int argc, const char* const* argv) { throw MalformedArgvException(e.what()); } } + if (!request_source.getValue().empty() && !request_source_plugin_config.getValue().empty()) { + throw MalformedArgvException( + "--request-source and --request_source_plugin_config cannot both be set."); + } + if (!request_source_plugin_config.getValue().empty()) { + try { + request_source_plugin_config_.emplace(envoy::config::core::v3::TypedExtensionConfig()); + Envoy::MessageUtil::loadFromJson(request_source_plugin_config.getValue(), + request_source_plugin_config_.value(), + Envoy::ProtobufMessage::getStrictValidationVisitor()); + } catch (const Envoy::EnvoyException& e) { + throw MalformedArgvException(e.what()); + } + } + validate(); } @@ -570,6 +595,9 @@ OptionsImpl::OptionsImpl(const nighthawk::client::CommandLineOptions& options) { } else if (options.has_request_source()) { const auto& request_source_options = options.request_source(); request_source_ = request_source_options.uri(); + } else if (options.has_request_source_plugin_config()) { + request_source_plugin_config_.emplace(envoy::config::core::v3::TypedExtensionConfig()); + request_source_plugin_config_.value().MergeFrom(options.request_source_plugin_config()); } max_pending_requests_ = @@ -730,6 +758,9 @@ CommandLineOptionsPtr OptionsImpl::toCommandLineOptionsInternal() const { if (requestSource() != "") { auto request_source = command_line_options->mutable_request_source(); *request_source->mutable_uri() = request_source_; + } else if (request_source_plugin_config_.has_value()) { + 
*(command_line_options->mutable_request_source_plugin_config()) = + request_source_plugin_config_.value(); } else { auto request_options = command_line_options->mutable_request_options(); request_options->set_request_method(request_method_); diff --git a/source/client/options_impl.h b/source/client/options_impl.h index af529f7b8..c43c211a3 100644 --- a/source/client/options_impl.h +++ b/source/client/options_impl.h @@ -60,6 +60,11 @@ class OptionsImpl : public Options, public Envoy::Logger::Loggable& + requestSourcePluginConfig() const override { + return request_source_plugin_config_; + } + std::string trace() const override { return trace_; } nighthawk::client::H1ConnectionReuseStrategy::H1ConnectionReuseStrategyOptions h1ConnectionReuseStrategy() const override { @@ -116,6 +121,8 @@ class OptionsImpl : public Options, public Envoy::Logger::Loggable transport_socket_; + absl::optional request_source_plugin_config_; + uint32_t max_pending_requests_{0}; // This default is based the minimum recommendation for SETTINGS_MAX_CONCURRENT_STREAMS over at // https://tools.ietf.org/html/rfc7540#section-6.5.2 diff --git a/source/request_source/request_options_list_plugin_impl.cc b/source/request_source/request_options_list_plugin_impl.cc index bdb9a3fbc..acc459e3c 100644 --- a/source/request_source/request_options_list_plugin_impl.cc +++ b/source/request_source/request_options_list_plugin_impl.cc @@ -25,8 +25,8 @@ RequestSourcePtr FileBasedOptionsListRequestSourceFactory::createRequestSourcePl const auto& any = dynamic_cast(message); nighthawk::request_source::FileBasedOptionsListRequestSourceConfig config; Envoy::MessageUtil util; - uint32_t max_file_size = config.has_max_file_size() ? config.max_file_size().value() : 1000000; util.unpackTo(any, config); + uint32_t max_file_size = config.has_max_file_size() ? 
config.max_file_size().value() : 1000000; if (api.fileSystem().fileSize(config.file_path()) > max_file_size) { throw NighthawkException("file size must be less than max_file_size"); } diff --git a/test/mocks/client/mock_options.h b/test/mocks/client/mock_options.h index 258904cd5..be3c6635a 100644 --- a/test/mocks/client/mock_options.h +++ b/test/mocks/client/mock_options.h @@ -36,6 +36,8 @@ class MockOptions : public Options { MOCK_CONST_METHOD0(sequencerIdleStrategy, nighthawk::client::SequencerIdleStrategy::SequencerIdleStrategyOptions()); MOCK_CONST_METHOD0(requestSource, std::string()); + MOCK_CONST_METHOD0(requestSourcePluginConfig, + absl::optional&()); MOCK_CONST_METHOD0(trace, std::string()); MOCK_CONST_METHOD0( h1ConnectionReuseStrategy, diff --git a/test/options_test.cc b/test/options_test.cc index ddbb80595..6c8a5a8ae 100644 --- a/test/options_test.cc +++ b/test/options_test.cc @@ -3,6 +3,7 @@ #include "client/options_impl.h" #include "test/client/utility.h" +#include "test/test_common/environment.h" #include "gtest/gtest.h" @@ -30,7 +31,6 @@ class OptionsImplTest : public Test { EXPECT_EQ(expected_key, headers[0].header().key()); EXPECT_EQ(expected_value, headers[0].header().value()); } - std::string client_name_; std::string good_test_uri_; std::string no_arg_match_; @@ -249,7 +249,7 @@ TEST_F(OptionsImplTest, AlmostAll) { EXPECT_TRUE(util(cmd->stats_sinks(0), options->statsSinks()[0])); EXPECT_TRUE(util(cmd->stats_sinks(1), options->statsSinks()[1])); EXPECT_EQ(cmd->latency_response_header_name().value(), options->responseHeaderWithLatencyInput()); - + // TODO(#433) Here and below, replace comparisons once we choose a proto diff. OptionsImpl options_from_proto(*cmd); std::string s1 = Envoy::MessageUtil::getYamlStringFromMessage( *(options_from_proto.toCommandLineOptions()), true, true); @@ -272,10 +272,122 @@ TEST_F(OptionsImplTest, RequestSource) { // Check that our conversion to CommandLineOptionsPtr makes sense. 
CommandLineOptionsPtr cmd = options->toCommandLineOptions(); EXPECT_EQ(cmd->request_source().uri(), request_source); + // TODO(#433) OptionsImpl options_from_proto(*cmd); EXPECT_TRUE(util(*(options_from_proto.toCommandLineOptions()), *cmd)); } +class RequestSourcePluginTestFixture : public OptionsImplTest, + public WithParamInterface {}; +TEST_P(RequestSourcePluginTestFixture, CreatesOptionsImplWithRequestSourceConfig) { + Envoy::MessageUtil util; + const std::string request_source_config = GetParam(); + std::unique_ptr options = TestUtility::createOptionsImpl( + fmt::format("{} --request-source-plugin-config {} {}", client_name_, request_source_config, + good_test_uri_)); + + CommandLineOptionsPtr command = options->toCommandLineOptions(); + EXPECT_TRUE( + util(command->request_source_plugin_config(), options->requestSourcePluginConfig().value())); + + // The predicates are defined as proto maps, and these seem to re-serialize into a different + // order. Hence we trim the maps to contain a single entry so they don't thwart our textual + // comparison below. + EXPECT_EQ(1, command->mutable_failure_predicates()->erase("benchmark.http_4xx")); + EXPECT_EQ(1, command->mutable_failure_predicates()->erase("benchmark.http_5xx")); + EXPECT_EQ(1, command->mutable_failure_predicates()->erase("requestsource.upstream_rq_5xx")); + + // TODO(#433) + // Now we construct a new options from the proto we created above. This should result in an + // OptionsImpl instance equivalent to options. We test that by converting both to yaml strings, + // expecting them to be equal. This should provide helpful output when the test fails by showing + // the unexpected (yaml) diff. 
+ OptionsImpl options_from_proto(*command); + std::string yaml_for_options_proto = Envoy::MessageUtil::getYamlStringFromMessage( + *(options_from_proto.toCommandLineOptions()), true, true); + std::string yaml_for_command = Envoy::MessageUtil::getYamlStringFromMessage(*command, true, true); + EXPECT_EQ(yaml_for_options_proto, yaml_for_command); + // Additional comparison to avoid edge cases missed. + EXPECT_TRUE(util(*(options_from_proto.toCommandLineOptions()), *command)); +} +std::vector RequestSourcePluginJsons() { + std::string file_request_source_plugin_json = + "{" + R"(name:"nighthawk.file-based-request-source-plugin",)" + "typed_config:{" + R"("@type":"type.googleapis.com/)" + R"(nighthawk.request_source.FileBasedOptionsListRequestSourceConfig",)" + R"(file_path:")" + + TestEnvironment::runfilesPath("test/request_source/test_data/test-config.yaml") + + "\"," + "}" + "}"; + std::string in_line_request_source_plugin_json = + "{" + R"(name:"nighthawk.in-line-options-list-request-source-plugin",)" + "typed_config:{" + R"("@type":"type.googleapis.com/)" + R"(nighthawk.request_source.InLineOptionsListRequestSourceConfig",)" + "options_list:{" + R"(options:[{request_method:"1",request_headers:[{header:{key:"key",value:"value"}}]}])" + "}," + "}" + "}"; + std::string stub_request_source_plugin_json = + "{" + R"(name:"nighthawk.stub-request-source-plugin",)" + "typed_config:{" + R"("@type":"type.googleapis.com/nighthawk.request_source.StubPluginConfig",)" + R"(test_value:"3",)" + "}" + "}"; + return std::vector{ + file_request_source_plugin_json, + in_line_request_source_plugin_json, + stub_request_source_plugin_json, + }; +} +INSTANTIATE_TEST_SUITE_P(HappyPathRequestSourceConfigJsonSuccessfullyTranslatesIntoOptions, + RequestSourcePluginTestFixture, + ::testing::ValuesIn(RequestSourcePluginJsons())); + +// This test covers --RequestSourcePlugin, which can't be tested at the same time as --RequestSource +// and some other options. 
This is the test for the inlineoptionslistplugin. +TEST_F(OptionsImplTest, InLineOptionsListRequestSourcePluginIsMutuallyExclusiveWithRequestSource) { + const std::string request_source = "127.9.9.4:32323"; + const std::string request_source_config = + "{" + "name:\"nighthawk.in-line-options-list-request-source-plugin\"," + "typed_config:{" + "\"@type\":\"type.googleapis.com/" + "nighthawk.request_source.InLineOptionsListRequestSourceConfig\"," + "options_list:{" + "options:[{request_method:\"1\",request_headers:[{header:{key:\"key\",value:\"value\"}}]}]" + "}," + "}" + "}"; + EXPECT_THROW_WITH_REGEX( + TestUtility::createOptionsImpl( + fmt::format("{} --request-source-plugin-config {} --request-source {} {}", client_name_, + request_source_config, request_source, good_test_uri_)), + MalformedArgvException, + "--request-source and --request_source_plugin_config cannot both be set."); +} + +TEST_F(OptionsImplTest, BadRequestSourcePluginSpecification) { + // Bad JSON + EXPECT_THROW_WITH_REGEX( + TestUtility::createOptionsImpl(fmt::format("{} --request-source-plugin-config {} {}", + client_name_, "{broken_json:", good_test_uri_)), + MalformedArgvException, "Unable to parse JSON as proto"); + // Correct JSON, but contents not according to spec. + EXPECT_THROW_WITH_REGEX(TestUtility::createOptionsImpl( + fmt::format("{} --request-source-plugin-config {} {}", client_name_, + "{misspelled_field:{}}", good_test_uri_)), + MalformedArgvException, + "envoy.config.core.v3.TypedExtensionConfig reason INVALID_ARGUMENT"); +} + // We test --no-duration here and not in All above because it is exclusive to --duration. TEST_F(OptionsImplTest, NoDuration) { Envoy::MessageUtil util; @@ -284,6 +396,7 @@ TEST_F(OptionsImplTest, NoDuration) { EXPECT_TRUE(options->noDuration()); // Check that our conversion to CommandLineOptionsPtr makes sense. 
CommandLineOptionsPtr cmd = options->toCommandLineOptions(); + // TODO(#433) OptionsImpl options_from_proto(*cmd); EXPECT_TRUE(util(*(options_from_proto.toCommandLineOptions()), *cmd)); } @@ -324,7 +437,7 @@ TEST_F(OptionsImplTest, TlsContext) { EXPECT_EQ(1, cmd->mutable_failure_predicates()->erase("benchmark.http_4xx")); EXPECT_EQ(1, cmd->mutable_failure_predicates()->erase("benchmark.http_5xx")); EXPECT_EQ(1, cmd->mutable_failure_predicates()->erase("requestsource.upstream_rq_5xx")); - + // TODO(#433) OptionsImpl options_from_proto(*cmd); std::string s1 = Envoy::MessageUtil::getYamlStringFromMessage( *(options_from_proto.toCommandLineOptions()), true, true); @@ -386,7 +499,7 @@ TEST_F(OptionsImplTest, MultiTarget) { EXPECT_EQ(1, cmd->mutable_failure_predicates()->erase("benchmark.http_4xx")); EXPECT_EQ(1, cmd->mutable_failure_predicates()->erase("benchmark.http_5xx")); EXPECT_EQ(1, cmd->mutable_failure_predicates()->erase("requestsource.upstream_rq_5xx")); - + // TODO(#433) OptionsImpl options_from_proto(*cmd); std::string s1 = Envoy::MessageUtil::getYamlStringFromMessage( *(options_from_proto.toCommandLineOptions()), true, true); diff --git a/test/request_source/request_source_plugin_test.cc b/test/request_source/request_source_plugin_test.cc index eee187522..04140a92a 100644 --- a/test/request_source/request_source_plugin_test.cc +++ b/test/request_source/request_source_plugin_test.cc @@ -87,6 +87,7 @@ TEST_F(StubRequestSourcePluginTest, CreateRequestSourcePluginCreatesCorrectPlugi auto header = Envoy::Http::RequestHeaderMapImpl::create(); RequestSourcePtr plugin = config_factory.createRequestSourcePlugin(config_any, *api_, std::move(header)); + plugin->initOnThread(); EXPECT_NE(dynamic_cast(plugin.get()), nullptr); } TEST_F(StubRequestSourcePluginTest, CreateRequestSourcePluginCreatesWorkingPlugin) { @@ -101,6 +102,7 @@ TEST_F(StubRequestSourcePluginTest, CreateRequestSourcePluginCreatesWorkingPlugi auto template_header = 
Envoy::Http::RequestHeaderMapImpl::create(); RequestSourcePtr plugin = config_factory.createRequestSourcePlugin(config_any, *api_, std::move(template_header)); + plugin->initOnThread(); Nighthawk::RequestGenerator generator = plugin->get(); Nighthawk::RequestPtr request = generator(); Nighthawk::HeaderMapPtr header = request->header(); @@ -140,6 +142,7 @@ TEST_F(FileBasedRequestSourcePluginTest, CreateRequestSourcePluginCreatesCorrect auto header = Envoy::Http::RequestHeaderMapImpl::create(); RequestSourcePtr plugin = config_factory.createRequestSourcePlugin(config_any, *api_, std::move(header)); + plugin->initOnThread(); EXPECT_NE(dynamic_cast(plugin.get()), nullptr); } @@ -157,6 +160,7 @@ TEST_F(FileBasedRequestSourcePluginTest, auto header = Envoy::Http::RequestHeaderMapImpl::create(); RequestSourcePtr file_based_request_source = config_factory.createRequestSourcePlugin(config_any, *api_, std::move(header)); + file_based_request_source->initOnThread(); Nighthawk::RequestGenerator generator = file_based_request_source->get(); Nighthawk::RequestPtr request1 = generator(); Nighthawk::RequestPtr request2 = generator(); @@ -171,6 +175,24 @@ TEST_F(FileBasedRequestSourcePluginTest, EXPECT_EQ(request3, nullptr); } +TEST_F(FileBasedRequestSourcePluginTest, CreateRequestSourcePluginWithTooLargeAFileThrowsAnError) { + nighthawk::request_source::FileBasedOptionsListRequestSourceConfig config = + MakeFileBasedPluginConfigWithTestYaml( + TestEnvironment::runfilesPath("test/request_source/test_data/test-config.yaml")); + const uint32_t max_file_size = 10; + config.set_num_requests(2); + config.mutable_max_file_size()->set_value(max_file_size); + Envoy::ProtobufWkt::Any config_any; + config_any.PackFrom(config); + auto& config_factory = + Envoy::Config::Utility::getAndCheckFactoryByName( + "nighthawk.file-based-request-source-plugin"); + auto header = Envoy::Http::RequestHeaderMapImpl::create(); + EXPECT_THROW_WITH_REGEX( + config_factory.createRequestSourcePlugin(config_any, 
*api_, std::move(header)), + NighthawkException, "file size must be less than max_file_size"); +} + TEST_F(FileBasedRequestSourcePluginTest, CreateRequestSourcePluginWithMoreNumRequestsThanInFileGetsRequestGeneratorThatLoops) { nighthawk::request_source::FileBasedOptionsListRequestSourceConfig config = @@ -184,6 +206,7 @@ TEST_F(FileBasedRequestSourcePluginTest, auto header = Envoy::Http::RequestHeaderMapImpl::create(); RequestSourcePtr file_based_request_source = config_factory.createRequestSourcePlugin(config_any, *api_, std::move(header)); + file_based_request_source->initOnThread(); Nighthawk::RequestGenerator generator = file_based_request_source->get(); Nighthawk::RequestPtr request1 = generator(); Nighthawk::RequestPtr request2 = generator(); @@ -239,6 +262,7 @@ TEST_F(InLineRequestSourcePluginTest, CreateRequestSourcePluginCreatesCorrectPlu auto header = Envoy::Http::RequestHeaderMapImpl::create(); RequestSourcePtr plugin = config_factory.createRequestSourcePlugin(config_any, *api_, std::move(header)); + plugin->initOnThread(); EXPECT_NE(dynamic_cast(plugin.get()), nullptr); } @@ -262,6 +286,7 @@ TEST_F(InLineRequestSourcePluginTest, auto header = Envoy::Http::RequestHeaderMapImpl::create(); RequestSourcePtr plugin = config_factory.createRequestSourcePlugin(config_any, *api_, std::move(header)); + plugin->initOnThread(); Nighthawk::RequestGenerator generator = plugin->get(); Nighthawk::RequestPtr request1 = generator(); Nighthawk::RequestPtr request2 = generator(); @@ -296,6 +321,7 @@ TEST_F(InLineRequestSourcePluginTest, auto header = Envoy::Http::RequestHeaderMapImpl::create(); RequestSourcePtr plugin = config_factory.createRequestSourcePlugin(config_any, *api_, std::move(header)); + plugin->initOnThread(); Nighthawk::RequestGenerator generator = plugin->get(); Nighthawk::RequestPtr request1 = generator(); Nighthawk::RequestPtr request2 = generator(); From 45c62059c39bf975246d33065678798f646f5bd2 Mon Sep 17 00:00:00 2001 From: Kush Trivedi 
<44091822+kushthedude@users.noreply.github.com> Date: Wed, 11 Nov 2020 08:31:35 +0530 Subject: [PATCH 33/63] chore: enhance naming of bool maybeSendErrorReply (#568) Signed-off-by: Kush Trivedi --- source/server/http_dynamic_delay_filter.cc | 4 ++-- source/server/http_filter_config_base.cc | 2 +- source/server/http_filter_config_base.h | 2 +- source/server/http_test_server_filter.cc | 4 ++-- source/server/http_time_tracking_filter.cc | 4 ++-- 5 files changed, 8 insertions(+), 8 deletions(-) diff --git a/source/server/http_dynamic_delay_filter.cc b/source/server/http_dynamic_delay_filter.cc index ebc3254fc..9536b8434 100644 --- a/source/server/http_dynamic_delay_filter.cc +++ b/source/server/http_dynamic_delay_filter.cc @@ -47,7 +47,7 @@ HttpDynamicDelayDecoderFilter::decodeHeaders(Envoy::Http::RequestHeaderMap& head maybeRequestFaultFilterDelay(delay_ms, headers); } else { if (end_stream) { - config_->maybeSendErrorReply(*decoder_callbacks_); + config_->validateOrSendError(*decoder_callbacks_); return Envoy::Http::FilterHeadersStatus::StopIteration; } return Envoy::Http::FilterHeadersStatus::Continue; @@ -59,7 +59,7 @@ Envoy::Http::FilterDataStatus HttpDynamicDelayDecoderFilter::decodeData(Envoy::Buffer::Instance& buffer, bool end_stream) { if (!config_->getEffectiveConfiguration().ok()) { if (end_stream) { - config_->maybeSendErrorReply(*decoder_callbacks_); + config_->validateOrSendError(*decoder_callbacks_); return Envoy::Http::FilterDataStatus::StopIterationNoBuffer; } return Envoy::Http::FilterDataStatus::Continue; diff --git a/source/server/http_filter_config_base.cc b/source/server/http_filter_config_base.cc index b76505023..0c8396139 100644 --- a/source/server/http_filter_config_base.cc +++ b/source/server/http_filter_config_base.cc @@ -33,7 +33,7 @@ void FilterConfigurationBase::computeEffectiveConfiguration( } } -bool FilterConfigurationBase::maybeSendErrorReply( +bool FilterConfigurationBase::validateOrSendError( 
Envoy::Http::StreamDecoderFilterCallbacks& decoder_callbacks) const { if (!effective_config_.ok()) { decoder_callbacks.sendLocalReply(static_cast(500), diff --git a/source/server/http_filter_config_base.h b/source/server/http_filter_config_base.h index 9f3f700fe..7569df17a 100644 --- a/source/server/http_filter_config_base.h +++ b/source/server/http_filter_config_base.h @@ -52,7 +52,7 @@ class FilterConfigurationBase { * @param decoder_callbacks Decoder used to generate the reply. * @return true iff an error reply was generated. */ - bool maybeSendErrorReply(Envoy::Http::StreamDecoderFilterCallbacks& decoder_callbacks) const; + bool validateOrSendError(Envoy::Http::StreamDecoderFilterCallbacks& decoder_callbacks) const; /** * @brief Get the effective configuration. Depending on state ,this could be one of static diff --git a/source/server/http_test_server_filter.cc b/source/server/http_test_server_filter.cc index 5dd51851a..bac840bc1 100644 --- a/source/server/http_test_server_filter.cc +++ b/source/server/http_test_server_filter.cc @@ -40,7 +40,7 @@ HttpTestServerDecoderFilter::decodeHeaders(Envoy::Http::RequestHeaderMap& header bool end_stream) { config_->computeEffectiveConfiguration(headers); if (end_stream) { - if (!config_->maybeSendErrorReply(*decoder_callbacks_)) { + if (!config_->validateOrSendError(*decoder_callbacks_)) { const absl::StatusOr effective_config = config_->getEffectiveConfiguration(); if (effective_config.value()->echo_request_headers()) { @@ -57,7 +57,7 @@ HttpTestServerDecoderFilter::decodeHeaders(Envoy::Http::RequestHeaderMap& header Envoy::Http::FilterDataStatus HttpTestServerDecoderFilter::decodeData(Envoy::Buffer::Instance&, bool end_stream) { if (end_stream) { - if (!config_->maybeSendErrorReply(*decoder_callbacks_)) { + if (!config_->validateOrSendError(*decoder_callbacks_)) { sendReply(*config_->getEffectiveConfiguration().value()); } } diff --git a/source/server/http_time_tracking_filter.cc 
b/source/server/http_time_tracking_filter.cc index aba57d46f..a6d787f02 100644 --- a/source/server/http_time_tracking_filter.cc +++ b/source/server/http_time_tracking_filter.cc @@ -31,7 +31,7 @@ HttpTimeTrackingFilter::HttpTimeTrackingFilter(HttpTimeTrackingFilterConfigShare Envoy::Http::FilterHeadersStatus HttpTimeTrackingFilter::decodeHeaders(Envoy::Http::RequestHeaderMap& headers, bool end_stream) { config_->computeEffectiveConfiguration(headers); - if (end_stream && config_->maybeSendErrorReply(*decoder_callbacks_)) { + if (end_stream && config_->validateOrSendError(*decoder_callbacks_)) { return Envoy::Http::FilterHeadersStatus::StopIteration; } return Envoy::Http::FilterHeadersStatus::Continue; @@ -39,7 +39,7 @@ HttpTimeTrackingFilter::decodeHeaders(Envoy::Http::RequestHeaderMap& headers, bo Envoy::Http::FilterDataStatus HttpTimeTrackingFilter::decodeData(Envoy::Buffer::Instance&, bool end_stream) { - if (end_stream && config_->maybeSendErrorReply(*decoder_callbacks_)) { + if (end_stream && config_->validateOrSendError(*decoder_callbacks_)) { return Envoy::Http::FilterDataStatus::StopIterationNoBuffer; } return Envoy::Http::FilterDataStatus::Continue; From 683ef6224a50ef5ff2e46166ca1c955ec863b165 Mon Sep 17 00:00:00 2001 From: Otto van der Schaaf Date: Wed, 11 Nov 2020 16:29:01 +0100 Subject: [PATCH 34/63] CI: make clang-tidy always pass. (#570) As discussed yesterday, make clang-tidy ignore failures to stop the excessive flaking from being a blocker to merge. Signed-off-by: Otto van der Schaaf --- ci/do_ci.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ci/do_ci.sh b/ci/do_ci.sh index e402eccd8..58a7bdab2 100755 --- a/ci/do_ci.sh +++ b/ci/do_ci.sh @@ -34,7 +34,8 @@ function do_test() { } function do_clang_tidy() { - ci/run_clang_tidy.sh + # TODO(#546): deflake clang tidy runs, and remove '|| true' here.
+ ci/run_clang_tidy.sh || true } function do_unit_test_coverage() { From 350c2d39740c6d3f8e9e9c0212d8b3a97c3c26ff Mon Sep 17 00:00:00 2001 From: Otto van der Schaaf Date: Wed, 11 Nov 2020 17:55:43 +0100 Subject: [PATCH 35/63] Update Envoy to 8e6b176b89240d1b8ce3f3e4a8e276e4a40fcd1e (#571) - Handle return value of encodeHeaders() now that there is one. - MetricSnapshotImpl now needs a TimeSource in its constructor. - Avoid assert, use valid request header in StreamDecoderTest Signed-off-by: Otto van der Schaaf --- .bazelrc | 2 +- .circleci/config.yml | 4 ++-- bazel/repositories.bzl | 4 ++-- source/client/flush_worker_impl.cc | 2 +- source/client/stream_decoder.cc | 9 ++++++++- test/benchmark_http_client_test.cc | 1 + test/stream_decoder_test.cc | 3 ++- 7 files changed, 17 insertions(+), 8 deletions(-) diff --git a/.bazelrc b/.bazelrc index 0f66d819b..525535336 100644 --- a/.bazelrc +++ b/.bazelrc @@ -254,7 +254,7 @@ build:remote-clang-cl --config=rbe-toolchain-clang-cl # Docker sandbox # NOTE: Update this from https://github.com/envoyproxy/envoy-build-tools/blob/master/toolchains/rbe_toolchains_config.bzl#L8 -build:docker-sandbox --experimental_docker_image=envoyproxy/envoy-build-ubuntu:b480535e8423b5fd7c102fd30c92f4785519e33a +build:docker-sandbox --experimental_docker_image=envoyproxy/envoy-build-ubuntu:19a268cfe3d12625380e7c61d2467c8779b58b56 build:docker-sandbox --spawn_strategy=docker build:docker-sandbox --strategy=Javac=docker build:docker-sandbox --strategy=Closure=docker diff --git a/.circleci/config.yml b/.circleci/config.yml index 8559d09e4..a1415233a 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -1,6 +1,6 @@ references: - envoy-build-image: &envoy-build-image # October 12th, 2020 - envoyproxy/envoy-build-ubuntu:b480535e8423b5fd7c102fd30c92f4785519e33a + envoy-build-image: &envoy-build-image # November 10th, 2020 + envoyproxy/envoy-build-ubuntu:19a268cfe3d12625380e7c61d2467c8779b58b56 version: 2 jobs: build: diff --git 
a/bazel/repositories.bzl b/bazel/repositories.bzl index c4fb1f405..1592a3271 100644 --- a/bazel/repositories.bzl +++ b/bazel/repositories.bzl @@ -1,7 +1,7 @@ load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") -ENVOY_COMMIT = "0226d0e084d832ce24ee6303f5cb2fc01ec4970b" # November 2nd, 2020 -ENVOY_SHA = "ed91cf0946155aac81c4601d55d81ba10d9189628259846c62f84fbe5a9059c0" +ENVOY_COMMIT = "8e6b176b89240d1b8ce3f3e4a8e276e4a40fcd1e" # November 10th, 2020 +ENVOY_SHA = "1eba9e904699bbc43c708f90c9e7b1354aed7bafe3784be2c6bfa04919cc67eb" HDR_HISTOGRAM_C_VERSION = "0.11.2" # October 12th, 2020 HDR_HISTOGRAM_C_SHA = "637f28b5f64de2e268131e4e34e6eef0b91cf5ff99167db447d9b2825eae6bad" diff --git a/source/client/flush_worker_impl.cc b/source/client/flush_worker_impl.cc index a1a5bd26b..d369a74ae 100644 --- a/source/client/flush_worker_impl.cc +++ b/source/client/flush_worker_impl.cc @@ -37,7 +37,7 @@ void FlushWorkerImpl::flushStats() { // Create a snapshot and flush to all sinks. Even if there are no sinks, // creating the snapshot has the important property that it latches all counters on a periodic // basis. - Envoy::Server::MetricSnapshotImpl snapshot(store_); + Envoy::Server::MetricSnapshotImpl snapshot(store_, time_source_); for (std::unique_ptr& sink : stats_sinks_) { sink->flush(snapshot); } diff --git a/source/client/stream_decoder.cc b/source/client/stream_decoder.cc index 86f4c681d..4c935e959 100644 --- a/source/client/stream_decoder.cc +++ b/source/client/stream_decoder.cc @@ -107,7 +107,14 @@ void StreamDecoder::onPoolReady(Envoy::Http::RequestEncoder& encoder, // Make sure we hear about stream resets on the encoder. encoder.getStream().addCallbacks(*this); upstream_timing_.onFirstUpstreamTxByteSent(time_source_); // XXX(oschaaf): is this correct? 
- encoder.encodeHeaders(*request_headers_, request_body_size_ == 0); + const Envoy::Http::Status status = + encoder.encodeHeaders(*request_headers_, request_body_size_ == 0); + if (!status.ok()) { + ENVOY_LOG_EVERY_POW_2(error, + "Request header encoding failure. Might be missing one or more required " + "HTTP headers in {}.", + request_headers_); + } if (request_body_size_ > 0) { // TODO(https://github.com/envoyproxy/nighthawk/issues/138): This will show up in the zipkin UI // as 'response_size'. We add it here, optimistically assuming it will all be send. Ideally, diff --git a/test/benchmark_http_client_test.cc b/test/benchmark_http_client_test.cc index f61f0f8a6..e92cbaf13 100644 --- a/test/benchmark_http_client_test.cc +++ b/test/benchmark_http_client_test.cc @@ -110,6 +110,7 @@ class BenchmarkClientHttpTest : public Test { .WillByDefault( WithArgs<0>(([&called_headers](const Envoy::Http::RequestHeaderMap& specific_request) { called_headers.insert(getPathFromRequest(specific_request)); + return Envoy::Http::Status(); }))); EXPECT_CALL(pool_, newStream(_, _)) diff --git a/test/stream_decoder_test.cc b/test/stream_decoder_test.cc index 4603c0e65..cd7630389 100644 --- a/test/stream_decoder_test.cc +++ b/test/stream_decoder_test.cc @@ -26,7 +26,8 @@ class StreamDecoderTest : public Test, public StreamDecoderCompletionCallback { : api_(Envoy::Api::createApiForTest(time_system_)), dispatcher_(api_->allocateDispatcher("test_thread")), request_headers_(std::make_shared( - std::initializer_list>({{":method", "GET"}}))), + std::initializer_list>( + {{":method", "GET"}, {":path", "/foo"}}))), http_tracer_(std::make_unique()), test_header_(std::make_unique( std::initializer_list>({{":status", "200"}}))), From d63ade72f3801cbe5fe9f069616502736a456a3b Mon Sep 17 00:00:00 2001 From: wjuan-AFK <66322422+wjuan-AFK@users.noreply.github.com> Date: Wed, 11 Nov 2020 13:09:38 -0500 Subject: [PATCH 36/63] Factory plugin implementation (#566) Adding implementation in the factory_impl 
class for loading request source plugins. This also introduces python tests for the previous PR and undoes the temporary hack to reduce test_integration_coverage threshold in issue #564. Signed-off-by: William Juan <66322422+wjuan-AFK@users.noreply.github.com> --- source/client/factories_impl.cc | 31 ++++- source/client/factories_impl.h | 22 +++- source/client/process_impl.cc | 3 +- test/BUILD | 1 + test/factories_test.cc | 114 +++++++++++++++++- .../integration/test_request_source_plugin.py | 59 +++++++++ .../request_source/test_data/test-config.yaml | 18 ++- tools/format_python_tools.sh | 3 +- 8 files changed, 231 insertions(+), 20 deletions(-) create mode 100644 test/integration/test_request_source_plugin.py diff --git a/source/client/factories_impl.cc b/source/client/factories_impl.cc index 63ae22da6..01ae6c50d 100644 --- a/source/client/factories_impl.cc +++ b/source/client/factories_impl.cc @@ -17,6 +17,8 @@ #include "client/output_collector_impl.h" #include "client/output_formatter_impl.h" +#include "request_source/request_options_list_plugin_impl.h" + using namespace std::chrono_literals; namespace Nighthawk { @@ -117,8 +119,8 @@ OutputFormatterPtr OutputFormatterFactoryImpl::create( } } -RequestSourceFactoryImpl::RequestSourceFactoryImpl(const Options& options) - : OptionBasedFactoryImpl(options) {} +RequestSourceFactoryImpl::RequestSourceFactoryImpl(const Options& options, Envoy::Api::Api& api) + : OptionBasedFactoryImpl(options), api_(api) {} void RequestSourceFactoryImpl::setRequestHeader(Envoy::Http::RequestHeaderMap& header, absl::string_view key, @@ -168,7 +170,6 @@ RequestSourceFactoryImpl::create(const Envoy::Upstream::ClusterManagerPtr& clust for (const auto& option_header : request_options.request_headers()) { setRequestHeader(*header, option_header.header().key(), option_header.header().value()); } - if (!options_.requestSource().empty()) { RELEASE_ASSERT(!service_cluster_name.empty(), "expected cluster name to be set"); // We pass in 
options_.requestsPerSecond() as the header buffer length so the grpc client @@ -176,10 +177,34 @@ RequestSourceFactoryImpl::create(const Envoy::Upstream::ClusterManagerPtr& clust return std::make_unique(cluster_manager, dispatcher, scope, service_cluster_name, std::move(header), options_.requestsPerSecond()); + } else if (options_.requestSourcePluginConfig().has_value()) { + absl::StatusOr plugin_or = LoadRequestSourcePlugin( + options_.requestSourcePluginConfig().value(), api_, std::move(header)); + if (!plugin_or.ok()) { + throw NighthawkException( + absl::StrCat("Request Source plugin loading error should have been caught " + "during input validation: ", + plugin_or.status().message())); + } + RequestSourcePtr request_source = std::move(plugin_or.value()); + return request_source; } else { return std::make_unique(std::move(header)); } } +absl::StatusOr RequestSourceFactoryImpl::LoadRequestSourcePlugin( + const envoy::config::core::v3::TypedExtensionConfig& config, Envoy::Api::Api& api, + Envoy::Http::RequestHeaderMapPtr header) const { + try { + auto& config_factory = + Envoy::Config::Utility::getAndCheckFactoryByName( + config.name()); + return config_factory.createRequestSourcePlugin(config.typed_config(), api, std::move(header)); + } catch (const Envoy::EnvoyException& e) { + return absl::InvalidArgumentError( + absl::StrCat("Could not load plugin: ", config.name(), ": ", e.what())); + } +} TerminationPredicateFactoryImpl::TerminationPredicateFactoryImpl(const Options& options) : OptionBasedFactoryImpl(options) {} diff --git a/source/client/factories_impl.h b/source/client/factories_impl.h index fa7e6c3cd..2c8abfd6d 100644 --- a/source/client/factories_impl.h +++ b/source/client/factories_impl.h @@ -9,6 +9,9 @@ #include "nighthawk/common/termination_predicate.h" #include "nighthawk/common/uri.h" +#include "external/envoy/source/common/common/statusor.h" +#include "external/envoy/source/common/config/utility.h" + #include "common/platform_util_impl.h" 
namespace Nighthawk { @@ -58,14 +61,31 @@ class OutputFormatterFactoryImpl : public OutputFormatterFactory { class RequestSourceFactoryImpl : public OptionBasedFactoryImpl, public RequestSourceFactory { public: - RequestSourceFactoryImpl(const Options& options); + RequestSourceFactoryImpl(const Options& options, Envoy::Api::Api& api); RequestSourcePtr create(const Envoy::Upstream::ClusterManagerPtr& cluster_manager, Envoy::Event::Dispatcher& dispatcher, Envoy::Stats::Scope& scope, absl::string_view service_cluster_name) const override; private: + Envoy::Api::Api& api_; void setRequestHeader(Envoy::Http::RequestHeaderMap& header, absl::string_view key, absl::string_view value) const; + /** + * Instantiates a RequestSource using a RequestSourcePluginFactory based on the plugin name in + * |config|, unpacking the plugin-specific config proto within |config|. Validates the config + * proto. + * + * @param config Proto containing plugin name and plugin-specific config proto. + * @param api Api parameter that contains timesystem, filesystem, and threadfactory. + * @param header Any headers in request specifiers yielded by the request + * source plugin will override what is specified here. + + * @return absl::StatusOr Initialized plugin or error status due to missing + * plugin or config proto validation error. 
+ */ + absl::StatusOr + LoadRequestSourcePlugin(const envoy::config::core::v3::TypedExtensionConfig& config, + Envoy::Api::Api& api, Envoy::Http::RequestHeaderMapPtr header) const; }; class TerminationPredicateFactoryImpl : public OptionBasedFactoryImpl, diff --git a/source/client/process_impl.cc b/source/client/process_impl.cc index 516a76f92..08cbaefe5 100644 --- a/source/client/process_impl.cc +++ b/source/client/process_impl.cc @@ -104,7 +104,8 @@ ProcessImpl::ProcessImpl(const Options& options, Envoy::Event::TimeSystem& time_ generator_)), dispatcher_(api_->allocateDispatcher("main_thread")), benchmark_client_factory_(options), termination_predicate_factory_(options), sequencer_factory_(options), - request_generator_factory_(options), options_(options), init_manager_("nh_init_manager"), + request_generator_factory_(options, *api_), options_(options), + init_manager_("nh_init_manager"), local_info_(new Envoy::LocalInfo::LocalInfoImpl( {}, Envoy::Network::Utility::getLocalAddress(Envoy::Network::Address::IpVersion::v4), "nighthawk_service_zone", "nighthawk_service_cluster", "nighthawk_service_node")), diff --git a/test/BUILD b/test/BUILD index ecb8d5039..326d971ee 100644 --- a/test/BUILD +++ b/test/BUILD @@ -87,6 +87,7 @@ envoy_cc_test( "//test/mocks/client:mock_benchmark_client", "//test/mocks/client:mock_options", "//test/mocks/common:mock_termination_predicate", + "//test/test_common:environment_lib", "@envoy//test/mocks/event:event_mocks", "@envoy//test/mocks/tracing:tracing_mocks", "@envoy//test/test_common:simulated_time_system_lib", diff --git a/test/factories_test.cc b/test/factories_test.cc index 1a410496a..21cf49c9e 100644 --- a/test/factories_test.cc +++ b/test/factories_test.cc @@ -11,6 +11,7 @@ #include "test/mocks/client/mock_benchmark_client.h" #include "test/mocks/client/mock_options.h" #include "test/mocks/common/mock_termination_predicate.h" +#include "test/test_common/environment.h" #include "gtest/gtest.h" @@ -52,20 +53,125 @@ 
TEST_F(FactoriesTest, CreateBenchmarkClient) { EXPECT_NE(nullptr, benchmark_client.get()); } +TEST_F(FactoriesTest, CreateRequestSourcePluginWithWorkingJsonReturnsWorkingRequestSource) { + absl::optional request_source_plugin_config; + std::string request_source_plugin_config_json = + "{" + "name:\"nighthawk.in-line-options-list-request-source-plugin\"," + "typed_config:{" + "\"@type\":\"type.googleapis.com/" + "nighthawk.request_source.InLineOptionsListRequestSourceConfig\"," + "options_list:{" + "options:[{request_method:\"1\",request_headers:[{header:{key:\":path\",value:\"inlinepath\"}" + "}]}]" + "}," + "}" + "}"; + request_source_plugin_config.emplace(envoy::config::core::v3::TypedExtensionConfig()); + Envoy::MessageUtil::loadFromJson(request_source_plugin_config_json, + request_source_plugin_config.value(), + Envoy::ProtobufMessage::getStrictValidationVisitor()); + EXPECT_CALL(options_, requestMethod()).Times(1); + EXPECT_CALL(options_, requestBodySize()).Times(1); + EXPECT_CALL(options_, uri()).Times(2).WillRepeatedly(Return("http://foo/")); + EXPECT_CALL(options_, requestSource()).Times(1); + EXPECT_CALL(options_, requestSourcePluginConfig()) + .Times(2) + .WillRepeatedly(ReturnRef(request_source_plugin_config)); + auto cmd = std::make_unique(); + envoy::config::core::v3::HeaderValueOption* request_headers = + cmd->mutable_request_options()->add_request_headers(); + request_headers->mutable_header()->set_key("foo"); + request_headers->mutable_header()->set_value("bar"); + EXPECT_CALL(options_, toCommandLineOptions()).Times(1).WillOnce(Return(ByMove(std::move(cmd)))); + RequestSourceFactoryImpl factory(options_, *api_); + Envoy::Upstream::ClusterManagerPtr cluster_manager; + Nighthawk::RequestSourcePtr request_source = factory.create( + cluster_manager, dispatcher_, *stats_store_.createScope("foo."), "requestsource"); + EXPECT_NE(nullptr, request_source.get()); + Nighthawk::RequestGenerator generator = request_source->get(); + Nighthawk::RequestPtr request 
= generator(); + EXPECT_EQ("inlinepath", request->header()->getPathValue()); +} + +TEST_F(FactoriesTest, CreateRequestSourcePluginWithNonWorkingJsonThrowsError) { + absl::optional request_source_plugin_config; + std::string request_source_plugin_config_json = + "{" + R"(name:"nighthawk.file-based-request-source-plugin",)" + "typed_config:{" + R"("@type":"type.googleapis.com/)" + R"(nighthawk.request_source.FileBasedOptionsListRequestSourceConfig",)" + R"(file_path:")" + + TestEnvironment::runfilesPath("test/request_source/test_data/NotARealFile.yaml") + + "\"," + "}" + "}"; + request_source_plugin_config.emplace(envoy::config::core::v3::TypedExtensionConfig()); + Envoy::MessageUtil::loadFromJson(request_source_plugin_config_json, + request_source_plugin_config.value(), + Envoy::ProtobufMessage::getStrictValidationVisitor()); + EXPECT_CALL(options_, requestMethod()).Times(1); + EXPECT_CALL(options_, requestBodySize()).Times(1); + EXPECT_CALL(options_, uri()).Times(2).WillRepeatedly(Return("http://foo/")); + EXPECT_CALL(options_, requestSource()).Times(1); + EXPECT_CALL(options_, requestSourcePluginConfig()) + .Times(2) + .WillRepeatedly(ReturnRef(request_source_plugin_config)); + auto cmd = std::make_unique(); + envoy::config::core::v3::HeaderValueOption* request_headers = + cmd->mutable_request_options()->add_request_headers(); + request_headers->mutable_header()->set_key("foo"); + request_headers->mutable_header()->set_value("bar"); + EXPECT_CALL(options_, toCommandLineOptions()).Times(1).WillOnce(Return(ByMove(std::move(cmd)))); + RequestSourceFactoryImpl factory(options_, *api_); + Envoy::Upstream::ClusterManagerPtr cluster_manager; + EXPECT_THROW_WITH_REGEX( + factory.create(cluster_manager, dispatcher_, *stats_store_.createScope("foo."), + "requestsource"), + NighthawkException, + "Request Source plugin loading error should have been caught during input validation"); +} + TEST_F(FactoriesTest, CreateRequestSource) { + absl::optional 
request_source_plugin_config; EXPECT_CALL(options_, requestMethod()).Times(1); EXPECT_CALL(options_, requestBodySize()).Times(1); EXPECT_CALL(options_, uri()).Times(2).WillRepeatedly(Return("http://foo/")); EXPECT_CALL(options_, requestSource()).Times(1); + EXPECT_CALL(options_, requestSourcePluginConfig()) + .Times(1) + .WillRepeatedly(ReturnRef(request_source_plugin_config)); + auto cmd = std::make_unique(); + envoy::config::core::v3::HeaderValueOption* request_headers = + cmd->mutable_request_options()->add_request_headers(); + request_headers->mutable_header()->set_key("foo"); + request_headers->mutable_header()->set_value("bar"); + EXPECT_CALL(options_, toCommandLineOptions()).Times(1).WillOnce(Return(ByMove(std::move(cmd)))); + RequestSourceFactoryImpl factory(options_, *api_); + Envoy::Upstream::ClusterManagerPtr cluster_manager; + RequestSourcePtr request_generator = factory.create( + cluster_manager, dispatcher_, *stats_store_.createScope("foo."), "requestsource"); + EXPECT_NE(nullptr, request_generator.get()); +} + +TEST_F(FactoriesTest, CreateRemoteRequestSource) { + absl::optional request_source_plugin_config; + EXPECT_CALL(options_, requestMethod()).Times(1); + EXPECT_CALL(options_, requestBodySize()).Times(1); + EXPECT_CALL(options_, uri()).Times(2).WillRepeatedly(Return("http://foo/")); + EXPECT_CALL(options_, requestSource()).Times(1).WillRepeatedly(Return("http://bar/")); + EXPECT_CALL(options_, requestsPerSecond()).Times(1).WillRepeatedly(Return(5)); auto cmd = std::make_unique(); - auto request_headers = cmd->mutable_request_options()->add_request_headers(); + envoy::config::core::v3::HeaderValueOption* request_headers = + cmd->mutable_request_options()->add_request_headers(); request_headers->mutable_header()->set_key("foo"); request_headers->mutable_header()->set_value("bar"); EXPECT_CALL(options_, toCommandLineOptions()).Times(1).WillOnce(Return(ByMove(std::move(cmd)))); - RequestSourceFactoryImpl factory(options_); + RequestSourceFactoryImpl 
factory(options_, *api_); Envoy::Upstream::ClusterManagerPtr cluster_manager; - auto request_generator = factory.create(cluster_manager, dispatcher_, - *stats_store_.createScope("foo."), "requestsource"); + RequestSourcePtr request_generator = factory.create( + cluster_manager, dispatcher_, *stats_store_.createScope("foo."), "requestsource"); EXPECT_NE(nullptr, request_generator.get()); } diff --git a/test/integration/test_request_source_plugin.py b/test/integration/test_request_source_plugin.py new file mode 100644 index 000000000..7609841c2 --- /dev/null +++ b/test/integration/test_request_source_plugin.py @@ -0,0 +1,59 @@ +"""Tests for the nighthawk_service binary.""" + +import pytest +import os + +from test.integration.integration_test_fixtures import (http_test_server_fixture, server_config) +from test.integration import utility +from test.integration import asserts + + +@pytest.mark.parametrize( + "request_source_config,expected_min,expected_max", + [ + pytest.param(""" + { + name:"nighthawk.in-line-options-list-request-source-plugin", + typed_config:{ + "@type":"type.googleapis.com/nighthawk.request_source.InLineOptionsListRequestSourceConfig", + options_list:{ + options:[ + {request_method:"1",request_body_size:"1",request_headers:[{header:{"key":"x-nighthawk-test-server-config","value":"{response_body_size:13}"}}]}, + {request_method:"1",request_body_size:"2",request_headers:[{header:{"key":"x-nighthawk-test-server-config","value":"{response_body_size:17}"}}]}, + ] + }, + } + }""", + 13, + 17, + id="in-line"), + pytest.param(""" + { + name:"nighthawk.file-based-request-source-plugin", + typed_config:{ + "@type":"type.googleapis.com/nighthawk.request_source.FileBasedOptionsListRequestSourceConfig", + file_path:"%s", + } + }""" % (os.path.dirname(os.path.abspath(os.path.dirname(__file__))) + + "/request_source/test_data/test-config.yaml"), + 13, + 17, + id="file-based"), + ], +) +def 
test_request_source_plugin_happy_flow_parametrized(http_test_server_fixture, + request_source_config, expected_min, + expected_max): + """Test that the nighthawkClient can run with request-source-plugin option.""" + parsed_json, _ = http_test_server_fixture.runNighthawkClient([ + "--termination-predicate", "benchmark.http_2xx:5", "--rps 10", + "--request-source-plugin-config %s" % request_source_config, + http_test_server_fixture.getTestServerRootUri(), "--request-header", "host: sni.com" + ]) + counters = http_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json) + global_histograms = http_test_server_fixture.getNighthawkGlobalHistogramsbyIdFromJson(parsed_json) + asserts.assertGreaterEqual(counters["benchmark.http_2xx"], 5) + asserts.assertEqual(int(global_histograms["benchmark_http_client.response_body_size"]["raw_max"]), + expected_max) + asserts.assertEqual(int(global_histograms["benchmark_http_client.response_body_size"]["raw_min"]), + expected_min) diff --git a/test/request_source/test_data/test-config.yaml b/test/request_source/test_data/test-config.yaml index e8b4cdaac..b78bcba64 100644 --- a/test/request_source/test_data/test-config.yaml +++ b/test/request_source/test_data/test-config.yaml @@ -1,13 +1,11 @@ options: - - request_body_size: 10 + - request_method: 1 + request_body_size: 10 request_headers: - - { header: { key: ":path", value: "/a" } } - - { header: { key: "foo", value: "bar" } } - - { header: { key: "foo", value: "bar2" } } - - { header: { key: "x-nh", value: "1" } } - - request_body_size: 10 + - { header: { key: ":path", value: "/a" } } + - { header: { key: "x-nighthawk-test-server-config", value: "{response_body_size:13}" } } + - request_method: 1 + request_body_size: 10 request_headers: - - { header: { key: ":path", value: "/b" } } - - { header: { key: "bar", value: "foo" } } - - { header: { key: "bar", value: "foo2" } } - - { header: { key: "x-nh", value: "2" } } \ No newline at end of file + - { header: { key: ":path", value: 
"/b" } } + - { header: { key: "x-nighthawk-test-server-config", value: "{response_body_size:17}" } } \ No newline at end of file diff --git a/tools/format_python_tools.sh b/tools/format_python_tools.sh index 2523042d5..e1657df41 100755 --- a/tools/format_python_tools.sh +++ b/tools/format_python_tools.sh @@ -26,11 +26,12 @@ EXCLUDE="--exclude=benchmarks/tmp/*,.cache/*,*/venv/*,tools/format_python_tools. # E124 Closing bracket does not match visual indentation # E125 Continuation line with same indent as next logical line # E126 Continuation line over-indented for hanging indent +# W504 line break after binary operator # We ignore false positives because of what look like pytest peculiarities # F401 Module imported but unused # F811 Redefinition of unused name from line n -flake8 . ${EXCLUDE} --ignore=E114,E111,E501,F401,F811,E124,E125,E126,D --count --show-source --statistics +flake8 . ${EXCLUDE} --ignore=E114,E111,E501,F401,F811,E124,E125,E126,W504,D --count --show-source --statistics # D = Doc comment related checks (We check both p257 AND google conventions). flake8 . ${EXCLUDE} --docstring-convention pep257 --select=D --count --show-source --statistics flake8 . ${EXCLUDE} --docstring-convention google --select=D --count --show-source --statistics From 4d831ff59d7a53c7aa0839757b075070c7ee927d Mon Sep 17 00:00:00 2001 From: yanavlasov Date: Mon, 16 Nov 2020 13:12:52 -0500 Subject: [PATCH 37/63] Replace deprecated thread annotations macros. 
(#572) Signed-off-by: Yan Avlasov --- source/common/thread_safe_monotonic_time_stopwatch.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/source/common/thread_safe_monotonic_time_stopwatch.h b/source/common/thread_safe_monotonic_time_stopwatch.h index 2e7b57d44..0d2e1e487 100644 --- a/source/common/thread_safe_monotonic_time_stopwatch.h +++ b/source/common/thread_safe_monotonic_time_stopwatch.h @@ -34,7 +34,7 @@ class ThreadSafeMontonicTimeStopwatch : public Stopwatch { private: Envoy::Thread::MutexBasicLockable lock_; - Envoy::MonotonicTime start_ GUARDED_BY(lock_); + Envoy::MonotonicTime start_ ABSL_GUARDED_BY(lock_); }; -} // namespace Nighthawk \ No newline at end of file +} // namespace Nighthawk From d58d63a22a5fe08164f450c2795c272498e06813 Mon Sep 17 00:00:00 2001 From: Jakub Sobon Date: Wed, 2 Dec 2020 14:32:06 -0500 Subject: [PATCH 38/63] Migrating configs of integration tests to Envoy API v3. (#583) Verified that all integration tests pass after updating Envoy to commit `588d9344b31e6544869547c4bcd359b3b0f1d4cf`, so this PR unblocks #575. Our next steps will be adding a good warning and a compatibility flag for users of Nighthawk. If they do send configs with Envoy API v2, we will break by default, but allow them to continue with the compatibility flag. Summary of performed changes: - changing `config` to `typed_config` and listing the correct type. - migrating from deprecated field `tls_context` to `transport_socket`. - changing filter names to ones that match extension names in [extensions_build_config.bzl](https://github.com/envoyproxy/nighthawk/blob/master/extensions_build_config.bzl). - cosmetic changes of enum value from `auto` to `AUTO`. Also: - updating README and help displayed by the CLI in regards to passing in the `--tls-context` flag since this behavior is mirrored by one of the edited integration tests. 
- Adding the `test_request_source_plugin.py` integration test as a dependency of the `integration_test` py_binary which was forgotten before. Works on #580 Signed-off-by: Jakub Sobon --- README.md | 4 +-- source/client/options_impl.cc | 3 +- test/integration/BUILD | 7 ++++ .../configurations/nighthawk_http_origin.yaml | 15 ++++---- .../nighthawk_https_origin.yaml | 36 +++++++++++-------- .../nighthawk_track_timings.yaml | 18 ++++++---- .../configurations/sni_origin.yaml | 34 ++++++++++-------- test/integration/test_integration_basics.py | 2 +- test/options_test.cc | 28 ++++++++------- 9 files changed, 88 insertions(+), 59 deletions(-) diff --git a/README.md b/README.md index 4c3a2791f..f5a0d58ca 100644 --- a/README.md +++ b/README.md @@ -198,8 +198,8 @@ any other value will allow client-side queuing of requests). Transport socket configuration in json or compact yaml. Mutually exclusive with --tls-context. Example (json): {name:"envoy.transport_sockets.tls" -,typed_config:{"@type":"type.googleapis.com/envoy.api.v2.auth.Upstream -TlsContext" +,typed_config:{"@type":"type.googleapis.com/envoy.extensions.transport +_sockets.tls.v3.UpstreamTlsContext" ,common_tls_context:{tls_params:{cipher_suites:["-ALL:ECDHE-RSA-AES128 -SHA"]}}}} diff --git a/source/client/options_impl.cc b/source/client/options_impl.cc index acac38d80..29af954e7 100644 --- a/source/client/options_impl.cc +++ b/source/client/options_impl.cc @@ -144,7 +144,8 @@ OptionsImpl::OptionsImpl(int argc, const char* const* argv) { "Transport socket configuration in json or compact yaml. " "Mutually exclusive with --tls-context. 
Example (json): " "{name:\"envoy.transport_sockets.tls\",typed_config:{" - "\"@type\":\"type.googleapis.com/envoy.api.v2.auth.UpstreamTlsContext\"," + "\"@type\":\"type.googleapis.com/" + "envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext\"," "common_tls_context:{tls_params:{cipher_suites:[\"-ALL:ECDHE-RSA-AES128-SHA\"]}}}}", false, "", "string", cmd); diff --git a/test/integration/BUILD b/test/integration/BUILD index 00765a29b..1290e6090 100644 --- a/test/integration/BUILD +++ b/test/integration/BUILD @@ -106,6 +106,12 @@ py_library( deps = [":integration_test_base"], ) +py_library( + name = "test_request_source_plugin_lib", + srcs = ["test_request_source_plugin.py"], + deps = [":integration_test_base"], +) + py_binary( name = "integration_test", srcs = ["integration_test.py"], @@ -122,5 +128,6 @@ py_binary( ":test_integration_zipkin_lib", ":test_output_transform_lib", ":test_remote_execution_lib", + ":test_request_source_plugin_lib", ], ) diff --git a/test/integration/configurations/nighthawk_http_origin.yaml b/test/integration/configurations/nighthawk_http_origin.yaml index 4854523de..618a11758 100644 --- a/test/integration/configurations/nighthawk_http_origin.yaml +++ b/test/integration/configurations/nighthawk_http_origin.yaml @@ -11,10 +11,11 @@ static_resources: port_value: 0 filter_chains: - filters: - - name: envoy.http_connection_manager - config: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager generate_request_id: false - codec_type: auto + codec_type: AUTO stat_prefix: ingress_http route_config: name: local_route @@ -26,10 +27,12 @@ static_resources: - name: time-tracking - name: dynamic-delay - name: test-server - config: + typed_config: + "@type": type.googleapis.com/nighthawk.server.ResponseOptions response_body_size: 10 response_headers: - { header: { key: "x-nh", value: "1"}} - - name: envoy.router - 
config: + - name: envoy.filters.http.router + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router dynamic_stats: false diff --git a/test/integration/configurations/nighthawk_https_origin.yaml b/test/integration/configurations/nighthawk_https_origin.yaml index 708dbdb67..9bdc69805 100644 --- a/test/integration/configurations/nighthawk_https_origin.yaml +++ b/test/integration/configurations/nighthawk_https_origin.yaml @@ -11,10 +11,11 @@ static_resources: port_value: 0 filter_chains: - filters: - - name: envoy.http_connection_manager - config: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager generate_request_id: false - codec_type: auto + codec_type: AUTO stat_prefix: ingress_http route_config: name: local_route @@ -24,19 +25,24 @@ static_resources: - "*" http_filters: - name: test-server - config: + typed_config: + "@type": type.googleapis.com/nighthawk.server.ResponseOptions response_body_size: 10 response_headers: - { header: { key: "x-nh", value: "1" } } - - name: envoy.router - config: + - name: envoy.filters.http.router + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router dynamic_stats: false - tls_context: - common_tls_context: - tls_certificates: - - certificate_chain: - inline_string: | - @inject-runfile:nighthawk/external/envoy/test/config/integration/certs/servercert.pem - private_key: - inline_string: | - @inject-runfile:nighthawk/external/envoy/test/config/integration/certs/serverkey.pem + transport_socket: + name: tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + tls_certificates: + - certificate_chain: + inline_string: | + @inject-runfile:nighthawk/external/envoy/test/config/integration/certs/servercert.pem + private_key: + inline_string: | + 
@inject-runfile:nighthawk/external/envoy/test/config/integration/certs/serverkey.pem diff --git a/test/integration/configurations/nighthawk_track_timings.yaml b/test/integration/configurations/nighthawk_track_timings.yaml index 7f4d1b2f5..ab4b37db0 100644 --- a/test/integration/configurations/nighthawk_track_timings.yaml +++ b/test/integration/configurations/nighthawk_track_timings.yaml @@ -14,10 +14,11 @@ static_resources: port_value: 0 filter_chains: - filters: - - name: envoy.http_connection_manager - config: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager generate_request_id: false - codec_type: auto + codec_type: AUTO stat_prefix: ingress_http route_config: name: local_route @@ -28,11 +29,14 @@ static_resources: http_filters: # Here we set up the time-tracking extension to emit request-arrival delta timings in a response header. - name: time-tracking - config: + typed_config: + "@type": type.googleapis.com/nighthawk.server.ResponseOptions emit_previous_request_delta_in_response_header: x-origin-request-receipt-delta - name: test-server - config: + typed_config: + "@type": type.googleapis.com/nighthawk.server.ResponseOptions response_body_size: 10 - - name: envoy.router - config: + - name: envoy.filters.http.router + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router dynamic_stats: false diff --git a/test/integration/configurations/sni_origin.yaml b/test/integration/configurations/sni_origin.yaml index 9d49bae35..ba531fd7c 100644 --- a/test/integration/configurations/sni_origin.yaml +++ b/test/integration/configurations/sni_origin.yaml @@ -18,7 +18,7 @@ static_resources: transport_socket: name: envoy.transport_sockets.tls typed_config: - "@type": type.googleapis.com/envoy.api.v2.auth.DownstreamTlsContext + "@type": 
type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext common_tls_context: tls_certificates: - certificate_chain: @@ -36,7 +36,7 @@ static_resources: transport_socket: name: envoy.transport_sockets.tls typed_config: - "@type": type.googleapis.com/envoy.api.v2.auth.DownstreamTlsContext + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext common_tls_context: tls_certificates: - certificate_chain: @@ -46,10 +46,11 @@ static_resources: inline_string: | @inject-runfile:nighthawk/external/envoy/test/config/integration/certs/serverkey.pem filters: - - name: envoy.http_connection_manager - config: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager generate_request_id: false - codec_type: auto + codec_type: AUTO stat_prefix: ingress_http route_config: name: local_route @@ -59,18 +60,21 @@ static_resources: - "sni.com" http_filters: - name: test-server - config: + typed_config: + "@type": type.googleapis.com/nighthawk.server.ResponseOptions response_body_size: 10 response_headers: - { header: { key: "x-nh", value: "1"}} - - name: envoy.router - config: + - name: envoy.filters.http.router + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router dynamic_stats: false - filters: - - name: envoy.http_connection_manager - config: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager generate_request_id: false - codec_type: auto + codec_type: AUTO stat_prefix: ingress_http route_config: name: local_route @@ -80,10 +84,12 @@ static_resources: - "*" http_filters: - name: test-server - config: + typed_config: + "@type": type.googleapis.com/nighthawk.server.ResponseOptions response_body_size: 10 response_headers: - { 
header: { key: "x-nh", value: "1"}} - - name: envoy.router - config: + - name: envoy.filters.http.router + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router dynamic_stats: false diff --git a/test/integration/test_integration_basics.py b/test/integration/test_integration_basics.py index 5639fd99d..03174800b 100644 --- a/test/integration/test_integration_basics.py +++ b/test/integration/test_integration_basics.py @@ -311,7 +311,7 @@ def _do_tls_configuration_test(https_test_server_fixture, cli_parameter, use_h2) else: json_template = "%s%s%s" % ( "{name:\"envoy.transport_sockets.tls\",typed_config:{", - "\"@type\":\"type.googleapis.com/envoy.api.v2.auth.UpstreamTlsContext\",", + "\"@type\":\"type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext\",", "common_tls_context:{tls_params:{cipher_suites:[\"-ALL:%s\"]}}}}") for cipher in [ diff --git a/test/options_test.cc b/test/options_test.cc index 6c8a5a8ae..d65604b67 100644 --- a/test/options_test.cc +++ b/test/options_test.cc @@ -121,7 +121,8 @@ TEST_F(OptionsImplTest, AlmostAll) { "--latency-response-header-name zz", client_name_, "{name:\"envoy.transport_sockets.tls\"," - "typed_config:{\"@type\":\"type.googleapis.com/envoy.api.v2.auth.UpstreamTlsContext\"," + "typed_config:{\"@type\":\"type.googleapis.com/" + "envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext\"," "common_tls_context:{tls_params:{" "cipher_suites:[\"-ALL:ECDHE-RSA-AES256-GCM-SHA384\"]}}}}", good_test_uri_, sink_json_1, sink_json_2)); @@ -142,18 +143,19 @@ TEST_F(OptionsImplTest, AlmostAll) { const std::vector expected_headers = {"f1:b1", "f2:b2", "f3:b3:b4"}; EXPECT_EQ(expected_headers, options->requestHeaders()); EXPECT_EQ(1234, options->requestBodySize()); - EXPECT_EQ("name: \"envoy.transport_sockets.tls\"\n" - "typed_config {\n" - " [type.googleapis.com/envoy.api.v2.auth.UpstreamTlsContext] {\n" - " common_tls_context {\n" - " tls_params {\n" - " cipher_suites: 
\"-ALL:ECDHE-RSA-AES256-GCM-SHA384\"\n" - " }\n" - " }\n" - " }\n" - "}\n" - "183412668: \"envoy.api.v2.core.TransportSocket\"\n", - options->transportSocket().value().DebugString()); + EXPECT_EQ( + "name: \"envoy.transport_sockets.tls\"\n" + "typed_config {\n" + " [type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext] {\n" + " common_tls_context {\n" + " tls_params {\n" + " cipher_suites: \"-ALL:ECDHE-RSA-AES256-GCM-SHA384\"\n" + " }\n" + " }\n" + " }\n" + "}\n" + "183412668: \"envoy.api.v2.core.TransportSocket\"\n", + options->transportSocket().value().DebugString()); EXPECT_EQ(10, options->maxPendingRequests()); EXPECT_EQ(11, options->maxActiveRequests()); EXPECT_EQ(12, options->maxRequestsPerConnection()); From f5f88e31539dd7522a6c5f8d0ef095c085343491 Mon Sep 17 00:00:00 2001 From: Jakub Sobon Date: Wed, 2 Dec 2020 14:33:05 -0500 Subject: [PATCH 39/63] Fix integration tests when running with IPv6. (#579) We don't run integration tests in IPv6 mode on CI (See #578). As a result some of the test expectations became invalid. Also: - Pytest now displays more logging on test failures including the stderr and stdout of the started nighthawk test server. - fixing the default IPv6 address, as per `man 3 inet_pton`, the address `::/0` isn't valid, while `::` is. 
Signed-off-by: Jakub Sobon --- benchmarks/benchmarks.py | 5 ++++- test/integration/integration_test.py | 6 +++++- test/integration/integration_test_fixtures.py | 2 +- test/integration/nighthawk_test_server.py | 6 +++--- test/integration/test_integration_basics.py | 4 ++-- 5 files changed, 15 insertions(+), 8 deletions(-) diff --git a/benchmarks/benchmarks.py b/benchmarks/benchmarks.py index 654c5c324..b4c22b358 100644 --- a/benchmarks/benchmarks.py +++ b/benchmarks/benchmarks.py @@ -9,5 +9,8 @@ if __name__ == '__main__': path = os.path.dirname(os.path.realpath(__file__)) - r = pytest.main(["--rootdir=" + path, "-x", path, "-p", "no:cacheprovider", *sys.argv]) + r = pytest.main([ + "--rootdir=" + path, "-x", path, "-p", "no:cacheprovider", "--log-level", "INFO", + "--log-cli-level", "INFO", *sys.argv + ]) exit(r) diff --git a/test/integration/integration_test.py b/test/integration/integration_test.py index b715408f4..6c49d974c 100644 --- a/test/integration/integration_test.py +++ b/test/integration/integration_test.py @@ -22,7 +22,11 @@ "-x", path, "-n", - "4" if utility.isSanitizerRun() else "20" # Number of tests to run in parallel + "4" if utility.isSanitizerRun() else "20", # Number of tests to run in parallel + "--log-level", + "INFO", + "--log-cli-level", + "INFO", ], plugins=["xdist"]) exit(r) diff --git a/test/integration/integration_test_fixtures.py b/test/integration/integration_test_fixtures.py index e256d53e4..7850eef26 100644 --- a/test/integration/integration_test_fixtures.py +++ b/test/integration/integration_test_fixtures.py @@ -75,7 +75,7 @@ def __init__(self, ip_version, server_config, backend_count=1): super(IntegrationTestBase, self).__init__() assert ip_version != IpVersion.UNKNOWN self.ip_version = ip_version - self.server_ip = "::/0" if ip_version == IpVersion.IPV6 else "0.0.0.0" + self.server_ip = "::" if ip_version == IpVersion.IPV6 else "0.0.0.0" self.server_ip = os.getenv("TEST_SERVER_EXTERNAL_IP", self.server_ip) self.tag = "" 
self.parameters = {} diff --git a/test/integration/nighthawk_test_server.py b/test/integration/nighthawk_test_server.py index 6a6deb849..e23911a3e 100644 --- a/test/integration/nighthawk_test_server.py +++ b/test/integration/nighthawk_test_server.py @@ -124,8 +124,8 @@ def _serverThreadRunner(self): logging.info("Test server popen() args: %s" % str.join(" ", args)) self._server_process = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = self._server_process.communicate() - logging.debug(stdout.decode("utf-8")) - logging.debug(stderr.decode("utf-8")) + logging.info("Process stdout: %s", stdout.decode("utf-8")) + logging.info("Process stderr: %s", stderr.decode("utf-8")) def fetchJsonFromAdminInterface(self, path): """Fetch and parse json from the admin interface. @@ -183,7 +183,7 @@ def enableCpuProfiler(self): return r.status_code == 200 def _waitUntilServerListening(self): - # we allow 30 seconds for the server to have its listeners up. + # we allow some time for the server to have its listeners up. 
# (It seems that in sanitizer-enabled runs this can take a little while) timeout = time.time() + 60 while time.time() < timeout: diff --git a/test/integration/test_integration_basics.py b/test/integration/test_integration_basics.py index 03174800b..3fe989aa6 100644 --- a/test/integration/test_integration_basics.py +++ b/test/integration/test_integration_basics.py @@ -39,7 +39,7 @@ def test_http_h1(http_test_server_fixture): asserts.assertCounterEqual(counters, "upstream_cx_total", 1) asserts.assertCounterEqual( counters, "upstream_cx_tx_bytes_total", - 1400 if http_test_server_fixture.ip_version == IpVersion.IPV6 else 1450) + 1375 if http_test_server_fixture.ip_version == IpVersion.IPV6 else 1450) asserts.assertCounterEqual(counters, "upstream_rq_pending_total", 1) asserts.assertCounterEqual(counters, "upstream_rq_total", 25) asserts.assertCounterEqual(counters, "default.total_match_count", 1) @@ -223,7 +223,7 @@ def test_https_h1(https_test_server_fixture): asserts.assertCounterEqual(counters, "upstream_cx_total", 1) asserts.assertCounterEqual( counters, "upstream_cx_tx_bytes_total", - 1400 if https_test_server_fixture.ip_version == IpVersion.IPV6 else 1450) + 1375 if https_test_server_fixture.ip_version == IpVersion.IPV6 else 1450) asserts.assertCounterEqual(counters, "upstream_rq_pending_total", 1) asserts.assertCounterEqual(counters, "upstream_rq_total", 25) asserts.assertCounterEqual(counters, "ssl.ciphers.ECDHE-RSA-AES128-GCM-SHA256", 1) From 31b785747ae6b998ff505f4f8148f9f8aa62136b Mon Sep 17 00:00:00 2001 From: Jakub Sobon Date: Thu, 3 Dec 2020 16:25:20 -0500 Subject: [PATCH 40/63] Migrating config for benchmark tests to Envoy API v3. 
(#585) Signed-off-by: Jakub Sobon --- benchmarks/configurations/envoy_proxy.yaml | 29 ++++++++++++++-------- 1 file changed, 18 insertions(+), 11 deletions(-) diff --git a/benchmarks/configurations/envoy_proxy.yaml b/benchmarks/configurations/envoy_proxy.yaml index a7e984dc3..700217df4 100644 --- a/benchmarks/configurations/envoy_proxy.yaml +++ b/benchmarks/configurations/envoy_proxy.yaml @@ -11,10 +11,11 @@ static_resources: port_value: 0 filter_chains: - filters: - - name: envoy.http_connection_manager - config: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager generate_request_id: false - codec_type: auto + codec_type: AUTO stat_prefix: ingress_http route_config: name: local_route @@ -28,15 +29,21 @@ static_resources: route: cluster: local_service http_filters: - - name: envoy.router - config: + - name: envoy.filters.http.router + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router dynamic_stats: false clusters: - name: local_service connect_timeout: 0.25s - type: strict_dns - lb_policy: round_robin - hosts: - - socket_address: - address: $server_ip - port_value: $server_port + type: STRICT_DNS + lb_policy: ROUND_ROBIN + load_assignment: + cluster_name: local_service + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: $server_ip + port_value: $server_port From 2afa57b98504d7b9902a5dc083dfb4bc6888280f Mon Sep 17 00:00:00 2001 From: Kush Trivedi <44091822+kushthedude@users.noreply.github.com> Date: Fri, 4 Dec 2020 22:46:38 +0530 Subject: [PATCH 41/63] chore: add issue and pull request templates (#515) * chore: add issue and pull request templates Signed-off-by: Kush Trivedi * add link to contributing.md Signed-off-by: Kush Trivedi --- .../ISSUE_TEMPLATE/PULL_REQUEST_TEMPLATE.md | 16 ++++++++++ .github/ISSUE_TEMPLATE/bug.md | 32 +++++++++++++++++++ 
.github/ISSUE_TEMPLATE/feature_request.md | 17 ++++++++++ 3 files changed, 65 insertions(+) create mode 100644 .github/ISSUE_TEMPLATE/PULL_REQUEST_TEMPLATE.md create mode 100644 .github/ISSUE_TEMPLATE/bug.md create mode 100644 .github/ISSUE_TEMPLATE/feature_request.md diff --git a/.github/ISSUE_TEMPLATE/PULL_REQUEST_TEMPLATE.md b/.github/ISSUE_TEMPLATE/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 000000000..a91d54f04 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,16 @@ +**Description** + + + +This PR is related to # + + +**Notes for Reviewers** + + \ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/bug.md b/.github/ISSUE_TEMPLATE/bug.md new file mode 100644 index 000000000..6ee5fa021 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug.md @@ -0,0 +1,32 @@ +--- +name: Non-{crash,security} bug +about: Bugs which are not crashes, DoS or other security issue +title: '' +labels: bug,triage +assignees: '' + +--- + +*Title*: *One line description* + +*Description*: +>What issue is being seen? Describe what should be happening instead of +the bug, for example: Nighthawk should not crash, the expected value isn't +returned, etc. + +*Reproduction steps*: +> Include sample requests, environment, etc. All data and inputs +required to reproduce the bug. + +>**Note**: If there are privacy concerns, sanitize the data prior to +sharing. + +*Logs*: +>Include the Nighthawk logs. + +>**Note**: If there are privacy concerns, sanitize the data prior to +sharing. + +*Call Stack*: +> If the Envoy binary is crashing, a call stack is **required**. +Please refer to the [Bazel Stack trace documentation](https://github.com/envoyproxy/envoy/tree/master/bazel#stack-trace-symbol-resolution). 
\ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md new file mode 100644 index 000000000..ef966b857 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -0,0 +1,17 @@ +--- +name: Feature request +about: Suggest an idea for this project +title: '' +labels: enhancement,triage +assignees: '' + +--- + +*Title*: *One line description* + +*Description*: +>Describe the desired behavior, what scenario it enables and how it +would be used. + +[optional *Relevant Links*:] +>Any extra documentation required to understand the issue. \ No newline at end of file From dac3cc165fe3cf889737531b913b15c25cb89bdd Mon Sep 17 00:00:00 2001 From: Otto van der Schaaf Date: Mon, 7 Dec 2020 17:58:50 +0100 Subject: [PATCH 42/63] Schedule option (#573) Adds a new schedule option to the proto API, which allows specifying a date/time at which execution should start. Should also fix #569 by reducing reliance on `Envoy::SystemTime` to a minimum: Based on the value of the schedule option, a monotonic time will be computed and propagated. This makes us use `Envoy::MonotonicTime` again in places where sub ms resolution is a must have. (The fix will need confirmation, as I couldn't get `--config=libc++` to work for other reasons.)
Signed-off-by: Otto van der Schaaf --- api/client/options.proto | 4 + api/client/output.proto | 1 + include/nighthawk/client/options.h | 3 +- include/nighthawk/client/output_collector.h | 7 +- include/nighthawk/common/factories.h | 4 +- include/nighthawk/common/rate_limiter.h | 6 ++ include/nighthawk/common/sequencer.h | 6 ++ source/client/client_worker_impl.cc | 2 +- source/client/client_worker_impl.h | 2 +- source/client/factories_impl.cc | 12 ++- source/client/factories_impl.h | 4 +- source/client/options_impl.cc | 12 ++- source/client/options_impl.h | 2 + source/client/output_collector_impl.cc | 15 +++- source/client/output_collector_impl.h | 3 +- source/client/process_impl.cc | 74 +++++++++++------ source/client/process_impl.h | 37 ++++++++- source/common/rate_limiter_impl.cc | 8 +- source/common/rate_limiter_impl.h | 13 ++- source/common/sequencer_impl.h | 2 + source/common/termination_predicate_impl.cc | 4 +- source/common/termination_predicate_impl.h | 4 +- test/BUILD | 1 + test/client_worker_test.cc | 2 +- test/factories_test.cc | 2 +- test/mocks/client/mock_options.h | 1 + test/mocks/common/mock_rate_limiter.h | 1 + test/mocks/common/mock_sequencer.h | 2 + test/mocks/common/mock_sequencer_factory.h | 2 +- .../mock_termination_predicate_factory.h | 2 +- test/output_formatter_test.cc | 6 +- test/process_test.cc | 81 +++++++++++++++++++ test/rate_limiter_test.cc | 9 ++- test/termination_predicate_test.cc | 2 +- test/test_data/output_formatter.json.gold | 6 +- test/test_data/output_formatter.yaml.gold | 2 + 36 files changed, 272 insertions(+), 72 deletions(-) diff --git a/api/client/options.proto b/api/client/options.proto index 13d7f1819..d1108e908 100644 --- a/api/client/options.proto +++ b/api/client/options.proto @@ -3,6 +3,7 @@ syntax = "proto3"; package nighthawk.client; import "google/protobuf/duration.proto"; +import "google/protobuf/timestamp.proto"; import "google/protobuf/wrappers.proto"; import "envoy/config/core/v3/base.proto"; import 
"envoy/config/metrics/v3/stats.proto"; @@ -219,4 +220,7 @@ message CommandLineOptions { // "emit_previous_request_delta_in_response_header" to record elapsed time between request // arrivals. google.protobuf.StringValue latency_response_header_name = 36; + // Provide an execution starting date and time. Optional, any value specified must be in the + // future. + google.protobuf.Timestamp scheduled_start = 105; } diff --git a/api/client/output.proto b/api/client/output.proto index 8e74f34f4..cc3e339db 100644 --- a/api/client/output.proto +++ b/api/client/output.proto @@ -49,6 +49,7 @@ message Result { repeated Statistic statistics = 2; repeated Counter counters = 3; google.protobuf.Duration execution_duration = 4; + google.protobuf.Timestamp execution_start = 5; } message Output { diff --git a/include/nighthawk/client/options.h b/include/nighthawk/client/options.h index c87b03c5f..5e6dc8292 100644 --- a/include/nighthawk/client/options.h +++ b/include/nighthawk/client/options.h @@ -6,6 +6,7 @@ #include #include "envoy/common/pure.h" +#include "envoy/common/time.h" #include "envoy/config/cluster/v3/cluster.pb.h" #include "envoy/config/core/v3/base.pb.h" #include "envoy/config/metrics/v3/stats.pb.h" @@ -74,7 +75,7 @@ class Options { virtual std::vector statsSinks() const PURE; virtual uint32_t statsFlushInterval() const PURE; virtual std::string responseHeaderWithLatencyInput() const PURE; - + virtual absl::optional scheduled_start() const PURE; /** * Converts an Options instance to an equivalent CommandLineOptions instance in terms of option * values. 
diff --git a/include/nighthawk/client/output_collector.h b/include/nighthawk/client/output_collector.h index 1ff274821..bd457681e 100644 --- a/include/nighthawk/client/output_collector.h +++ b/include/nighthawk/client/output_collector.h @@ -3,9 +3,12 @@ #include #include "envoy/common/pure.h" +#include "envoy/common/time.h" #include "nighthawk/common/statistic.h" +#include "absl/types/optional.h" + namespace Nighthawk { namespace Client { @@ -23,10 +26,12 @@ class OutputCollector { * @param statistics Reference to a vector of statistics to add to the output. * @param counters Reference to a map of counter values, keyed by name, to add to the output. * @param execution_duration Execution duration associated to the to-be-added result. + * @param first_acquisition_time Timing of the first rate limiter acquisition. */ virtual void addResult(absl::string_view name, const std::vector& statistics, const std::map& counters, - const std::chrono::nanoseconds execution_duration) PURE; + const std::chrono::nanoseconds execution_duration, + const absl::optional& first_acquisition_time) PURE; /** * Directly sets the output value. 
* diff --git a/include/nighthawk/common/factories.h b/include/nighthawk/common/factories.h index 20d5eb6a6..c93097b79 100644 --- a/include/nighthawk/common/factories.h +++ b/include/nighthawk/common/factories.h @@ -24,7 +24,7 @@ class SequencerFactory { const SequencerTarget& sequencer_target, TerminationPredicatePtr&& termination_predicate, Envoy::Stats::Scope& scope, - const Envoy::SystemTime scheduled_starting_time) const PURE; + const Envoy::MonotonicTime scheduled_starting_time) const PURE; }; class StatisticFactory { @@ -46,7 +46,7 @@ class TerminationPredicateFactory { virtual ~TerminationPredicateFactory() = default; virtual TerminationPredicatePtr create(Envoy::TimeSource& time_source, Envoy::Stats::Scope& scope, - const Envoy::SystemTime scheduled_starting_time) const PURE; + const Envoy::MonotonicTime scheduled_starting_time) const PURE; }; /** diff --git a/include/nighthawk/common/rate_limiter.h b/include/nighthawk/common/rate_limiter.h index c5f5f69e8..0c2eadad8 100644 --- a/include/nighthawk/common/rate_limiter.h +++ b/include/nighthawk/common/rate_limiter.h @@ -33,6 +33,12 @@ class RateLimiter { * @return Envoy::TimeSource& time_source used to track time. */ virtual Envoy::TimeSource& timeSource() PURE; + + /** + * @return absl::optional Time of the first acquisition, if any. + */ + virtual absl::optional firstAcquisitionTime() const PURE; + /** * @return std::chrono::nanoseconds elapsed since the first call to tryAcquireOne(). Used by some * rate limiter implementations to compute acquisition rate. 
diff --git a/include/nighthawk/common/sequencer.h b/include/nighthawk/common/sequencer.h index afc548518..56e177032 100644 --- a/include/nighthawk/common/sequencer.h +++ b/include/nighthawk/common/sequencer.h @@ -7,6 +7,7 @@ #include "envoy/common/pure.h" #include "nighthawk/common/operation_callback.h" +#include "nighthawk/common/rate_limiter.h" #include "nighthawk/common/statistic.h" namespace Nighthawk { @@ -35,6 +36,11 @@ class Sequencer { */ virtual std::chrono::nanoseconds executionDuration() const PURE; + /** + * @return RateLimiter& reference to the rate limiter associated to this sequencer. + */ + virtual const RateLimiter& rate_limiter() const PURE; + /** * @return double an up-to-date completions per second rate. */ diff --git a/source/client/client_worker_impl.cc b/source/client/client_worker_impl.cc index a6c23542c..a5d456763 100644 --- a/source/client/client_worker_impl.cc +++ b/source/client/client_worker_impl.cc @@ -19,7 +19,7 @@ ClientWorkerImpl::ClientWorkerImpl(Envoy::Api::Api& api, Envoy::ThreadLocal::Ins const SequencerFactory& sequencer_factory, const RequestSourceFactory& request_generator_factory, Envoy::Stats::Store& store, const int worker_number, - const Envoy::SystemTime starting_time, + const Envoy::MonotonicTime starting_time, Envoy::Tracing::HttpTracerSharedPtr& http_tracer, const HardCodedWarmupStyle hardcoded_warmup_style) : WorkerImpl(api, tls, store), diff --git a/source/client/client_worker_impl.h b/source/client/client_worker_impl.h index 0decef819..41b2660bc 100644 --- a/source/client/client_worker_impl.h +++ b/source/client/client_worker_impl.h @@ -33,7 +33,7 @@ class ClientWorkerImpl : public WorkerImpl, virtual public ClientWorker { const SequencerFactory& sequencer_factory, const RequestSourceFactory& request_generator_factory, Envoy::Stats::Store& store, const int worker_number, - const Envoy::SystemTime starting_time, + const Envoy::MonotonicTime starting_time, Envoy::Tracing::HttpTracerSharedPtr& http_tracer, const 
HardCodedWarmupStyle hardcoded_warmup_style); StatisticPtrMap statistics() const override; diff --git a/source/client/factories_impl.cc b/source/client/factories_impl.cc index 01ae6c50d..2031658c4 100644 --- a/source/client/factories_impl.cc +++ b/source/client/factories_impl.cc @@ -65,12 +65,10 @@ BenchmarkClientPtr BenchmarkClientFactoryImpl::create( SequencerFactoryImpl::SequencerFactoryImpl(const Options& options) : OptionBasedFactoryImpl(options) {} -SequencerPtr SequencerFactoryImpl::create(Envoy::TimeSource& time_source, - Envoy::Event::Dispatcher& dispatcher, - const SequencerTarget& sequencer_target, - TerminationPredicatePtr&& termination_predicate, - Envoy::Stats::Scope& scope, - const Envoy::SystemTime scheduled_starting_time) const { +SequencerPtr SequencerFactoryImpl::create( + Envoy::TimeSource& time_source, Envoy::Event::Dispatcher& dispatcher, + const SequencerTarget& sequencer_target, TerminationPredicatePtr&& termination_predicate, + Envoy::Stats::Scope& scope, const Envoy::MonotonicTime scheduled_starting_time) const { StatisticFactoryImpl statistic_factory(options_); Frequency frequency(options_.requestsPerSecond()); RateLimiterPtr rate_limiter = std::make_unique( @@ -211,7 +209,7 @@ TerminationPredicateFactoryImpl::TerminationPredicateFactoryImpl(const Options& TerminationPredicatePtr TerminationPredicateFactoryImpl::create(Envoy::TimeSource& time_source, Envoy::Stats::Scope& scope, - const Envoy::SystemTime scheduled_starting_time) const { + const Envoy::MonotonicTime scheduled_starting_time) const { // We'll always link a predicate which checks for requests to cancel. 
TerminationPredicatePtr root_predicate = std::make_unique( diff --git a/source/client/factories_impl.h b/source/client/factories_impl.h index 2c8abfd6d..2932a72bf 100644 --- a/source/client/factories_impl.h +++ b/source/client/factories_impl.h @@ -44,7 +44,7 @@ class SequencerFactoryImpl : public OptionBasedFactoryImpl, public SequencerFact SequencerPtr create(Envoy::TimeSource& time_source, Envoy::Event::Dispatcher& dispatcher, const SequencerTarget& sequencer_target, TerminationPredicatePtr&& termination_predicate, Envoy::Stats::Scope& scope, - const Envoy::SystemTime scheduled_starting_time) const override; + const Envoy::MonotonicTime scheduled_starting_time) const override; }; class StatisticFactoryImpl : public OptionBasedFactoryImpl, public StatisticFactory { @@ -93,7 +93,7 @@ class TerminationPredicateFactoryImpl : public OptionBasedFactoryImpl, public: TerminationPredicateFactoryImpl(const Options& options); TerminationPredicatePtr create(Envoy::TimeSource& time_source, Envoy::Stats::Scope& scope, - const Envoy::SystemTime scheduled_starting_time) const override; + const Envoy::MonotonicTime scheduled_starting_time) const override; TerminationPredicate* linkConfiguredPredicates( TerminationPredicate& last_predicate, const TerminationPredicateMap& predicates, const TerminationPredicate::Status termination_status, Envoy::Stats::Scope& scope) const; diff --git a/source/client/options_impl.cc b/source/client/options_impl.cc index 29af954e7..b4ff7ae3e 100644 --- a/source/client/options_impl.cc +++ b/source/client/options_impl.cc @@ -652,7 +652,12 @@ OptionsImpl::OptionsImpl(const nighthawk::client::CommandLineOptions& options) { std::copy(options.labels().begin(), options.labels().end(), std::back_inserter(labels_)); latency_response_header_name_ = PROTOBUF_GET_WRAPPED_OR_DEFAULT( options, latency_response_header_name, latency_response_header_name_); - + if (options.has_scheduled_start()) { + const auto elapsed_since_epoch = 
std::chrono::nanoseconds(options.scheduled_start().nanos()) + + std::chrono::seconds(options.scheduled_start().seconds()); + scheduled_start_ = + Envoy::SystemTime(std::chrono::time_point(elapsed_since_epoch)); + } validate(); } @@ -829,6 +834,11 @@ CommandLineOptionsPtr OptionsImpl::toCommandLineOptionsInternal() const { command_line_options->mutable_stats_flush_interval()->set_value(stats_flush_interval_); command_line_options->mutable_latency_response_header_name()->set_value( latency_response_header_name_); + if (scheduled_start_.has_value()) { + *(command_line_options->mutable_scheduled_start()) = + Envoy::ProtobufUtil::TimeUtil::NanosecondsToTimestamp( + scheduled_start_.value().time_since_epoch().count()); + } return command_line_options; } diff --git a/source/client/options_impl.h b/source/client/options_impl.h index c43c211a3..7132aba01 100644 --- a/source/client/options_impl.h +++ b/source/client/options_impl.h @@ -93,6 +93,7 @@ class OptionsImpl : public Options, public Envoy::Logger::Loggable scheduled_start() const override { return scheduled_start_; } private: void parsePredicates(const TCLAP::MultiArg& arg, @@ -149,6 +150,7 @@ class OptionsImpl : public Options, public Envoy::Logger::Loggable stats_sinks_; uint32_t stats_flush_interval_{5}; std::string latency_response_header_name_; + absl::optional scheduled_start_; }; } // namespace Client diff --git a/source/client/output_collector_impl.cc b/source/client/output_collector_impl.cc index 2303c164a..b6749f0c9 100644 --- a/source/client/output_collector_impl.cc +++ b/source/client/output_collector_impl.cc @@ -23,12 +23,19 @@ OutputCollectorImpl::OutputCollectorImpl(Envoy::TimeSource& time_source, const O nighthawk::client::Output OutputCollectorImpl::toProto() const { return output_; } -void OutputCollectorImpl::addResult(absl::string_view name, - const std::vector& statistics, - const std::map& counters, - const std::chrono::nanoseconds execution_duration) { +void OutputCollectorImpl::addResult( + 
absl::string_view name, const std::vector& statistics, + const std::map& counters, + const std::chrono::nanoseconds execution_duration, + const absl::optional& first_acquisition_time) { auto result = output_.add_results(); result->set_name(name.data(), name.size()); + if (first_acquisition_time.has_value()) { + *(result->mutable_execution_start()) = Envoy::Protobuf::util::TimeUtil::NanosecondsToTimestamp( + std::chrono::duration_cast( + first_acquisition_time.value().time_since_epoch()) + .count()); + } for (auto& statistic : statistics) { // TODO(#292): Looking at if the statistic id ends with "_size" to determine how it should be // serialized is kind of hacky. Maybe we should have a lookup table of sorts, to determine how diff --git a/source/client/output_collector_impl.h b/source/client/output_collector_impl.h index 40109f936..ccc9d5bae 100644 --- a/source/client/output_collector_impl.h +++ b/source/client/output_collector_impl.h @@ -20,7 +20,8 @@ class OutputCollectorImpl : public OutputCollector { void addResult(absl::string_view name, const std::vector& statistics, const std::map& counters, - const std::chrono::nanoseconds execution_duration) override; + const std::chrono::nanoseconds execution_duration, + const absl::optional& first_acquisition_time) override; void setOutput(const nighthawk::client::Output& output) override { output_ = output; } nighthawk::client::Output toProto() const override; diff --git a/source/client/process_impl.cc b/source/client/process_impl.cc index 08cbaefe5..cbf3b695d 100644 --- a/source/client/process_impl.cc +++ b/source/client/process_impl.cc @@ -29,6 +29,7 @@ #include "external/envoy/source/server/server.h" #include "absl/strings/str_replace.h" +#include "absl/types/optional.h" // TODO(oschaaf): See if we can leverage a static module registration like Envoy does to avoid the // ifdefs in this file. 
@@ -164,32 +165,37 @@ bool ProcessImpl::requestExecutionCancellation() { return true; } -void ProcessImpl::createWorkers(const uint32_t concurrency) { - // TODO(oschaaf): Expose kMinimalDelay in configuration. - const std::chrono::milliseconds kMinimalWorkerDelay = 500ms + (concurrency * 50ms); - ASSERT(workers_.empty()); +Envoy::MonotonicTime +ProcessImpl::computeFirstWorkerStart(Envoy::Event::TimeSystem& time_system, + const absl::optional& scheduled_start, + const uint32_t concurrency) { + const std::chrono::nanoseconds first_worker_delay = + scheduled_start.has_value() ? scheduled_start.value() - time_system.systemTime() + : 500ms + (concurrency * 50ms); + const Envoy::MonotonicTime monotonic_now = time_system.monotonicTime(); + const Envoy::MonotonicTime first_worker_start = monotonic_now + first_worker_delay; + return first_worker_start; +} - // We try to offset the start of each thread so that workers will execute tasks evenly spaced in - // time. Let's assume we have two workers w0/w1, which should maintain a combined global pace of - // 1000Hz. w0 and w1 both run at 500Hz, but ideally their execution is evenly spaced in time, - // and not overlapping. Workers start offsets can be computed like - // "worker_number*(1/global_frequency))", which would yield T0+[0ms, 1ms]. This helps reduce - // batching/queueing effects, both initially, but also by calibrating the linear rate limiter we - // currently have to a precise starting time, which helps later on. - // TODO(oschaaf): Arguably, this ought to be the job of a rate limiter with awareness of the - // global status quo, which we do not have right now. This has been noted in the - // track-for-future issue. - const auto first_worker_start = time_system_.systemTime() + kMinimalWorkerDelay; - const double inter_worker_delay_usec = - (1. 
/ options_.requestsPerSecond()) * 1000000 / concurrency; +std::chrono::nanoseconds ProcessImpl::computeInterWorkerDelay(const uint32_t concurrency, + const uint32_t rps) { + const double inter_worker_delay_usec = (1. / rps) * 1000000 / concurrency; + return std::chrono::duration_cast(inter_worker_delay_usec * 1us); +} + +void ProcessImpl::createWorkers(const uint32_t concurrency, + const absl::optional& scheduled_start) { + ASSERT(workers_.empty()); + const Envoy::MonotonicTime first_worker_start = + computeFirstWorkerStart(time_system_, scheduled_start, concurrency); + const std::chrono::nanoseconds inter_worker_delay = + computeInterWorkerDelay(concurrency, options_.requestsPerSecond()); int worker_number = 0; while (workers_.size() < concurrency) { - const auto worker_delay = std::chrono::duration_cast( - ((inter_worker_delay_usec * worker_number) * 1us)); workers_.push_back(std::make_unique( *api_, tls_, cluster_manager_, benchmark_client_factory_, termination_predicate_factory_, sequencer_factory_, request_generator_factory_, store_root_, worker_number, - first_worker_start + worker_delay, http_tracer_, + first_worker_start + (inter_worker_delay * worker_number), http_tracer_, options_.simpleWarmup() ? 
ClientWorkerImpl::HardCodedWarmupStyle::ON : ClientWorkerImpl::HardCodedWarmupStyle::OFF)); worker_number++; @@ -445,7 +451,13 @@ void ProcessImpl::setupStatsSinks(const envoy::config::bootstrap::v3::Bootstrap& } bool ProcessImpl::runInternal(OutputCollector& collector, const std::vector& uris, - const UriPtr& request_source_uri, const UriPtr& tracing_uri) { + const UriPtr& request_source_uri, const UriPtr& tracing_uri, + const absl::optional& scheduled_start) { + const Envoy::SystemTime now = time_system_.systemTime(); + if (scheduled_start.value_or(now) < now) { + ENVOY_LOG(error, "Scheduled execution date already transpired."); + return false; + } { auto guard = std::make_unique(workers_lock_); if (cancelled_) { @@ -461,7 +473,7 @@ bool ProcessImpl::runInternal(OutputCollector& collector, const std::vector( @@ -522,15 +534,26 @@ bool ProcessImpl::runInternal(OutputCollector& collector, const std::vector first_acquisition_time = absl::nullopt; + for (auto& worker : workers_) { auto sequencer_execution_duration = worker->phase().sequencer().executionDuration(); + absl::optional worker_first_acquisition_time = + worker->phase().sequencer().rate_limiter().firstAcquisitionTime(); + if (worker_first_acquisition_time.has_value()) { + first_acquisition_time = + first_acquisition_time.has_value() + ? std::min(first_acquisition_time.value(), worker_first_acquisition_time.value()) + : worker_first_acquisition_time.value(); + } // We don't write per-worker results if we only have a single worker, because the global // results will be precisely the same. 
if (workers_.size() > 1) { StatisticFactoryImpl statistic_factory(options_); collector.addResult(fmt::format("worker_{}", i), vectorizeStatisticPtrMap(worker->statistics()), - worker->threadLocalCounterValues(), sequencer_execution_duration); + worker->threadLocalCounterValues(), sequencer_execution_duration, + worker_first_acquisition_time); } total_execution_duration += sequencer_execution_duration; i++; @@ -545,7 +568,7 @@ bool ProcessImpl::runInternal(OutputCollector& collector, const std::vector 0; }); StatisticFactoryImpl statistic_factory(options_); collector.addResult("global", mergeWorkerStatistics(workers_), counters, - total_execution_duration / workers_.size()); + total_execution_duration / workers_.size(), first_acquisition_time); return counters.find("sequencer.failed_terminations") == counters.end(); } @@ -585,7 +608,8 @@ bool ProcessImpl::run(OutputCollector& collector) { } try { - return runInternal(collector, uris, request_source_uri, tracing_uri); + return runInternal(collector, uris, request_source_uri, tracing_uri, + options_.scheduled_start()); } catch (Envoy::EnvoyException& ex) { ENVOY_LOG(error, "Fatal exception: {}", ex.what()); throw; diff --git a/source/client/process_impl.h b/source/client/process_impl.h index f09bb05cd..d14b86e6a 100644 --- a/source/client/process_impl.h +++ b/source/client/process_impl.h @@ -109,7 +109,7 @@ class ProcessImpl : public Process, public Envoy::Logger::Loggable& schedule); std::vector vectorizeStatisticPtrMap(const StatisticPtrMap& statistics) const; std::vector mergeWorkerStatistics(const std::vector& workers) const; @@ -124,7 +124,40 @@ class ProcessImpl : public Process, public Envoy::Logger::Loggable>& stats_sinks); bool runInternal(OutputCollector& collector, const std::vector& uris, - const UriPtr& request_source_uri, const UriPtr& tracing_uri); + const UriPtr& request_source_uri, const UriPtr& tracing_uri, + const absl::optional& schedule); + + /** + * Compute the offset at which execution should 
start. We adhere to the scheduled start passed in + * as an argument when specified, otherwise we need a delay that will be sufficient for all the + * workers to get up and running. + * + * @param time_system Time system used to obtain the current time. + * @param scheduled_start Optional scheduled start. + * @param concurrency The number of workers that will be used during execution. + * @return Envoy::MonotonicTime Time at which execution should start. + */ + static Envoy::MonotonicTime + computeFirstWorkerStart(Envoy::Event::TimeSystem& time_system, + const absl::optional& scheduled_start, + const uint32_t concurrency); + + /** + * We offset the start of each thread so that workers will execute tasks evenly spaced in + * time. Let's assume we have two workers w0/w1, which should maintain a combined global pace of + * 1000Hz. w0 and w1 both run at 500Hz, but ideally their execution is evenly spaced in time, + * and not overlapping. Workers start offsets can be computed like + * "worker_number*(1/global_frequency))", which would yield T0+[0ms, 1ms]. This helps reduce + * batching/queueing effects, both initially, but also by calibrating the linear rate limiter we + * currently have to a precise starting time, which helps later on. + * + * @param concurrency The number of workers that will be used during execution. + * @param rps Anticipated requests per second during execution. + * @return std::chrono::nanoseconds The delay that should be used as an offset between each + * independent worker execution start. 
+ */ + static std::chrono::nanoseconds computeInterWorkerDelay(const uint32_t concurrency, + const uint32_t rps); std::shared_ptr process_wide_; Envoy::PlatformImpl platform_impl_; diff --git a/source/common/rate_limiter_impl.cc b/source/common/rate_limiter_impl.cc index 7d4aad4fc..15f1fd71b 100644 --- a/source/common/rate_limiter_impl.cc +++ b/source/common/rate_limiter_impl.cc @@ -53,16 +53,16 @@ void BurstingRateLimiter::releaseOne() { } ScheduledStartingRateLimiter::ScheduledStartingRateLimiter( - RateLimiterPtr&& rate_limiter, const Envoy::SystemTime scheduled_starting_time) + RateLimiterPtr&& rate_limiter, const Envoy::MonotonicTime scheduled_starting_time) : ForwardingRateLimiterImpl(std::move(rate_limiter)), scheduled_starting_time_(scheduled_starting_time) { - if (timeSource().systemTime() >= scheduled_starting_time_) { + if (timeSource().monotonicTime() >= scheduled_starting_time_) { ENVOY_LOG(error, "Scheduled starting time exceeded. This may cause unintended bursty traffic."); } } bool ScheduledStartingRateLimiter::tryAcquireOne() { - if (timeSource().systemTime() < scheduled_starting_time_) { + if (timeSource().monotonicTime() < scheduled_starting_time_) { aquisition_attempted_ = true; return false; } @@ -76,7 +76,7 @@ bool ScheduledStartingRateLimiter::tryAcquireOne() { } void ScheduledStartingRateLimiter::releaseOne() { - if (timeSource().systemTime() < scheduled_starting_time_) { + if (timeSource().monotonicTime() < scheduled_starting_time_) { throw NighthawkException("Unexpected call to releaseOne()"); } return rate_limiter_->releaseOne(); diff --git a/source/common/rate_limiter_impl.h b/source/common/rate_limiter_impl.h index 70a42e0ae..1d22e5a43 100644 --- a/source/common/rate_limiter_impl.h +++ b/source/common/rate_limiter_impl.h @@ -30,14 +30,20 @@ class RateLimiterBaseImpl : public RateLimiter { // TODO(oschaaf): consider adding an explicit start() call to the interface. 
const auto now = time_source_.monotonicTime(); if (start_time_ == absl::nullopt) { + first_acquisition_time_ = time_source_.systemTime(); start_time_ = now; } return now - start_time_.value(); } + absl::optional firstAcquisitionTime() const override { + return first_acquisition_time_; + } + private: Envoy::TimeSource& time_source_; absl::optional start_time_; + absl::optional first_acquisition_time_; }; /** @@ -86,6 +92,9 @@ class ForwardingRateLimiterImpl : public RateLimiter { : rate_limiter_(std::move(rate_limiter)) {} Envoy::TimeSource& timeSource() override { return rate_limiter_->timeSource(); } std::chrono::nanoseconds elapsed() override { return rate_limiter_->elapsed(); } + absl::optional firstAcquisitionTime() const override { + return rate_limiter_->firstAcquisitionTime(); + } protected: const RateLimiterPtr rate_limiter_; @@ -125,12 +134,12 @@ class ScheduledStartingRateLimiter : public ForwardingRateLimiterImpl, * @param scheduled_starting_time The starting time */ ScheduledStartingRateLimiter(RateLimiterPtr&& rate_limiter, - const Envoy::SystemTime scheduled_starting_time); + const Envoy::MonotonicTime scheduled_starting_time); bool tryAcquireOne() override; void releaseOne() override; private: - const Envoy::SystemTime scheduled_starting_time_; + const Envoy::MonotonicTime scheduled_starting_time_; bool aquisition_attempted_{false}; }; diff --git a/source/common/sequencer_impl.h b/source/common/sequencer_impl.h index ff226b6d3..40f245d98 100644 --- a/source/common/sequencer_impl.h +++ b/source/common/sequencer_impl.h @@ -62,6 +62,8 @@ class SequencerImpl : public Sequencer, public Envoy::Logger::Loggableelapsed(); } + const RateLimiter& rate_limiter() const override { return *rate_limiter_; } + double completionsPerSecond() const override { const double usec = std::chrono::duration_cast(executionDuration()).count(); diff --git a/source/common/termination_predicate_impl.cc b/source/common/termination_predicate_impl.cc index d468562f8..d32f2006b 100644 
--- a/source/common/termination_predicate_impl.cc +++ b/source/common/termination_predicate_impl.cc @@ -16,8 +16,8 @@ TerminationPredicate::Status TerminationPredicateBaseImpl::evaluateChain() { } TerminationPredicate::Status DurationTerminationPredicateImpl::evaluate() { - return time_source_.systemTime() - start_ > duration_ ? TerminationPredicate::Status::TERMINATE - : TerminationPredicate::Status::PROCEED; + return time_source_.monotonicTime() - start_ > duration_ ? TerminationPredicate::Status::TERMINATE + : TerminationPredicate::Status::PROCEED; } TerminationPredicate::Status StatsCounterAbsoluteThresholdTerminationPredicateImpl::evaluate() { diff --git a/source/common/termination_predicate_impl.h b/source/common/termination_predicate_impl.h index 9a23a8f02..c1c761345 100644 --- a/source/common/termination_predicate_impl.h +++ b/source/common/termination_predicate_impl.h @@ -35,13 +35,13 @@ class DurationTerminationPredicateImpl : public TerminationPredicateBaseImpl { public: DurationTerminationPredicateImpl(Envoy::TimeSource& time_source, std::chrono::microseconds duration, - const Envoy::SystemTime start) + const Envoy::MonotonicTime start) : time_source_(time_source), start_(start), duration_(duration) {} TerminationPredicate::Status evaluate() override; private: Envoy::TimeSource& time_source_; - const Envoy::SystemTime start_; + const Envoy::MonotonicTime start_; std::chrono::microseconds duration_; }; diff --git a/test/BUILD b/test/BUILD index 326d971ee..a56d270ef 100644 --- a/test/BUILD +++ b/test/BUILD @@ -154,6 +154,7 @@ envoy_cc_test( "//test/test_common:environment_lib", "@envoy//test/test_common:network_utility_lib", "@envoy//test/test_common:registry_lib", + "@envoy//test/test_common:simulated_time_system_lib", ], ) diff --git a/test/client_worker_test.cc b/test/client_worker_test.cc index 5f00cc723..8ffdf6680 100644 --- a/test/client_worker_test.cc +++ b/test/client_worker_test.cc @@ -118,7 +118,7 @@ TEST_F(ClientWorkerTest, BasicTest) { auto 
worker = std::make_unique( *api_, tls_, cluster_manager_ptr_, benchmark_client_factory_, termination_predicate_factory_, sequencer_factory_, request_generator_factory_, store_, worker_number, - time_system_.systemTime(), http_tracer_, ClientWorkerImpl::HardCodedWarmupStyle::ON); + time_system_.monotonicTime(), http_tracer_, ClientWorkerImpl::HardCodedWarmupStyle::ON); worker->start(); worker->waitForCompletion(); diff --git a/test/factories_test.cc b/test/factories_test.cc index 21cf49c9e..946ed1d45 100644 --- a/test/factories_test.cc +++ b/test/factories_test.cc @@ -198,7 +198,7 @@ class SequencerFactoryTest }; auto sequencer = factory.create(api_->timeSource(), dispatcher_, dummy_sequencer_target, std::make_unique(), stats_store_, - time_system.systemTime() + 10ms); + time_system.monotonicTime() + 10ms); EXPECT_NE(nullptr, sequencer.get()); } }; diff --git a/test/mocks/client/mock_options.h b/test/mocks/client/mock_options.h index be3c6635a..04fc35ec0 100644 --- a/test/mocks/client/mock_options.h +++ b/test/mocks/client/mock_options.h @@ -57,6 +57,7 @@ class MockOptions : public Options { MOCK_CONST_METHOD0(statsSinks, std::vector()); MOCK_CONST_METHOD0(statsFlushInterval, uint32_t()); MOCK_CONST_METHOD0(responseHeaderWithLatencyInput, std::string()); + MOCK_CONST_METHOD0(scheduled_start, absl::optional()); }; } // namespace Client diff --git a/test/mocks/common/mock_rate_limiter.h b/test/mocks/common/mock_rate_limiter.h index a07062b47..a36bc154f 100644 --- a/test/mocks/common/mock_rate_limiter.h +++ b/test/mocks/common/mock_rate_limiter.h @@ -14,6 +14,7 @@ class MockRateLimiter : public RateLimiter { MOCK_METHOD0(releaseOne, void()); MOCK_METHOD0(timeSource, Envoy::TimeSource&()); MOCK_METHOD0(elapsed, std::chrono::nanoseconds()); + MOCK_CONST_METHOD0(firstAcquisitionTime, absl::optional()); }; class MockDiscreteNumericDistributionSampler : public DiscreteNumericDistributionSampler { diff --git a/test/mocks/common/mock_sequencer.h 
b/test/mocks/common/mock_sequencer.h index 52014362b..7d22434b7 100644 --- a/test/mocks/common/mock_sequencer.h +++ b/test/mocks/common/mock_sequencer.h @@ -1,5 +1,6 @@ #pragma once +#include "nighthawk/common/rate_limiter.h" #include "nighthawk/common/sequencer.h" #include "gmock/gmock.h" @@ -16,6 +17,7 @@ class MockSequencer : public Sequencer { MOCK_CONST_METHOD0(executionDuration, std::chrono::nanoseconds()); MOCK_CONST_METHOD0(statistics, StatisticPtrMap()); MOCK_METHOD0(cancel, void()); + MOCK_CONST_METHOD0(rate_limiter, RateLimiter&()); }; } // namespace Nighthawk \ No newline at end of file diff --git a/test/mocks/common/mock_sequencer_factory.h b/test/mocks/common/mock_sequencer_factory.h index 96983e24f..63c972f26 100644 --- a/test/mocks/common/mock_sequencer_factory.h +++ b/test/mocks/common/mock_sequencer_factory.h @@ -14,7 +14,7 @@ class MockSequencerFactory : public SequencerFactory { const SequencerTarget& sequencer_target, TerminationPredicatePtr&& termination_predicate, Envoy::Stats::Scope& scope, - const Envoy::SystemTime scheduled_starting_time)); + const Envoy::MonotonicTime scheduled_starting_time)); }; } // namespace Nighthawk \ No newline at end of file diff --git a/test/mocks/common/mock_termination_predicate_factory.h b/test/mocks/common/mock_termination_predicate_factory.h index 23aed4bf2..e37e8f128 100644 --- a/test/mocks/common/mock_termination_predicate_factory.h +++ b/test/mocks/common/mock_termination_predicate_factory.h @@ -12,7 +12,7 @@ class MockTerminationPredicateFactory : public TerminationPredicateFactory { MOCK_CONST_METHOD3(create, TerminationPredicatePtr(Envoy::TimeSource& time_source, Envoy::Stats::Scope& scope, - const Envoy::SystemTime scheduled_starting_time)); + const Envoy::MonotonicTime scheduled_starting_time)); }; } // namespace Nighthawk \ No newline at end of file diff --git a/test/output_formatter_test.cc b/test/output_formatter_test.cc index 8e23185b7..9b334fcc7 100644 --- a/test/output_formatter_test.cc +++ 
b/test/output_formatter_test.cc @@ -85,9 +85,9 @@ class OutputCollectorTest : public Test { void setupCollector() { collector_ = std::make_unique(time_system_, options_); - collector_->addResult("worker_0", statistics_, counters_, 1s); - collector_->addResult("worker_1", statistics_, counters_, 1s); - collector_->addResult("global", statistics_, counters_, 1s); + collector_->addResult("worker_0", statistics_, counters_, 1s, time_system_.systemTime()); + collector_->addResult("worker_1", statistics_, counters_, 1s, absl::nullopt); + collector_->addResult("global", statistics_, counters_, 1s, time_system_.systemTime()); } nighthawk::client::CommandLineOptions command_line_options_; diff --git a/test/process_test.cc b/test/process_test.cc index e0bae9a71..7d1191790 100644 --- a/test/process_test.cc +++ b/test/process_test.cc @@ -1,3 +1,4 @@ +#include #include #include @@ -6,6 +7,7 @@ #include "external/envoy/test/test_common/environment.h" #include "external/envoy/test/test_common/network_utility.h" #include "external/envoy/test/test_common/registry.h" +#include "external/envoy/test/test_common/simulated_time_system.h" #include "external/envoy/test/test_common/utility.h" #include "common/uri_impl.h" @@ -178,6 +180,85 @@ TEST_P(ProcessTest, NoFlushWhenCancelExecutionBeforeLoadTestBegin) { EXPECT_EQ(numFlushes, 0); } +/** + * Fixture for executing the Nighthawk process with simulated time. 
+ */ +class ProcessTestWithSimTime : public Envoy::Event::TestUsingSimulatedTime, + public TestWithParam { +public: + ProcessTestWithSimTime() + : options_(TestUtility::createOptionsImpl( + fmt::format("foo --duration 1 -v error --failure-predicate foo:0 --rps 10 https://{}/", + Envoy::Network::Test::getLoopbackAddressUrlString(GetParam())))){}; + +protected: + void run(std::function verify_callback) { + auto run_thread = std::thread([this, &verify_callback] { + ProcessPtr process = std::make_unique(*options_, simTime()); + OutputCollectorImpl collector(simTime(), *options_); + const bool result = process->run(collector); + process->shutdown(); + verify_callback(result, collector.toProto()); + }); + + // We introduce real-world sleeps to give the executing ProcessImpl + // an opportunity to observe passage of simulated time. We increase simulated + // time in three steps, to give it an opportunity to start at the wrong time + // in case there is an error in the scheduling logic it implements. + // Note that these sleeps may seem excessively long, but sanitizer runs may need that. + sleep(1); + // Move time to 1 second before the scheduled execution time. + simTime().setSystemTime(options_->scheduled_start().value() - 1s); + sleep(1); + // Move time right up to the scheduled execution time. + simTime().setSystemTime(options_->scheduled_start().value()); + sleep(1); + // Move time past the scheduled execution time and execution duration. + simTime().setSystemTime(options_->scheduled_start().value() + 2s); + // Wait for execution to wrap up. 
+ run_thread.join(); + } + + void setScheduleOnOptions(std::chrono::nanoseconds ns_since_epoch) { + CommandLineOptionsPtr command_line = options_->toCommandLineOptions(); + *(command_line->mutable_scheduled_start()) = + Envoy::Protobuf::util::TimeUtil::NanosecondsToTimestamp(ns_since_epoch.count()); + options_ = std::make_unique(*command_line); + } + + OptionsPtr options_; +}; + +INSTANTIATE_TEST_SUITE_P(IpVersions, ProcessTestWithSimTime, + ValuesIn(Envoy::TestEnvironment::getIpVersionsForTest()), + Envoy::TestUtility::ipTestParamsToString); + +// Verify that scheduling execution ahead of time works, and that the execution start timestamp +// associated to the worker result correctly reflects the scheduled time. This should be spot on +// because we use simulated time. +TEST_P(ProcessTestWithSimTime, ScheduleAheadWorks) { + for (const auto& relative_schedule : std::vector{30s, 1h}) { + setScheduleOnOptions( + std::chrono::nanoseconds(simTime().systemTime().time_since_epoch() + relative_schedule)); + run([this](bool success, const nighthawk::client::Output& output) { + EXPECT_TRUE(success); + ASSERT_EQ(output.results_size(), 1); + EXPECT_EQ(Envoy::ProtobufUtil::TimeUtil::TimestampToNanoseconds( + output.results()[0].execution_start()), + options_->scheduled_start().value().time_since_epoch().count()); + }); + } +} + +// Verify that scheduling an execution in the past yields an error. 
+TEST_P(ProcessTestWithSimTime, ScheduleInThePastFails) { + setScheduleOnOptions(std::chrono::nanoseconds(simTime().systemTime().time_since_epoch() - 1s)); + run([](bool success, const nighthawk::client::Output& output) { + EXPECT_FALSE(success); + EXPECT_EQ(output.results_size(), 0); + }); +} + } // namespace } // namespace Client } // namespace Nighthawk diff --git a/test/rate_limiter_test.cc b/test/rate_limiter_test.cc index 296f88373..935cdada6 100644 --- a/test/rate_limiter_test.cc +++ b/test/rate_limiter_test.cc @@ -76,7 +76,8 @@ TEST_F(RateLimiterTest, ScheduledStartingRateLimiterTest) { // scheduled delay. This should be business as usual from a functional perspective, but internally // this rate limiter specializes on this case to log a warning message, and we want to cover that. for (const bool starting_late : std::vector{false, true}) { - const Envoy::SystemTime scheduled_starting_time = time_system.systemTime() + schedule_delay; + const Envoy::MonotonicTime scheduled_starting_time = + time_system.monotonicTime() + schedule_delay; std::unique_ptr mock_rate_limiter = std::make_unique(); MockRateLimiter& unsafe_mock_rate_limiter = *mock_rate_limiter; InSequence s; @@ -95,7 +96,7 @@ TEST_F(RateLimiterTest, ScheduledStartingRateLimiterTest) { } // We should expect zero releases until it is time to start. - while (time_system.systemTime() < scheduled_starting_time) { + while (time_system.monotonicTime() < scheduled_starting_time) { EXPECT_FALSE(rate_limiter->tryAcquireOne()); time_system.advanceTimeWait(1ms); } @@ -108,8 +109,8 @@ TEST_F(RateLimiterTest, ScheduledStartingRateLimiterTest) { TEST_F(RateLimiterTest, ScheduledStartingRateLimiterTestBadArgs) { Envoy::Event::SimulatedTimeSystem time_system; // Verify we enforce future-only scheduling. 
- for (const auto& timing : - std::vector{time_system.systemTime(), time_system.systemTime() - 10ms}) { + for (const auto& timing : std::vector{time_system.monotonicTime(), + time_system.monotonicTime() - 10ms}) { std::unique_ptr mock_rate_limiter = std::make_unique(); MockRateLimiter& unsafe_mock_rate_limiter = *mock_rate_limiter; EXPECT_CALL(unsafe_mock_rate_limiter, timeSource) diff --git a/test/termination_predicate_test.cc b/test/termination_predicate_test.cc index 387883152..4ab0bab53 100644 --- a/test/termination_predicate_test.cc +++ b/test/termination_predicate_test.cc @@ -25,7 +25,7 @@ class TerminationPredicateTest : public Test { TEST_F(TerminationPredicateTest, DurationTerminationPredicateImplTest) { const auto duration = 100us; - DurationTerminationPredicateImpl pred(time_system, duration, time_system.systemTime()); + DurationTerminationPredicateImpl pred(time_system, duration, time_system.monotonicTime()); EXPECT_EQ(pred.evaluate(), TerminationPredicate::Status::PROCEED); // move to the edge. 
time_system.advanceTimeWait(duration); diff --git a/test/test_data/output_formatter.json.gold b/test/test_data/output_formatter.json.gold index 1048bc9fd..d1f142906 100644 --- a/test/test_data/output_formatter.json.gold +++ b/test/test_data/output_formatter.json.gold @@ -190,7 +190,8 @@ "value": "1" } ], - "execution_duration": "1s" + "execution_duration": "1s", + "execution_start": "2009-02-13T23:31:31.567Z" }, { "name": "worker_1", @@ -558,7 +559,8 @@ "value": "1" } ], - "execution_duration": "1s" + "execution_duration": "1s", + "execution_start": "2009-02-13T23:31:31.567Z" } ], "version": { diff --git a/test/test_data/output_formatter.yaml.gold b/test/test_data/output_formatter.yaml.gold index bc1b9b750..6a4255c66 100644 --- a/test/test_data/output_formatter.yaml.gold +++ b/test/test_data/output_formatter.yaml.gold @@ -126,6 +126,7 @@ results: - name: foo value: 1 execution_duration: 1s + execution_start: 2009-02-13T23:31:31.567Z - name: worker_1 statistics: - count: 3 @@ -358,6 +359,7 @@ results: - name: foo value: 1 execution_duration: 1s + execution_start: 2009-02-13T23:31:31.567Z version: version: major_number: @version_major@ From 2942dc9b1527d93898b0de54595d01812c68bce6 Mon Sep 17 00:00:00 2001 From: Otto van der Schaaf Date: Tue, 8 Dec 2020 05:09:29 +0100 Subject: [PATCH 43/63] Add a flag to allow for continued use of the deprecated v2 api (#584) - Adds a new proto/cli option, which allows requesting continued use of the about to be deprecated v2 api's. - Adds a new fixture / integration test for the test server, as a sanity check for making sure that when we update Envoy to the revision that deprecated v2 things will keep working as expected, while documenting how to set up the runtime configuration. 
Signed-off-by: Otto van der Schaaf --- README.md | 7 ++- api/client/options.proto | 4 +- include/nighthawk/client/options.h | 2 + source/client/options_impl.cc | 11 ++++ source/client/options_impl.h | 2 + source/client/process_impl.cc | 25 +++++++-- source/client/process_impl.h | 11 +++- test/integration/BUILD | 1 + ...k_http_origin_envoy_deprecated_v2_api.yaml | 39 ++++++++++++++ test/integration/integration_test_fixtures.py | 42 +++++++++++++-- test/integration/nighthawk_test_server.py | 30 +++++++++-- test/integration/test_integration_basics.py | 51 +++++++++++++++++-- test/mocks/client/mock_options.h | 1 + test/options_test.cc | 21 +++++++- test/process_test.cc | 27 ++++++++++ 15 files changed, 253 insertions(+), 21 deletions(-) create mode 100644 test/integration/configurations/nighthawk_http_origin_envoy_deprecated_v2_api.yaml diff --git a/README.md b/README.md index f5a0d58ca..36df4aee8 100644 --- a/README.md +++ b/README.md @@ -43,7 +43,8 @@ bazel build -c opt //:nighthawk ``` USAGE: -bazel-bin/nighthawk_client [--latency-response-header-name ] +bazel-bin/nighthawk_client [--allow-envoy-deprecated-v2-api] +[--latency-response-header-name ] [--stats-flush-interval ] [--stats-sinks ] ... [--no-duration] [--simple-warmup] @@ -83,6 +84,10 @@ format> Where: +--allow-envoy-deprecated-v2-api +Set to allow usage of the v2 api. (Not recommended, support will stop +in Q1 2021). Default: false + --latency-response-header-name Set an optional header name that will be returned in responses, whose values will be tracked in a latency histogram if set. Can be used in diff --git a/api/client/options.proto b/api/client/options.proto index d1108e908..3274dd015 100644 --- a/api/client/options.proto +++ b/api/client/options.proto @@ -106,7 +106,7 @@ message H1ConnectionReuseStrategy { // TODO(oschaaf): Ultimately this will be a load test specification. The fact that it // can arrive via CLI is just a concrete detail. Change this to reflect that. 
-// highest unused number is 38 +// highest unused number is 39 message CommandLineOptions { // The target requests-per-second rate. Default: 5. google.protobuf.UInt32Value requests_per_second = 1 @@ -220,6 +220,8 @@ message CommandLineOptions { // "emit_previous_request_delta_in_response_header" to record elapsed time between request // arrivals. google.protobuf.StringValue latency_response_header_name = 36; + // Set to allow usage of the v2 api. (Not recommended, support will stop in Q1 2021). + google.protobuf.BoolValue allow_envoy_deprecated_v2_api = 38 [deprecated = true]; // Provide an execution starting date and time. Optional, any value specified must be in the // future. google.protobuf.Timestamp scheduled_start = 105; diff --git a/include/nighthawk/client/options.h b/include/nighthawk/client/options.h index 5e6dc8292..a04292a86 100644 --- a/include/nighthawk/client/options.h +++ b/include/nighthawk/client/options.h @@ -75,6 +75,8 @@ class Options { virtual std::vector statsSinks() const PURE; virtual uint32_t statsFlushInterval() const PURE; virtual std::string responseHeaderWithLatencyInput() const PURE; + virtual bool allowEnvoyDeprecatedV2Api() const PURE; + virtual absl::optional scheduled_start() const PURE; /** * Converts an Options instance to an equivalent CommandLineOptions instance in terms of option diff --git a/source/client/options_impl.cc b/source/client/options_impl.cc index b4ff7ae3e..0ed825fe7 100644 --- a/source/client/options_impl.cc +++ b/source/client/options_impl.cc @@ -315,6 +315,12 @@ OptionsImpl::OptionsImpl(int argc, const char* const* argv) { "Default: \"\"", false, "", "string", cmd); + TCLAP::SwitchArg allow_envoy_deprecated_v2_api( + "", "allow-envoy-deprecated-v2-api", + "Set to allow usage of the v2 api. (Not recommended, support will stop in Q1 2021). 
Default: " + "false", + cmd); + Utility::parseCommand(cmd, argc, argv); // --duration and --no-duration are mutually exclusive @@ -447,6 +453,7 @@ OptionsImpl::OptionsImpl(int argc, const char* const* argv) { } TCLAP_SET_IF_SPECIFIED(stats_flush_interval, stats_flush_interval_); TCLAP_SET_IF_SPECIFIED(latency_response_header_name, latency_response_header_name_); + TCLAP_SET_IF_SPECIFIED(allow_envoy_deprecated_v2_api, allow_envoy_deprecated_v2_api_); // CLI-specific tests. // TODO(oschaaf): as per mergconflicts's remark, it would be nice to aggregate @@ -652,6 +659,8 @@ OptionsImpl::OptionsImpl(const nighthawk::client::CommandLineOptions& options) { std::copy(options.labels().begin(), options.labels().end(), std::back_inserter(labels_)); latency_response_header_name_ = PROTOBUF_GET_WRAPPED_OR_DEFAULT( options, latency_response_header_name, latency_response_header_name_); + allow_envoy_deprecated_v2_api_ = PROTOBUF_GET_WRAPPED_OR_DEFAULT( + options, allow_envoy_deprecated_v2_api, allow_envoy_deprecated_v2_api_); if (options.has_scheduled_start()) { const auto elapsed_since_epoch = std::chrono::nanoseconds(options.scheduled_start().nanos()) + std::chrono::seconds(options.scheduled_start().seconds()); @@ -834,6 +843,8 @@ CommandLineOptionsPtr OptionsImpl::toCommandLineOptionsInternal() const { command_line_options->mutable_stats_flush_interval()->set_value(stats_flush_interval_); command_line_options->mutable_latency_response_header_name()->set_value( latency_response_header_name_); + command_line_options->mutable_allow_envoy_deprecated_v2_api()->set_value( + allow_envoy_deprecated_v2_api_); if (scheduled_start_.has_value()) { *(command_line_options->mutable_scheduled_start()) = Envoy::ProtobufUtil::TimeUtil::NanosecondsToTimestamp( diff --git a/source/client/options_impl.h b/source/client/options_impl.h index 7132aba01..b84d80d3e 100644 --- a/source/client/options_impl.h +++ b/source/client/options_impl.h @@ -93,6 +93,7 @@ class OptionsImpl : public Options, public 
Envoy::Logger::Loggable scheduled_start() const override { return scheduled_start_; } private: @@ -150,6 +151,7 @@ class OptionsImpl : public Options, public Envoy::Logger::Loggable stats_sinks_; uint32_t stats_flush_interval_{5}; std::string latency_response_header_name_; + bool allow_envoy_deprecated_v2_api_{false}; absl::optional scheduled_start_; }; diff --git a/source/client/process_impl.cc b/source/client/process_impl.cc index cbf3b695d..dc8560238 100644 --- a/source/client/process_impl.cc +++ b/source/client/process_impl.cc @@ -276,10 +276,28 @@ ProcessImpl::mergeWorkerStatistics(const std::vector& workers) return merged_statistics; } +void ProcessImpl::allowEnvoyDeprecatedV2Api(envoy::config::bootstrap::v3::Bootstrap& bootstrap) { + auto* admin_layer = bootstrap.mutable_layered_runtime()->add_layers(); + admin_layer->set_name("admin layer"); + admin_layer->mutable_admin_layer(); + envoy::config::bootstrap::v3::RuntimeLayer* runtime_layer = + bootstrap.mutable_layered_runtime()->add_layers(); + runtime_layer->set_name("static_layer"); + Envoy::ProtobufWkt::Value proto_true; + proto_true.set_string_value("true"); + (*runtime_layer->mutable_static_layer() + ->mutable_fields())["envoy.reloadable_features.enable_deprecated_v2_api"] = proto_true; +} + void ProcessImpl::createBootstrapConfiguration(envoy::config::bootstrap::v3::Bootstrap& bootstrap, const std::vector& uris, const UriPtr& request_source_uri, - int number_of_clusters) const { + int number_of_clusters, + bool allow_envoy_deprecated_v2_api) const { + if (allow_envoy_deprecated_v2_api) { + allowEnvoyDeprecatedV2Api(bootstrap); + } + for (int i = 0; i < number_of_clusters; i++) { auto* cluster = bootstrap.mutable_static_resources()->add_clusters(); RELEASE_ASSERT(!uris.empty(), "illegal configuration with zero endpoints"); @@ -466,7 +484,8 @@ bool ProcessImpl::runInternal(OutputCollector& collector, const std::vector( Envoy::Runtime::LoaderPtr{new Envoy::Runtime::LoaderImpl( - *dispatcher_, tls_, {}, 
*local_info_, store_root_, generator_, + *dispatcher_, tls_, bootstrap.layered_runtime(), *local_info_, store_root_, generator_, Envoy::ProtobufMessage::getStrictValidationVisitor(), *api_)}); ssl_context_manager_ = std::make_unique( diff --git a/source/client/process_impl.h b/source/client/process_impl.h index d14b86e6a..03a99a72a 100644 --- a/source/client/process_impl.h +++ b/source/client/process_impl.h @@ -30,6 +30,7 @@ #include "external/envoy/source/exe/process_wide.h" #include "external/envoy/source/extensions/transport_sockets/tls/context_manager_impl.h" #include "external/envoy/source/server/config_validation/admin.h" +#include "external/envoy_api/envoy/config/bootstrap/v3/bootstrap.pb.h" #include "client/benchmark_client_impl.h" #include "client/factories_impl.h" @@ -84,6 +85,12 @@ class ProcessImpl : public Process, public Envoy::Logger::Loggable& uris, - const UriPtr& request_source_uri, int number_of_workers) const; + const UriPtr& request_source_uri, int number_of_workers, + bool allow_envoy_deprecated_v2_api) const; void maybeCreateTracingDriver(const envoy::config::trace::v3::Tracing& configuration); - void configureComponentLogLevels(spdlog::level::level_enum level); /** * Prepare the ProcessImpl instance by creating and configuring the workers it needs for execution diff --git a/test/integration/BUILD b/test/integration/BUILD index 1290e6090..c9cd5ad68 100644 --- a/test/integration/BUILD +++ b/test/integration/BUILD @@ -13,6 +13,7 @@ py_library( name = "integration_test_base", data = [ "configurations/nighthawk_http_origin.yaml", + "configurations/nighthawk_http_origin_envoy_deprecated_v2_api.yaml", "configurations/nighthawk_https_origin.yaml", "configurations/nighthawk_track_timings.yaml", "configurations/sni_origin.yaml", diff --git a/test/integration/configurations/nighthawk_http_origin_envoy_deprecated_v2_api.yaml b/test/integration/configurations/nighthawk_http_origin_envoy_deprecated_v2_api.yaml new file mode 100644 index 
000000000..0e795b6ba --- /dev/null +++ b/test/integration/configurations/nighthawk_http_origin_envoy_deprecated_v2_api.yaml @@ -0,0 +1,39 @@ +# This file is intentionally using the v2 api: it is used to test support for that. +admin: + access_log_path: $tmpdir/nighthawk-test-server-admin-access.log + profile_path: $tmpdir/nighthawk-test-server.prof + address: + socket_address: { address: $server_ip, port_value: 0 } +static_resources: + listeners: + - address: + socket_address: + address: $server_ip + port_value: 0 + filter_chains: + - filters: + - name: envoy.http_connection_manager + config: + generate_request_id: false + codec_type: auto + stat_prefix: ingress_http + route_config: + name: local_route + virtual_hosts: + - name: service + domains: + - "*" + http_filters: + - name: test-server + config: + response_body_size: 10 + response_headers: + - { header: { key: "x-nh", value: "1"}} + - name: envoy.router + config: + dynamic_stats: false +layered_runtime: + layers: + - name: static_layer + static_layer: + envoy.reloadable_features.enable_deprecated_v2_api: true diff --git a/test/integration/integration_test_fixtures.py b/test/integration/integration_test_fixtures.py index 7850eef26..34fea1d15 100644 --- a/test/integration/integration_test_fixtures.py +++ b/test/integration/integration_test_fixtures.py @@ -56,13 +56,15 @@ class IntegrationTestBase(): This class will be refactored (https://github.com/envoyproxy/nighthawk/issues/258). """ - def __init__(self, ip_version, server_config, backend_count=1): + def __init__(self, ip_version, server_config, backend_count=1, bootstrap_version_arg=None): """Initialize the IntegrationTestBase instance. 
Args: ip_version: a single IP mode that this instance will test: IpVersion.IPV4 or IpVersion.IPV6 server_config: path to the server configuration backend_count: number of Nighthawk Test Server backends to run, to allow testing MultiTarget mode + bootstrap_version_arg: An optional int, specify a bootstrap cli argument value for the test server binary. If None is specified, no bootstrap cli argment will be passed. + Attributes: ip_version: IP version that the proxy should use when listening. server_ip: string containing the server ip that will be used to listen @@ -90,6 +92,7 @@ def __init__(self, ip_version, server_config, backend_count=1): self._test_servers = [] self._backend_count = backend_count self._test_id = "" + self._bootstrap_version_arg = bootstrap_version_arg # TODO(oschaaf): For the NH test server, add a way to let it determine a port by itself and pull that # out. @@ -141,7 +144,8 @@ def _tryStartTestServers(self): self.server_ip, self.ip_version, parameters=self.parameters, - tag=self.tag) + tag=self.tag, + bootstrap_version_arg=self._bootstrap_version_arg) if not test_server.start(): return False self._test_servers.append(test_server) @@ -283,7 +287,7 @@ def startNighthawkGrpcService(self, service_name="traffic-generator-service"): class HttpIntegrationTestBase(IntegrationTestBase): """Base for running plain http tests against the Nighthawk test server. - NOTE: any script that consumes derivations of this, needs to needs also explictly + NOTE: any script that consumes derivations of this, needs to also explicitly import server_config, to avoid errors caused by the server_config not being found by pytest. """ @@ -297,6 +301,25 @@ def getTestServerRootUri(self): return super(HttpIntegrationTestBase, self).getTestServerRootUri(False) +class HttpIntegrationTestBaseWithEnvoyDeprecatedV2Bootstrap(IntegrationTestBase): + """Base for running plain http tests against the Nighthawk test server. 
+ + NOTE: any script that consumes derivations of this, needs to also explicitly + import server_config, to avoid errors caused by the server_config not being found + by pytest. + """ + + def __init__(self, ip_version, server_config): + """See base class.""" + super(HttpIntegrationTestBaseWithEnvoyDeprecatedV2Bootstrap, + self).__init__(ip_version, server_config, bootstrap_version_arg=2) + + def getTestServerRootUri(self): + """See base class.""" + return super(HttpIntegrationTestBaseWithEnvoyDeprecatedV2Bootstrap, + self).getTestServerRootUri(False) + + class MultiServerHttpIntegrationTestBase(IntegrationTestBase): """Base for running plain http tests against multiple Nighthawk test servers.""" @@ -378,6 +401,19 @@ def http_test_server_fixture(request, server_config): f.tearDown() +@pytest.fixture(params=determineIpVersionsFromEnvironment()) +def http_test_server_fixture_envoy_deprecated_v2_api(request, server_config): + """Fixture for setting up a test environment with http server configuration that uses v2 configuration. + + Yields: + HttpIntegrationTestBaseWithEnvoyDeprecatedV2Bootstrap: A fully set up instance. Tear down will happen automatically. + """ + f = HttpIntegrationTestBaseWithEnvoyDeprecatedV2Bootstrap(request.param, server_config) + f.setUp() + yield f + f.tearDown() + + @pytest.fixture(params=determineIpVersionsFromEnvironment()) def https_test_server_fixture(request, server_config): """Fixture for setting up a test environment with the stock https server configuration. diff --git a/test/integration/nighthawk_test_server.py b/test/integration/nighthawk_test_server.py index e23911a3e..b4ee5d056 100644 --- a/test/integration/nighthawk_test_server.py +++ b/test/integration/nighthawk_test_server.py @@ -51,8 +51,15 @@ class TestServerBase(object): tmpdir: String, indicates the location used to store outputs like logs. 
""" - def __init__(self, server_binary_path, config_template_path, server_ip, ip_version, - server_binary_config_path_arg, parameters, tag): + def __init__(self, + server_binary_path, + config_template_path, + server_ip, + ip_version, + server_binary_config_path_arg, + parameters, + tag, + bootstrap_version_arg=None): """Initialize a TestServerBase instance. Args: @@ -63,6 +70,7 @@ def __init__(self, server_binary_path, config_template_path, server_ip, ip_versi server_binary_config_path_arg (str): Specify the name of the CLI argument the test server binary uses to accept a configuration path. parameters (dict): Supply to provide configuration template parameter replacement values. tag (str): Supply to get recognizeable output locations. + bootstrap_version_arg (int, optional): specify a bootstrap cli argument value for the test server binary. """ assert ip_version != IpVersion.UNKNOWN self.ip_version = ip_version @@ -82,6 +90,7 @@ def __init__(self, server_binary_path, config_template_path, server_ip, ip_versi self._parameterized_config_path = "" self._instance_id = str(random.randint(1, 1024 * 1024 * 1024)) self._server_binary_config_path_arg = server_binary_config_path_arg + self._bootstrap_version_arg = bootstrap_version_arg self._prepareForExecution() def _prepareForExecution(self): @@ -121,6 +130,9 @@ def _serverThreadRunner(self): self._parameterized_config_path, "-l", "debug", "--base-id", self._instance_id, "--admin-address-path", self._admin_address_path, "--concurrency", "1" ] + if self._bootstrap_version_arg is not None: + args = args + ["--bootstrap-version", str(self._bootstrap_version_arg)] + logging.info("Test server popen() args: %s" % str.join(" ", args)) self._server_process = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = self._server_process.communicate() @@ -228,7 +240,8 @@ def __init__(self, server_ip, ip_version, parameters=dict(), - tag=""): + tag="", + bootstrap_version_arg=None): """Initialize a 
NighthawkTestServer instance. Args: @@ -238,9 +251,16 @@ def __init__(self, ip_version (IPVersion): IPVersion enum member indicating the ip version that the server should use when listening. parameters (dictionary, optional): Directionary with replacement values for substition purposes in the server configuration template. Defaults to dict(). tag (str, optional): Tags. Supply this to get recognizeable output locations. Defaults to "". + bootstrap_version_arg (String, optional): Specify a cli argument value for --bootstrap-version when running the server. """ - super(NighthawkTestServer, self).__init__(server_binary_path, config_template_path, server_ip, - ip_version, "--config-path", parameters, tag) + super(NighthawkTestServer, self).__init__(server_binary_path, + config_template_path, + server_ip, + ip_version, + "--config-path", + parameters, + tag, + bootstrap_version_arg=bootstrap_version_arg) def getCliVersionString(self): """Get the version string as written to the output by the CLI.""" diff --git a/test/integration/test_integration_basics.py b/test/integration/test_integration_basics.py index 3fe989aa6..630095abb 100644 --- a/test/integration/test_integration_basics.py +++ b/test/integration/test_integration_basics.py @@ -10,11 +10,10 @@ from threading import Thread from test.integration.common import IpVersion -from test.integration.integration_test_fixtures import (http_test_server_fixture, - https_test_server_fixture, - multi_http_test_server_fixture, - multi_https_test_server_fixture, - server_config) +from test.integration.integration_test_fixtures import ( + http_test_server_fixture, http_test_server_fixture_envoy_deprecated_v2_api, + https_test_server_fixture, https_test_server_fixture, multi_http_test_server_fixture, + multi_https_test_server_fixture, server_config) from test.integration import asserts from test.integration import utility @@ -69,6 +68,48 @@ def test_http_h1(http_test_server_fixture): asserts.assertEqual(len(counters), 12) 
+@pytest.mark.parametrize('server_config', [ + "nighthawk/test/integration/configurations/nighthawk_http_origin_envoy_deprecated_v2_api.yaml" +]) +def test_nighthawk_test_server_envoy_deprecated_v2_api( + http_test_server_fixture_envoy_deprecated_v2_api): + """Test that the v2 configuration works for the test server.""" + parsed_json, _ = http_test_server_fixture_envoy_deprecated_v2_api.runNighthawkClient([ + http_test_server_fixture_envoy_deprecated_v2_api.getTestServerRootUri(), "--duration", "100", + "--termination-predicate", "benchmark.http_2xx:24" + ]) + + counters = http_test_server_fixture_envoy_deprecated_v2_api.getNighthawkCounterMapFromJson( + parsed_json) + asserts.assertCounterEqual(counters, "benchmark.http_2xx", 25) + + +def test_nighthawk_client_v2_api_explicitly_set(http_test_server_fixture): + """Test that the v2 api works when requested to.""" + parsed_json, _ = http_test_server_fixture.runNighthawkClient([ + http_test_server_fixture.getTestServerRootUri(), "--duration", "100", + "--termination-predicate", "benchmark.pool_connection_failure:0", "--failure-predicate", + "foo:1", "--allow-envoy-deprecated-v2-api", "--transport-socket", + "{name:\"envoy.transport_sockets.tls\",typed_config:{\"@type\":\"type.googleapis.com/envoy.api.v2.auth.UpstreamTlsContext\",\"common_tls_context\":{}}}" + ]) + + counters = http_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json) + asserts.assertCounterEqual(counters, "benchmark.pool_connection_failure", 1) + + +# TODO(oschaaf): This ought to work after the Envoy update. 
+def DISABLED_test_nighthawk_client_v2_api_breaks_by_default(http_test_server_fixture): + """Test that the v2 api breaks us when it's not explicitly requested.""" + _, _ = http_test_server_fixture.runNighthawkClient([ + http_test_server_fixture.getTestServerRootUri(), "--duration", "100", + "--termination-predicate", "benchmark.pool_connection_failure:0", "--failure-predicate", + "foo:1", "--transport-socket", + "{name:\"envoy.transport_sockets.tls\",typed_config:{\"@type\":\"type.googleapis.com/envoy.api.v2.auth.UpstreamTlsContext\",\"common_tls_context\":{}}}" + ], + expect_failure=True, + as_json=False) + + def _mini_stress_test(fixture, args): # run a test with more rps then we can handle, and a very small client-side queue. # we should observe both lots of successfull requests as well as time spend in blocking mode., diff --git a/test/mocks/client/mock_options.h b/test/mocks/client/mock_options.h index 04fc35ec0..a6e85d42c 100644 --- a/test/mocks/client/mock_options.h +++ b/test/mocks/client/mock_options.h @@ -57,6 +57,7 @@ class MockOptions : public Options { MOCK_CONST_METHOD0(statsSinks, std::vector()); MOCK_CONST_METHOD0(statsFlushInterval, uint32_t()); MOCK_CONST_METHOD0(responseHeaderWithLatencyInput, std::string()); + MOCK_CONST_METHOD0(allowEnvoyDeprecatedV2Api, bool()); MOCK_CONST_METHOD0(scheduled_start, absl::optional()); }; diff --git a/test/options_test.cc b/test/options_test.cc index d65604b67..a1f6c143b 100644 --- a/test/options_test.cc +++ b/test/options_test.cc @@ -118,7 +118,7 @@ TEST_F(OptionsImplTest, AlmostAll) { "--experimental-h2-use-multiple-connections " "--experimental-h1-connection-reuse-strategy lru --label label1 --label label2 {} " "--simple-warmup --stats-sinks {} --stats-sinks {} --stats-flush-interval 10 " - "--latency-response-header-name zz", + "--latency-response-header-name zz --allow-envoy-deprecated-v2-api", client_name_, "{name:\"envoy.transport_sockets.tls\"," "typed_config:{\"@type\":\"type.googleapis.com/" @@ -193,6 
+193,7 @@ TEST_F(OptionsImplTest, AlmostAll) { "183412668: \"envoy.config.metrics.v2.StatsSink\"\n", options->statsSinks()[1].DebugString()); EXPECT_EQ("zz", options->responseHeaderWithLatencyInput()); + EXPECT_TRUE(options->allowEnvoyDeprecatedV2Api()); // Check that our conversion to CommandLineOptionsPtr makes sense. CommandLineOptionsPtr cmd = options->toCommandLineOptions(); @@ -251,6 +252,8 @@ TEST_F(OptionsImplTest, AlmostAll) { EXPECT_TRUE(util(cmd->stats_sinks(0), options->statsSinks()[0])); EXPECT_TRUE(util(cmd->stats_sinks(1), options->statsSinks()[1])); EXPECT_EQ(cmd->latency_response_header_name().value(), options->responseHeaderWithLatencyInput()); + ASSERT_TRUE(cmd->has_allow_envoy_deprecated_v2_api()); + EXPECT_EQ(cmd->allow_envoy_deprecated_v2_api().value(), options->allowEnvoyDeprecatedV2Api()); // TODO(#433) Here and below, replace comparisons once we choose a proto diff. OptionsImpl options_from_proto(*cmd); std::string s1 = Envoy::MessageUtil::getYamlStringFromMessage( @@ -594,6 +597,22 @@ TEST_F(OptionsImplTest, PrefetchConnectionsFlag) { MalformedArgvException, "Couldn't find match for argument"); } +TEST_F(OptionsImplTest, AllowEnvoyDeprecatedV2ApiFlag) { + EXPECT_FALSE(TestUtility::createOptionsImpl(fmt::format("{} {}", client_name_, good_test_uri_)) + ->allowEnvoyDeprecatedV2Api()); + EXPECT_TRUE(TestUtility::createOptionsImpl(fmt::format("{} --allow-envoy-deprecated-v2-api {}", + client_name_, good_test_uri_)) + ->allowEnvoyDeprecatedV2Api()); + EXPECT_THROW_WITH_REGEX( + TestUtility::createOptionsImpl( + fmt::format("{} --allow-envoy-deprecated-v2-api 0 {}", client_name_, good_test_uri_)), + MalformedArgvException, "Couldn't find match for argument"); + EXPECT_THROW_WITH_REGEX( + TestUtility::createOptionsImpl( + fmt::format("{} --allow-envoy-deprecated-v2-api true {}", client_name_, good_test_uri_)), + MalformedArgvException, "Couldn't find match for argument"); +} + // Test --concurrency, which is a bit special. 
It's an int option, which also accepts 'auto' as // a value. We need to implement some stuff ourselves to get this to work, hence we don't run it // through the OptionsImplIntTest. diff --git a/test/process_test.cc b/test/process_test.cc index 7d1191790..077f60ef4 100644 --- a/test/process_test.cc +++ b/test/process_test.cc @@ -9,6 +9,7 @@ #include "external/envoy/test/test_common/registry.h" #include "external/envoy/test/test_common/simulated_time_system.h" #include "external/envoy/test/test_common/utility.h" +#include "external/envoy_api/envoy/config/bootstrap/v3/bootstrap.pb.h" #include "common/uri_impl.h" @@ -180,6 +181,32 @@ TEST_P(ProcessTest, NoFlushWhenCancelExecutionBeforeLoadTestBegin) { EXPECT_EQ(numFlushes, 0); } +TEST(RuntimeConfiguration, allowEnvoyDeprecatedV2Api) { + envoy::config::bootstrap::v3::Bootstrap bootstrap; + EXPECT_EQ(bootstrap.DebugString(), ""); + ProcessImpl::allowEnvoyDeprecatedV2Api(bootstrap); + std::cerr << bootstrap.DebugString() << std::endl; + EXPECT_EQ(bootstrap.DebugString(), R"EOF(layered_runtime { + layers { + name: "admin layer" + admin_layer { + } + } + layers { + name: "static_layer" + static_layer { + fields { + key: "envoy.reloadable_features.enable_deprecated_v2_api" + value { + string_value: "true" + } + } + } + } +} +)EOF"); +} + /** * Fixture for executing the Nighthawk process with simulated time. */ From a4260dc312f8e08d337269627c9753c0c1ca50cc Mon Sep 17 00:00:00 2001 From: Otto van der Schaaf Date: Tue, 8 Dec 2020 05:11:59 +0100 Subject: [PATCH 44/63] Update Envoy to 588d9344b31e6544869547c4bcd359b3b0f1d4cf (#575) - Fixes to accommodate upstream connection pool changes. - Fixes to accommodate upstream cluster related changes. 
Signed-off-by: Otto van der Schaaf --- bazel/repositories.bzl | 4 ++-- source/client/benchmark_client_impl.cc | 2 +- source/client/benchmark_client_impl.h | 4 ++-- source/client/process_impl.cc | 15 +++++++++++++-- source/common/request_source_impl.cc | 3 ++- source/server/README.md | 7 +------ 6 files changed, 21 insertions(+), 14 deletions(-) diff --git a/bazel/repositories.bzl b/bazel/repositories.bzl index 1592a3271..e3c7d62fe 100644 --- a/bazel/repositories.bzl +++ b/bazel/repositories.bzl @@ -1,7 +1,7 @@ load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") -ENVOY_COMMIT = "8e6b176b89240d1b8ce3f3e4a8e276e4a40fcd1e" # November 10th, 2020 -ENVOY_SHA = "1eba9e904699bbc43c708f90c9e7b1354aed7bafe3784be2c6bfa04919cc67eb" +ENVOY_COMMIT = "588d9344b31e6544869547c4bcd359b3b0f1d4cf" # November 16th, 2020 +ENVOY_SHA = "45935eee5714b4d85e2eb264f6e1a922999ff8e5823a49fb0c4d1255494550a8" HDR_HISTOGRAM_C_VERSION = "0.11.2" # October 12th, 2020 HDR_HISTOGRAM_C_SHA = "637f28b5f64de2e268131e4e34e6eef0b91cf5ff99167db447d9b2825eae6bad" diff --git a/source/client/benchmark_client_impl.cc b/source/client/benchmark_client_impl.cc index 405bfc134..83008acad 100644 --- a/source/client/benchmark_client_impl.cc +++ b/source/client/benchmark_client_impl.cc @@ -75,7 +75,7 @@ Http1PoolImpl::newStream(Envoy::Http::ResponseDecoder& response_decoder, } // Vanilla Envoy pool behavior. 
- return ConnPoolImpl::newStream(response_decoder, callbacks); + return HttpConnPoolImplBase::newStream(response_decoder, callbacks); } BenchmarkClientHttpImpl::BenchmarkClientHttpImpl( diff --git a/source/client/benchmark_client_impl.h b/source/client/benchmark_client_impl.h index 0304d4460..7254f83ae 100644 --- a/source/client/benchmark_client_impl.h +++ b/source/client/benchmark_client_impl.h @@ -75,13 +75,13 @@ struct BenchmarkClientStatistic { StatisticPtr origin_latency_statistic; }; -class Http1PoolImpl : public Envoy::Http::Http1::ProdConnPoolImpl { +class Http1PoolImpl : public Envoy::Http::FixedHttpConnPoolImpl { public: enum class ConnectionReuseStrategy { MRU, LRU, }; - using Envoy::Http::Http1::ProdConnPoolImpl::ProdConnPoolImpl; + using Envoy::Http::FixedHttpConnPoolImpl::FixedHttpConnPoolImpl; Envoy::Http::ConnectionPool::Cancellable* newStream(Envoy::Http::ResponseDecoder& response_decoder, Envoy::Http::ConnectionPool::Callbacks& callbacks) override; diff --git a/source/client/process_impl.cc b/source/client/process_impl.cc index dc8560238..efef72125 100644 --- a/source/client/process_impl.cc +++ b/source/client/process_impl.cc @@ -72,8 +72,19 @@ class ClusterManagerFactory : public Envoy::Upstream::ProdClusterManagerFactory const Envoy::Network::ConnectionSocket::OptionsSharedPtr& options, const Envoy::Network::TransportSocketOptionsSharedPtr& transport_socket_options) override { if (protocol == Envoy::Http::Protocol::Http11 || protocol == Envoy::Http::Protocol::Http10) { - auto* h1_pool = new Http1PoolImpl(dispatcher, api_.randomGenerator(), host, priority, options, - transport_socket_options); + auto* h1_pool = new Http1PoolImpl( + host, priority, dispatcher, options, transport_socket_options, api_.randomGenerator(), + [](Envoy::Http::HttpConnPoolImplBase* pool) { + return std::make_unique(*pool); + }, + [](Envoy::Upstream::Host::CreateConnectionData& data, + Envoy::Http::HttpConnPoolImplBase* pool) { + Envoy::Http::CodecClientPtr codec{new 
Envoy::Http::CodecClientProd( + Envoy::Http::CodecClient::Type::HTTP1, std::move(data.connection_), + data.host_description_, pool->dispatcher(), pool->randomGenerator())}; + return codec; + }, + std::vector{protocol}); h1_pool->setConnectionReuseStrategy(connection_reuse_strategy_); h1_pool->setPrefetchConnections(prefetch_connections_); return Envoy::Http::ConnectionPool::InstancePtr{h1_pool}; diff --git a/source/common/request_source_impl.cc b/source/common/request_source_impl.cc index d7a23bf9d..c2ce7d4b5 100644 --- a/source/common/request_source_impl.cc +++ b/source/common/request_source_impl.cc @@ -36,7 +36,8 @@ RemoteRequestSourceImpl::RemoteRequestSourceImpl( void RemoteRequestSourceImpl::connectToRequestStreamGrpcService() { Envoy::TimeSource& time_source = dispatcher_.timeSource(); const auto clusters = cluster_manager_->clusters(); - const bool have_cluster = clusters.find(service_cluster_name_) != clusters.end(); + const bool have_cluster = + clusters.active_clusters_.find(service_cluster_name_) != clusters.active_clusters_.end(); ASSERT(have_cluster); const std::chrono::seconds STREAM_SETUP_TIMEOUT = 60s; envoy::config::core::v3::GrpcService grpc_service; diff --git a/source/server/README.md b/source/server/README.md index d98631126..5687360a7 100644 --- a/source/server/README.md +++ b/source/server/README.md @@ -175,8 +175,7 @@ bazel-bin/nighthawk_test_server [--socket-mode ] [--socket-path [--hot-restart-version] [--restart-epoch ] [--log-path ] -[--log-format-prefix-with-location -] [--enable-fine-grain-logging] +[--enable-fine-grain-logging] [--log-format-escaped] [--log-format ] [--component-log-level ] [-l ] @@ -252,10 +251,6 @@ hot restart epoch # --log-path Path to logfile ---log-format-prefix-with-location -Prefix all occurrences of '%v' in log format with with '[%g:%#] ' -('[path/to/file.cc:99] '). 
- --enable-fine-grain-logging Logger mode: enable file level log control(Fancy Logger)or not From 90af260222b7811e4e090d2ab110f14be1bf0a6e Mon Sep 17 00:00:00 2001 From: Jakub Sobon Date: Mon, 7 Dec 2020 23:18:01 -0500 Subject: [PATCH 45/63] Updating documentation to use Envoy API v3. (#586) Verified that an Envoy built after `588d9344b31e6544869547c4bcd359b3b0f1d4cf` can load the configuration from the documents. Also: - fixing one typo in the documentation. - cosmetic changes of enum values to uppercase form. Works on #580 Signed-off-by: Jakub Sobon --- source/server/README.md | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/source/server/README.md b/source/server/README.md index 5687360a7..f1c27000d 100644 --- a/source/server/README.md +++ b/source/server/README.md @@ -34,10 +34,11 @@ static_resources: port_value: 10000 filter_chains: - filters: - - name: envoy.http_connection_manager - config: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager generate_request_id: false - codec_type: auto + codec_type: AUTO stat_prefix: ingress_http route_config: name: local_route @@ -47,10 +48,12 @@ static_resources: - "*" http_filters: - name: dynamic-delay - config: + typed_config: + "@type": type.googleapis.com/nighthawk.server.ResponseOptions static_delay: 0.5s - name: test-server # before envoy.router because order matters! 
- config: + typed_config: + "@type": type.googleapis.com/nighthawk.server.ResponseOptions response_body_size: 10 response_headers: - { header: { key: "foo", value: "bar" } } @@ -59,8 +62,9 @@ static_resources: append: true, } - { header: { key: "x-nh", value: "1" } } - - name: envoy.router - config: + - name: envoy.filters.http.router + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router dynamic_stats: false admin: access_log_path: /tmp/envoy.log @@ -146,7 +150,7 @@ same time. ``` # If you already have Envoy running, you might need to set --base-id to allow the test-server to start. -➜ /bazel-bin/nighthawk/source/server/server --config-path /path/to/test-server-server.yaml +➜ /bazel-bin/nighthawk/source/server/server --config-path /path/to/test-server.yaml # Verify the test server with a curl command similar to: ➜ curl -H "x-nighthawk-test-server-config: {response_body_size:20, static_delay: \"0s\"}" -vv 127.0.0.1:10000 From facd66b234964c27094d4d51b1fb09766c273119 Mon Sep 17 00:00:00 2001 From: Jakub Sobon Date: Mon, 7 Dec 2020 23:48:41 -0500 Subject: [PATCH 46/63] Deprecate Envoy API v2 primitives in configuration for our Envoy filters. (#588) Adds a new field `v3_response_headers` alongside the old v2 `response_headers` in `api/server/response_options.proto` and marks the old field as deprecated. The old field retains its functionality for backward compatibility. Since these are repeated fields, we cannot use a oneof. Instead validation is added to ensure the filters report an error on configuration that has both the v2 and the v3 fields set. `Envoy::EnvoyException` is thrown on validation errors to comply with the `Envoy::Server::Configuration::NamedHttpFilterConfigFactory` interface. Also: - sorting imports in `response_options.proto ` alphabetically. - adding missing anonymous namespace in `http_dynamic_delay_filter_integration_test.cc`. Works on #580. 
Signed-off-by: Jakub Sobon --- api/server/BUILD | 1 + api/server/response_options.proto | 15 +- source/server/BUILD | 2 + source/server/configuration.cc | 43 +++- source/server/configuration.h | 20 ++ .../http_dynamic_delay_filter_config.cc | 8 +- .../server/http_test_server_filter_config.cc | 9 +- .../http_time_tracking_filter_config.cc | 8 +- test/server/BUILD | 13 + test/server/configuration_test.cc | 235 ++++++++++++++++++ ...p_dynamic_delay_filter_integration_test.cc | 20 ++ ...ttp_test_server_filter_integration_test.cc | 51 +++- ...p_time_tracking_filter_integration_test.cc | 18 ++ 13 files changed, 427 insertions(+), 16 deletions(-) create mode 100644 test/server/configuration_test.cc diff --git a/api/server/BUILD b/api/server/BUILD index 9b00942d9..312e283e5 100644 --- a/api/server/BUILD +++ b/api/server/BUILD @@ -13,5 +13,6 @@ api_cc_py_proto_library( srcs = ["response_options.proto"], deps = [ "@envoy_api//envoy/api/v2/core:pkg", + "@envoy_api//envoy/config/core/v3:pkg", ], ) diff --git a/api/server/response_options.proto b/api/server/response_options.proto index 79c9e2dc9..bf64b916d 100644 --- a/api/server/response_options.proto +++ b/api/server/response_options.proto @@ -2,10 +2,11 @@ syntax = "proto3"; package nighthawk.server; -import "google/protobuf/wrappers.proto"; -import "validate/validate.proto"; import "envoy/api/v2/core/base.proto"; +import "envoy/config/core/v3/base.proto"; import "google/protobuf/duration.proto"; +import "google/protobuf/wrappers.proto"; +import "validate/validate.proto"; message ConcurrencyBasedLinearDelay { // Minimal delay to add to replies. @@ -20,7 +21,15 @@ message ConcurrencyBasedLinearDelay { // configuration will override. message ResponseOptions { // List of additional response headers. - repeated envoy.api.v2.core.HeaderValueOption response_headers = 1; + // + // Envoy deprecated its v2 API, prefer to use v3_response_headers instead. + // Mutually exclusive with v3_response_headers. 
+ repeated envoy.api.v2.core.HeaderValueOption response_headers = 1 [deprecated = true]; + + // List of additional response headers. + // Mutually exclusive with response_headers. + repeated envoy.config.core.v3.HeaderValueOption v3_response_headers = 7; + // Number of 'a' characters in the the response body. uint32 response_body_size = 2 [(validate.rules).uint32 = {lte: 4194304}]; // If true, then echo request headers in the response body. diff --git a/source/server/BUILD b/source/server/BUILD index 33e9a6dfc..c0af0f63e 100644 --- a/source/server/BUILD +++ b/source/server/BUILD @@ -37,6 +37,8 @@ envoy_cc_library( "@envoy//source/common/protobuf:message_validator_lib_with_external_headers", "@envoy//source/common/protobuf:utility_lib_with_external_headers", "@envoy//source/common/singleton:const_singleton_with_external_headers", + "@envoy_api//envoy/api/v2/core:pkg_cc_proto", + "@envoy_api//envoy/config/core/v3:pkg_cc_proto", ], ) diff --git a/source/server/configuration.cc b/source/server/configuration.cc index a6037bf14..ca17b6f46 100644 --- a/source/server/configuration.cc +++ b/source/server/configuration.cc @@ -2,6 +2,9 @@ #include +#include "envoy/api/v2/core/base.pb.h" +#include "envoy/config/core/v3/base.pb.h" + #include "external/envoy/source/common/protobuf/message_validator_impl.h" #include "external/envoy/source/common/protobuf/utility.h" @@ -30,8 +33,20 @@ bool mergeJsonConfig(absl::string_view json, nighthawk::server::ResponseOptions& void applyConfigToResponseHeaders(Envoy::Http::ResponseHeaderMap& response_headers, const nighthawk::server::ResponseOptions& response_options) { - for (const auto& header_value_option : response_options.response_headers()) { - const auto& header = header_value_option.header(); + + // The validation guarantees we only get one of the fields (response_headers, v3_response_headers) + // set. 
+ validateResponseOptions(response_options); + nighthawk::server::ResponseOptions v3_only_response_options = response_options; + for (const envoy::api::v2::core::HeaderValueOption& header_value_option : + v3_only_response_options.response_headers()) { + *v3_only_response_options.add_v3_response_headers() = + upgradeDeprecatedEnvoyV2HeaderValueOptionToV3(header_value_option); + } + + for (const envoy::config::core::v3::HeaderValueOption& header_value_option : + v3_only_response_options.v3_response_headers()) { + const envoy::config::core::v3::HeaderValue& header = header_value_option.header(); auto lower_case_key = Envoy::Http::LowerCaseString(header.key()); if (!header_value_option.append().value()) { response_headers.remove(lower_case_key); @@ -40,6 +55,30 @@ void applyConfigToResponseHeaders(Envoy::Http::ResponseHeaderMap& response_heade } } +envoy::config::core::v3::HeaderValueOption upgradeDeprecatedEnvoyV2HeaderValueOptionToV3( + const envoy::api::v2::core::HeaderValueOption& v2_header_value_option) { + envoy::config::core::v3::HeaderValueOption v3_header_value_option; + if (v2_header_value_option.has_append()) { + *v3_header_value_option.mutable_append() = v2_header_value_option.append(); + } + if (v2_header_value_option.has_header()) { + envoy::config::core::v3::HeaderValue* v3_header = v3_header_value_option.mutable_header(); + v3_header->set_key(v2_header_value_option.header().key()); + v3_header->set_value(v2_header_value_option.header().value()); + } + return v3_header_value_option; +} + +void validateResponseOptions(const nighthawk::server::ResponseOptions& response_options) { + if (response_options.response_headers_size() > 0 && + response_options.v3_response_headers_size() > 0) { + throw Envoy::EnvoyException( + absl::StrCat("invalid configuration in nighthawk::server::ResponseOptions ", + "cannot specify both response_headers and v3_response_headers ", + "configuration was: ", response_options.ShortDebugString())); + } +} + } // namespace 
Configuration } // namespace Server } // namespace Nighthawk diff --git a/source/server/configuration.h b/source/server/configuration.h index e44cca4a6..81aaf50da 100644 --- a/source/server/configuration.h +++ b/source/server/configuration.h @@ -2,6 +2,8 @@ #include +#include "envoy/api/v2/core/base.pb.h" +#include "envoy/config/core/v3/base.pb.h" #include "envoy/http/header_map.h" #include "api/server/response_options.pb.h" @@ -27,10 +29,28 @@ bool mergeJsonConfig(absl::string_view json, nighthawk::server::ResponseOptions& * @param response_headers Response headers to transform to reflect the passed in response * options. * @param response_options Configuration specifying how to transform the header map. + * + * @throws Envoy::EnvoyException if invalid response_options are provided. */ void applyConfigToResponseHeaders(Envoy::Http::ResponseHeaderMap& response_headers, const nighthawk::server::ResponseOptions& response_options); +/** + * Upgrades Envoy's HeaderValueOption from the deprecated v2 API version to v3. + * + * @param v2_header_value_option The HeaderValueOption to be upgraded. + * @return a version of HeaderValueOption upgraded to Envoy API v3. + */ +envoy::config::core::v3::HeaderValueOption upgradeDeprecatedEnvoyV2HeaderValueOptionToV3( + const envoy::api::v2::core::HeaderValueOption& v2_header_value_option); + +/** + * Validates the ResponseOptions. + * + * @throws Envoy::EnvoyException on validation errors. 
+ */ +void validateResponseOptions(const nighthawk::server::ResponseOptions& response_options); + } // namespace Configuration } // namespace Server } // namespace Nighthawk diff --git a/source/server/http_dynamic_delay_filter_config.cc b/source/server/http_dynamic_delay_filter_config.cc index 336a5da7b..87cda255b 100644 --- a/source/server/http_dynamic_delay_filter_config.cc +++ b/source/server/http_dynamic_delay_filter_config.cc @@ -7,6 +7,7 @@ #include "api/server/response_options.pb.h" #include "api/server/response_options.pb.validate.h" +#include "server/configuration.h" #include "server/http_dynamic_delay_filter.h" namespace Nighthawk { @@ -22,10 +23,11 @@ class HttpDynamicDelayDecoderFilterConfigFactory Envoy::Server::Configuration::FactoryContext& context) override { auto& validation_visitor = Envoy::ProtobufMessage::getStrictValidationVisitor(); - return createFilter( + const nighthawk::server::ResponseOptions& response_options = Envoy::MessageUtil::downcastAndValidate( - proto_config, validation_visitor), - context); + proto_config, validation_visitor); + validateResponseOptions(response_options); + return createFilter(response_options, context); } Envoy::ProtobufTypes::MessagePtr createEmptyConfigProto() override { diff --git a/source/server/http_test_server_filter_config.cc b/source/server/http_test_server_filter_config.cc index ffd814303..5a9754d9d 100644 --- a/source/server/http_test_server_filter_config.cc +++ b/source/server/http_test_server_filter_config.cc @@ -7,6 +7,7 @@ #include "api/server/response_options.pb.h" #include "api/server/response_options.pb.validate.h" +#include "server/configuration.h" #include "server/http_test_server_filter.h" namespace Nighthawk { @@ -19,12 +20,12 @@ class HttpTestServerDecoderFilterConfig Envoy::Http::FilterFactoryCb createFilterFactoryFromProto(const Envoy::Protobuf::Message& proto_config, const std::string&, Envoy::Server::Configuration::FactoryContext& context) override { - auto& validation_visitor = 
Envoy::ProtobufMessage::getStrictValidationVisitor(); - return createFilter( + const nighthawk::server::ResponseOptions& response_options = Envoy::MessageUtil::downcastAndValidate( - proto_config, validation_visitor), - context); + proto_config, validation_visitor); + validateResponseOptions(response_options); + return createFilter(response_options, context); } Envoy::ProtobufTypes::MessagePtr createEmptyConfigProto() override { diff --git a/source/server/http_time_tracking_filter_config.cc b/source/server/http_time_tracking_filter_config.cc index 76adb0bb1..e9e3e38d0 100644 --- a/source/server/http_time_tracking_filter_config.cc +++ b/source/server/http_time_tracking_filter_config.cc @@ -7,6 +7,7 @@ #include "api/server/response_options.pb.h" #include "api/server/response_options.pb.validate.h" +#include "server/configuration.h" #include "server/http_time_tracking_filter.h" namespace Nighthawk { @@ -21,10 +22,11 @@ class HttpTimeTrackingFilterConfig Envoy::Server::Configuration::FactoryContext& context) override { Envoy::ProtobufMessage::ValidationVisitor& validation_visitor = Envoy::ProtobufMessage::getStrictValidationVisitor(); - return createFilter( + const nighthawk::server::ResponseOptions& response_options = Envoy::MessageUtil::downcastAndValidate( - proto_config, validation_visitor), - context); + proto_config, validation_visitor); + validateResponseOptions(response_options); + return createFilter(response_options, context); } Envoy::ProtobufTypes::MessagePtr createEmptyConfigProto() override { diff --git a/test/server/BUILD b/test/server/BUILD index 68d5bab08..20b13e589 100644 --- a/test/server/BUILD +++ b/test/server/BUILD @@ -65,3 +65,16 @@ envoy_cc_test( "@envoy//test/test_common:simulated_time_system_lib", ], ) + +envoy_cc_test( + name = "configuration_test", + srcs = ["configuration_test.cc"], + repository = "@envoy", + deps = [ + "//api/server:response_options_proto_cc_proto", + "//source/server:configuration_lib", + 
"@envoy//test/test_common:utility_lib", + "@envoy_api//envoy/api/v2/core:pkg_cc_proto", + "@envoy_api//envoy/config/core/v3:pkg_cc_proto", + ], +) diff --git a/test/server/configuration_test.cc b/test/server/configuration_test.cc new file mode 100644 index 000000000..f0c1d9458 --- /dev/null +++ b/test/server/configuration_test.cc @@ -0,0 +1,235 @@ +#include "envoy/api/v2/core/base.pb.h" +#include "envoy/config/core/v3/base.pb.h" + +#include "external/envoy/test/test_common/utility.h" + +#include "api/server/response_options.pb.validate.h" + +#include "server/configuration.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Nighthawk { +namespace Server { +namespace Configuration { +namespace { + +using ::Envoy::Http::LowerCaseString; +using ::Envoy::Http::TestResponseHeaderMapImpl; + +TEST(UpgradeDeprecatedEnvoyV2HeaderValueOptionToV3Test, UpgradesEmptyHeaderValue) { + envoy::api::v2::core::HeaderValueOption v2_header_value_option; + envoy::config::core::v3::HeaderValueOption v3_header_value_option = + upgradeDeprecatedEnvoyV2HeaderValueOptionToV3(v2_header_value_option); + + EXPECT_FALSE(v3_header_value_option.has_append()); + EXPECT_FALSE(v3_header_value_option.has_header()); +} + +TEST(UpgradeDeprecatedEnvoyV2HeaderValueOptionToV3Test, UpgradesHeaderValueWithHeaderAndAppendSet) { + envoy::api::v2::core::HeaderValueOption v2_header_value_option; + v2_header_value_option.mutable_append()->set_value(true); + v2_header_value_option.mutable_header()->set_key("key"); + v2_header_value_option.mutable_header()->set_value("value"); + + envoy::config::core::v3::HeaderValueOption v3_header_value_option = + upgradeDeprecatedEnvoyV2HeaderValueOptionToV3(v2_header_value_option); + + EXPECT_TRUE(v3_header_value_option.append().value()); + EXPECT_EQ(v3_header_value_option.header().key(), "key"); + EXPECT_EQ(v3_header_value_option.header().value(), "value"); +} + +TEST(UpgradeDeprecatedEnvoyV2HeaderValueOptionToV3Test, UpgradesHeaderValueWithHeaderOnly) { + 
envoy::api::v2::core::HeaderValueOption v2_header_value_option; + v2_header_value_option.mutable_header()->set_key("key"); + v2_header_value_option.mutable_header()->set_value("value"); + + envoy::config::core::v3::HeaderValueOption v3_header_value_option = + upgradeDeprecatedEnvoyV2HeaderValueOptionToV3(v2_header_value_option); + + EXPECT_FALSE(v3_header_value_option.has_append()); + EXPECT_EQ(v3_header_value_option.header().key(), "key"); + EXPECT_EQ(v3_header_value_option.header().value(), "value"); +} + +TEST(UpgradeDeprecatedEnvoyV2HeaderValueOptionToV3Test, UpgradesHeaderValueWithAppendOnly) { + envoy::api::v2::core::HeaderValueOption v2_header_value_option; + v2_header_value_option.mutable_append()->set_value(true); + + envoy::config::core::v3::HeaderValueOption v3_header_value_option = + upgradeDeprecatedEnvoyV2HeaderValueOptionToV3(v2_header_value_option); + + EXPECT_TRUE(v3_header_value_option.append().value()); + EXPECT_FALSE(v3_header_value_option.has_header()); +} + +// Specifies the Envoy API version to use in the test configuration. +enum EnvoyApiVersion { + EnvoyApiV2, + EnvoyApiV3, +}; + +// Specifies if headers with duplicate key should be appended or replace the +// previous header. +enum HeaderAddMode { + ReplaceOnDuplicateKey, + AppendOnDuplicateKey, +}; + +// Creates a test configuration with three headers, two of which have the same +// key. The following headers are added: +// +// key1: header1_value +// key2: header2_value +// key1: header3_value +// +// @param api_version determines the version of the Envoy API used in the +// created configuration. +// @param add_mode specifies how the header with the duplicate key is added. +// @return a configuration for the test. 
+nighthawk::server::ResponseOptions createTestConfiguration(EnvoyApiVersion api_version, + HeaderAddMode add_mode) { + nighthawk::server::ResponseOptions configuration; + + if (api_version == EnvoyApiV2) { + envoy::api::v2::core::HeaderValueOption* header1 = configuration.add_response_headers(); + header1->mutable_header()->set_key("key1"); + header1->mutable_header()->set_value("header1_value"); + + envoy::api::v2::core::HeaderValueOption* header2 = configuration.add_response_headers(); + header2->mutable_header()->set_key("key2"); + header2->mutable_header()->set_value("header2_value"); + + envoy::api::v2::core::HeaderValueOption* header3 = configuration.add_response_headers(); + header3->mutable_header()->set_key("key1"); + header3->mutable_header()->set_value("header3_value"); + if (add_mode == AppendOnDuplicateKey) { + header3->mutable_append()->set_value("true"); + } + } else if (api_version == EnvoyApiV3) { + envoy::config::core::v3::HeaderValueOption* header1 = configuration.add_v3_response_headers(); + header1->mutable_header()->set_key("key1"); + header1->mutable_header()->set_value("header1_value"); + + envoy::config::core::v3::HeaderValueOption* header2 = configuration.add_v3_response_headers(); + header2->mutable_header()->set_key("key2"); + header2->mutable_header()->set_value("header2_value"); + + envoy::config::core::v3::HeaderValueOption* header3 = configuration.add_v3_response_headers(); + header3->mutable_header()->set_key("key1"); + header3->mutable_header()->set_value("header3_value"); + if (add_mode == AppendOnDuplicateKey) { + header3->mutable_append()->set_value("true"); + } + } + return configuration; +} + +// Creates the expected header map for the specified add mode. +// +// @param add_mode specifies how the header with the duplicate key is added. +// @return a header map populated with the expected headers. 
+TestResponseHeaderMapImpl createExpectedHeaderMap(HeaderAddMode add_mode) { + TestResponseHeaderMapImpl expected_header_map; + if (add_mode == ReplaceOnDuplicateKey) { + expected_header_map.addCopy(LowerCaseString("key2"), "header2_value"); + expected_header_map.addCopy(LowerCaseString("key1"), "header3_value"); + } else if (add_mode == AppendOnDuplicateKey) { + expected_header_map.addCopy(LowerCaseString("key1"), "header1_value"); + expected_header_map.addCopy(LowerCaseString("key2"), "header2_value"); + expected_header_map.addCopy(LowerCaseString("key1"), "header3_value"); + } + return expected_header_map; +} + +TEST(ApplyConfigToResponseHeaders, ReplacesHeadersFromEnvoyApiV2Config) { + HeaderAddMode add_mode = ReplaceOnDuplicateKey; + nighthawk::server::ResponseOptions configuration = createTestConfiguration(EnvoyApiV2, add_mode); + + TestResponseHeaderMapImpl header_map; + applyConfigToResponseHeaders(header_map, configuration); + TestResponseHeaderMapImpl expected_header_map = createExpectedHeaderMap(add_mode); + + EXPECT_EQ(header_map, expected_header_map) << "got header_map:\n" + << header_map << "\nexpected_header_map:\n" + << expected_header_map; +} + +TEST(ApplyConfigToResponseHeaders, AppendsHeadersFromEnvoyApiV2Config) { + HeaderAddMode add_mode = AppendOnDuplicateKey; + nighthawk::server::ResponseOptions configuration = createTestConfiguration(EnvoyApiV2, add_mode); + + TestResponseHeaderMapImpl header_map; + applyConfigToResponseHeaders(header_map, configuration); + TestResponseHeaderMapImpl expected_header_map = createExpectedHeaderMap(add_mode); + + EXPECT_EQ(header_map, expected_header_map) << "got header_map:\n" + << header_map << "\nexpected_header_map:\n" + << expected_header_map; +} + +TEST(ApplyConfigToResponseHeaders, ReplacesHeadersFromEnvoyApiV3Config) { + HeaderAddMode add_mode = ReplaceOnDuplicateKey; + nighthawk::server::ResponseOptions configuration = createTestConfiguration(EnvoyApiV3, add_mode); + + TestResponseHeaderMapImpl 
header_map; + applyConfigToResponseHeaders(header_map, configuration); + TestResponseHeaderMapImpl expected_header_map = createExpectedHeaderMap(add_mode); + + EXPECT_EQ(header_map, expected_header_map) << "got header_map:\n" + << header_map << "\nexpected_header_map:\n" + << expected_header_map; +} + +TEST(ApplyConfigToResponseHeaders, AppendsHeadersFromEnvoyApiV3Config) { + HeaderAddMode add_mode = AppendOnDuplicateKey; + nighthawk::server::ResponseOptions configuration = createTestConfiguration(EnvoyApiV3, add_mode); + + TestResponseHeaderMapImpl header_map; + applyConfigToResponseHeaders(header_map, configuration); + TestResponseHeaderMapImpl expected_header_map = createExpectedHeaderMap(add_mode); + + EXPECT_EQ(header_map, expected_header_map) << "got header_map:\n" + << header_map << "\nexpected_header_map:\n" + << expected_header_map; +} + +TEST(ApplyConfigToResponseHeaders, ThrowsOnInvalidConfiguration) { + nighthawk::server::ResponseOptions configuration; + configuration.add_response_headers(); + configuration.add_v3_response_headers(); + + TestResponseHeaderMapImpl header_map; + EXPECT_THROW(applyConfigToResponseHeaders(header_map, configuration), Envoy::EnvoyException); +} + +TEST(ValidateResponseOptions, DoesNotThrowOnEmptyConfiguration) { + nighthawk::server::ResponseOptions configuration; + EXPECT_NO_THROW(validateResponseOptions(configuration)); +} + +TEST(ValidateResponseOptions, DoesNotThrowWhenOnlyEnvoyApiV2ResponseHeadersAreSet) { + nighthawk::server::ResponseOptions configuration; + configuration.add_response_headers(); + EXPECT_NO_THROW(validateResponseOptions(configuration)); +} + +TEST(ValidateResponseOptions, DoesNotThrowWhenOnlyEnvoyApiV3ResponseHeadersAreSet) { + nighthawk::server::ResponseOptions configuration; + configuration.add_v3_response_headers(); + EXPECT_NO_THROW(validateResponseOptions(configuration)); +} + +TEST(ValidateResponseOptions, ThrowsWhenBothEnvoyApiV2AndV3ResponseHeadersAreSet) { + nighthawk::server::ResponseOptions 
configuration; + configuration.add_response_headers(); + configuration.add_v3_response_headers(); + EXPECT_THROW(validateResponseOptions(configuration), Envoy::EnvoyException); +} + +} // namespace +} // namespace Configuration +} // namespace Server +} // namespace Nighthawk diff --git a/test/server/http_dynamic_delay_filter_integration_test.cc b/test/server/http_dynamic_delay_filter_integration_test.cc index 0dfdb797b..b69049bee 100644 --- a/test/server/http_dynamic_delay_filter_integration_test.cc +++ b/test/server/http_dynamic_delay_filter_integration_test.cc @@ -10,6 +10,9 @@ #include "gtest/gtest.h" namespace Nighthawk { +namespace { + +using ::testing::HasSubstr; const Envoy::Http::LowerCaseString kDelayHeaderString("x-envoy-fault-delay-request"); @@ -38,6 +41,22 @@ class HttpDynamicDelayIntegrationTest INSTANTIATE_TEST_SUITE_P(IpVersions, HttpDynamicDelayIntegrationTest, testing::ValuesIn(Envoy::TestEnvironment::getIpVersionsForTest())); +TEST_P(HttpDynamicDelayIntegrationTest, + DiesWhenBothEnvoyApiV2AndV3ResponseHeadersAreSetInConfiguration) { + const std::string invalid_configuration = R"EOF( + name: dynamic-delay + typed_config: + "@type": type.googleapis.com/nighthawk.server.ResponseOptions + response_headers: + - { header: { key: "key1", value: "value1"} } + v3_response_headers: + - { header: { key: "key1", value: "value1"} } + )EOF"; + + ASSERT_DEATH(initializeFilterConfiguration(invalid_configuration), + HasSubstr("cannot specify both response_headers and v3_response_headers")); +} + // Verify expectations with an empty dynamic-delay configuration. 
TEST_P(HttpDynamicDelayIntegrationTest, NoStaticConfiguration) { initializeFilterConfiguration(R"( @@ -157,4 +176,5 @@ TEST_F(ComputeTest, ComputeConcurrencyBasedLinearDelayMs) { EXPECT_EQ(compute(4, 1, 500000, 1, 500000), 5003); } +} // namespace } // namespace Nighthawk diff --git a/test/server/http_test_server_filter_integration_test.cc b/test/server/http_test_server_filter_integration_test.cc index 5850dab7e..db2233ff5 100644 --- a/test/server/http_test_server_filter_integration_test.cc +++ b/test/server/http_test_server_filter_integration_test.cc @@ -13,6 +13,8 @@ namespace { using namespace testing; +using ::testing::HasSubstr; + constexpr absl::string_view kDefaultProto = R"EOF( name: test-server typed_config: @@ -100,7 +102,7 @@ TEST_P(HttpTestServerIntegrationTest, TestTooLarge) { testBadResponseSize(max + 1); } -TEST_P(HttpTestServerIntegrationTest, TestHeaderConfig) { +TEST_P(HttpTestServerIntegrationTest, TestHeaderConfigUsingEnvoyApiV2) { initializeFilterConfiguration(kDefaultProto); setRequestLevelConfiguration( R"({response_headers: [ { header: { key: "foo", value: "bar2"}, append: true } ]})"); @@ -114,6 +116,53 @@ TEST_P(HttpTestServerIntegrationTest, TestHeaderConfig) { EXPECT_EQ(std::string(10, 'a'), response->body()); } +TEST_P(HttpTestServerIntegrationTest, TestHeaderConfigUsingEnvoyApiV3) { + const std::string v3_configuration = R"EOF( + name: test-server + typed_config: + "@type": type.googleapis.com/nighthawk.server.ResponseOptions + response_body_size: 10 + v3_response_headers: + - { header: { key: "foo", value: "bar2"}, append: true } + )EOF"; + + initializeFilterConfiguration(v3_configuration); + Envoy::IntegrationStreamDecoderPtr response = getResponse(ResponseOrigin::EXTENSION); + ASSERT_TRUE(response->complete()); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + ASSERT_EQ(1, response->headers().get(Envoy::Http::LowerCaseString("foo")).size()); + EXPECT_EQ( + "bar2", + 
response->headers().get(Envoy::Http::LowerCaseString("foo"))[0]->value().getStringView()); + EXPECT_EQ(std::string(10, 'a'), response->body()); +} + +TEST_P(HttpTestServerIntegrationTest, + DiesWhenRequestLevelConfigurationResultsInBothEnvoyApiV2AndV3ResponseHeadersSet) { + initializeFilterConfiguration(kDefaultProto); + setRequestLevelConfiguration( + R"({v3_response_headers: [ { header: { key: "foo", value: "bar2"}, append: true } ]})"); + + ASSERT_DEATH(getResponse(ResponseOrigin::EXTENSION), + HasSubstr("cannot specify both response_headers and v3_response_headers")); +} + +TEST_P(HttpTestServerIntegrationTest, + DiesWhenBothEnvoyApiV2AndV3ResponseHeadersAreSetInConfiguration) { + const std::string invalid_configuration = R"EOF( + name: test-server + typed_config: + "@type": type.googleapis.com/nighthawk.server.ResponseOptions + response_headers: + - { header: { key: "key1", value: "value1"} } + v3_response_headers: + - { header: { key: "key1", value: "value1"} } + )EOF"; + + ASSERT_DEATH(initializeFilterConfiguration(invalid_configuration), + HasSubstr("cannot specify both response_headers and v3_response_headers")); +} + TEST_P(HttpTestServerIntegrationTest, TestEchoHeaders) { initializeFilterConfiguration(kDefaultProto); setRequestLevelConfiguration("{echo_request_headers: true}"); diff --git a/test/server/http_time_tracking_filter_integration_test.cc b/test/server/http_time_tracking_filter_integration_test.cc index c61124392..9858fbf4f 100644 --- a/test/server/http_time_tracking_filter_integration_test.cc +++ b/test/server/http_time_tracking_filter_integration_test.cc @@ -17,6 +17,8 @@ namespace { using namespace std::chrono_literals; +using ::testing::HasSubstr; + const std::string kLatencyResponseHeaderName = "x-prd"; const std::string kDefaultProtoFragment = fmt::format( "emit_previous_request_delta_in_response_header: \"{}\"", kLatencyResponseHeaderName); @@ -37,6 +39,22 @@ class HttpTimeTrackingIntegrationTest INSTANTIATE_TEST_SUITE_P(IpVersions, 
HttpTimeTrackingIntegrationTest, testing::ValuesIn(Envoy::TestEnvironment::getIpVersionsForTest())); +TEST_P(HttpTimeTrackingIntegrationTest, + DiesWhenBothEnvoyApiV2AndV3ResponseHeadersAreSetInConfiguration) { + const std::string invalid_configuration = R"EOF( + name: time-tracking + typed_config: + "@type": type.googleapis.com/nighthawk.server.ResponseOptions + response_headers: + - { header: { key: "key1", value: "value1"} } + v3_response_headers: + - { header: { key: "key1", value: "value1"} } + )EOF"; + + ASSERT_DEATH(initializeFilterConfiguration(invalid_configuration), + HasSubstr("cannot specify both response_headers and v3_response_headers")); +} + // Verify expectations with static/file-based time-tracking configuration. TEST_P(HttpTimeTrackingIntegrationTest, ReturnsPositiveLatencyForStaticConfiguration) { initializeFilterConfiguration(fmt::format(kProtoConfigTemplate, kDefaultProtoFragment)); From 602eeada02db782a401761a25e22e3bcb0a3a9a5 Mon Sep 17 00:00:00 2001 From: Jakub Sobon Date: Tue, 8 Dec 2020 03:30:57 -0500 Subject: [PATCH 47/63] Deprecate Envoy API v2 primitives in the service proto for Request Source. (#589) Converting the current field into a `oneof` and marking the `v2` primitive as deprecated. The old field retains its functionality for backward compatibility. Also: - sorting imports in `api/request_source/service.proto`. - migrating our dummy request source to using the `v3` primitive. - improving error message emitted on test failures in `test/request_stream_grpc_client_test.cc`, the current message just dumps byte representation of the two compared header objects. Fixes #580. 
Signed-off-by: Jakub Sobon mumak@google.com --- api/request_source/BUILD | 1 + api/request_source/service.proto | 18 ++++++-- source/client/BUILD | 1 + source/client/service_impl.cc | 8 ++-- source/common/BUILD | 2 + .../common/request_stream_grpc_client_impl.cc | 26 +++++++++-- test/BUILD | 3 ++ test/request_stream_grpc_client_test.cc | 45 ++++++++++++++++--- 8 files changed, 86 insertions(+), 18 deletions(-) diff --git a/api/request_source/BUILD b/api/request_source/BUILD index 9cf50e7ff..2473d6a2e 100644 --- a/api/request_source/BUILD +++ b/api/request_source/BUILD @@ -13,6 +13,7 @@ api_cc_py_proto_library( "@envoy_api//envoy/api/v2/auth:pkg", "@envoy_api//envoy/api/v2/cluster:pkg", "@envoy_api//envoy/api/v2/core:pkg", + "@envoy_api//envoy/config/core/v3:pkg", ], ) diff --git a/api/request_source/service.proto b/api/request_source/service.proto index 9ab934558..e1acdb135 100644 --- a/api/request_source/service.proto +++ b/api/request_source/service.proto @@ -2,8 +2,9 @@ syntax = "proto3"; package nighthawk.request_source; -import "google/protobuf/wrappers.proto"; import "envoy/api/v2/core/base.proto"; +import "envoy/config/core/v3/base.proto"; +import "google/protobuf/wrappers.proto"; // Used to request a RequestStreamResponse. message RequestStreamRequest { @@ -37,9 +38,18 @@ message RequestSpecifier { // Request content length. The client will transfer the number of bytes specified here for the // request body. google.protobuf.UInt32Value content_length = 4; - // Request header replacements. Any existing header(s) with the same name will be removed - // before setting. - envoy.api.v2.core.HeaderMap headers = 5; + + oneof oneof_headers { + // Request header replacements. Any existing header(s) with the same name will be removed + // before setting. + // + // Envoy deprecated its v2 API, prefer to use v3_headers instead. + envoy.api.v2.core.HeaderMap headers = 5 [deprecated = true]; + + // Request header replacements. 
Any existing header(s) with the same name will be removed + // before setting. + envoy.config.core.v3.HeaderMap v3_headers = 6; + } // TODO(oschaaf): nice to have // google.protobuf.StringValue sni_hostname = 10; } diff --git a/source/client/BUILD b/source/client/BUILD index b89c90bd7..4fe97d170 100644 --- a/source/client/BUILD +++ b/source/client/BUILD @@ -156,6 +156,7 @@ envoy_cc_library( "//api/client:grpc_service_lib", "//api/request_source:grpc_request_source_service_lib", "@envoy//source/common/common:thread_lib_with_external_headers", + "@envoy_api//envoy/config/core/v3:pkg_cc_proto", ], ) diff --git a/source/client/service_impl.cc b/source/client/service_impl.cc index 693955a79..fda3d69d7 100644 --- a/source/client/service_impl.cc +++ b/source/client/service_impl.cc @@ -2,6 +2,8 @@ #include +#include "envoy/config/core/v3/base.pb.h" + #include "common/request_source_impl.h" #include "client/client.h" @@ -106,7 +108,7 @@ ::grpc::Status ServiceImpl::ExecutionStream( } namespace { -void addHeader(envoy::api::v2::core::HeaderMap* map, absl::string_view key, +void addHeader(envoy::config::core::v3::HeaderMap* map, absl::string_view key, absl::string_view value) { auto* request_header = map->add_headers(); request_header->set_key(std::string(key)); @@ -144,7 +146,7 @@ ::grpc::Status RequestSourceServiceImpl::RequestStream( HeaderMapPtr headers = request->header(); nighthawk::request_source::RequestStreamResponse response; auto* request_specifier = response.mutable_request_specifier(); - auto* request_headers = request_specifier->mutable_headers(); + auto* request_headers = request_specifier->mutable_v3_headers(); headers->iterate([&request_headers](const Envoy::Http::HeaderEntry& header) -> Envoy::Http::HeaderMap::Iterate { addHeader(request_headers, header.key().getStringView(), header.value().getStringView()); @@ -163,4 +165,4 @@ ::grpc::Status RequestSourceServiceImpl::RequestStream( } } // namespace Client -} // namespace Nighthawk \ No newline at end of file 
+} // namespace Nighthawk diff --git a/source/common/BUILD b/source/common/BUILD index fd8cc3701..951c2a411 100644 --- a/source/common/BUILD +++ b/source/common/BUILD @@ -60,6 +60,8 @@ envoy_cc_library( "@envoy//source/common/grpc:typed_async_client_lib_with_external_headers", "@envoy//source/common/http:header_map_lib_with_external_headers", "@envoy//source/common/http:headers_lib_with_external_headers", + "@envoy_api//envoy/api/v2/core:pkg_cc_proto", + "@envoy_api//envoy/config/core/v3:pkg_cc_proto", ], ) diff --git a/source/common/request_stream_grpc_client_impl.cc b/source/common/request_stream_grpc_client_impl.cc index da0095042..31db65a69 100644 --- a/source/common/request_stream_grpc_client_impl.cc +++ b/source/common/request_stream_grpc_client_impl.cc @@ -2,16 +2,22 @@ #include +#include "envoy/api/v2/core/base.pb.h" +#include "envoy/config/core/v3/base.pb.h" #include "envoy/stats/scope.h" #include "external/envoy/source/common/common/assert.h" #include "external/envoy/source/common/http/header_map_impl.h" #include "external/envoy/source/common/http/headers.h" +#include "api/request_source/service.pb.h" + #include "common/request_impl.h" namespace Nighthawk { +using ::nighthawk::request_source::RequestSpecifier; + const std::string RequestStreamGrpcClientImpl::METHOD_NAME = "nighthawk.request_source.NighthawkRequestSourceService.RequestStream"; @@ -75,15 +81,27 @@ RequestPtr ProtoRequestHelper::messageToRequest( RequestPtr request = std::make_unique(header); if (message.has_request_specifier()) { - const auto& request_specifier = message.request_specifier(); - if (request_specifier.has_headers()) { - const auto& message_request_headers = request_specifier.headers(); - for (const auto& message_header : message_request_headers.headers()) { + const RequestSpecifier& request_specifier = message.request_specifier(); + + if (request_specifier.has_v3_headers()) { + const envoy::config::core::v3::HeaderMap& message_request_headers = + 
request_specifier.v3_headers(); + for (const envoy::config::core::v3::HeaderValue& message_header : + message_request_headers.headers()) { + Envoy::Http::LowerCaseString header_name(message_header.key()); + header->remove(header_name); + header->addCopy(header_name, message_header.value()); + } + } else if (request_specifier.has_headers()) { + const envoy::api::v2::core::HeaderMap& message_request_headers = request_specifier.headers(); + for (const envoy::api::v2::core::HeaderValue& message_header : + message_request_headers.headers()) { Envoy::Http::LowerCaseString header_name(message_header.key()); header->remove(header_name); header->addCopy(header_name, message_header.value()); } } + if (request_specifier.has_content_length()) { std::string s_content_length = absl::StrCat("", request_specifier.content_length().value()); header->remove(Envoy::Http::Headers::get().ContentLength); diff --git a/test/BUILD b/test/BUILD index a56d270ef..c3ee1093a 100644 --- a/test/BUILD +++ b/test/BUILD @@ -326,8 +326,11 @@ envoy_cc_test( srcs = ["request_stream_grpc_client_test.cc"], repository = "@envoy", deps = [ + "//api/request_source:grpc_request_source_service_lib", "//source/common:request_stream_grpc_client_lib", "@envoy//test/test_common:utility_lib", + "@envoy_api//envoy/api/v2/core:pkg_cc_proto", + "@envoy_api//envoy/config/core/v3:pkg_cc_proto", ], ) diff --git a/test/request_stream_grpc_client_test.cc b/test/request_stream_grpc_client_test.cc index 91661ea52..b78b32f32 100644 --- a/test/request_stream_grpc_client_test.cc +++ b/test/request_stream_grpc_client_test.cc @@ -1,5 +1,10 @@ +#include "envoy/api/v2/core/base.pb.h" +#include "envoy/config/core/v3/base.pb.h" + #include "external/envoy/test/test_common/utility.h" +#include "api/request_source/service.pb.h" + #include "common/request_impl.h" #include "common/request_stream_grpc_client_impl.h" @@ -8,6 +13,9 @@ using namespace testing; namespace Nighthawk { +namespace { + +using 
::nighthawk::request_source::RequestSpecifier; // The grpc client itself is tested via the python based integration tests. // It is convenient to test message translation here. @@ -18,7 +26,9 @@ class ProtoRequestHelperTest : public Test { // We test for equality. If we observe mismatch, we use EXPECT_EQ which is guaranteed // to fail -- but will provide much more helpful output. if (!Envoy::TestUtility::headerMapEqualIgnoreOrder(expected_header_, *request->header())) { - EXPECT_EQ(expected_header_, *request->header()); + EXPECT_EQ(expected_header_, *request->header()) << "expected headers:\n" + << expected_header_ << "\nactual headers:\n" + << *request->header() << "\n"; }; } @@ -42,14 +52,34 @@ TEST_F(ProtoRequestHelperTest, ExplicitFields) { translateExpectingEqual(); } -// Test the generic header api we offer in the proto api. -TEST_F(ProtoRequestHelperTest, GenericHeaderFields) { - auto* request_specifier = response_.mutable_request_specifier(); - auto* headers = request_specifier->mutable_headers(); - auto* header_1 = headers->add_headers(); +// Test the generic header api we offer in the proto api using Envoy API v2 +// primitives. +TEST_F(ProtoRequestHelperTest, GenericHeaderFieldsUsingDeprecatedEnvoyV2Api) { + RequestSpecifier* request_specifier = response_.mutable_request_specifier(); + envoy::api::v2::core::HeaderMap* headers = request_specifier->mutable_headers(); + envoy::api::v2::core::HeaderValue* header_1 = headers->add_headers(); + header_1->set_key("header1"); + header_1->set_value("value1"); + envoy::api::v2::core::HeaderValue* header_2 = headers->add_headers(); + header_2->set_key("header2"); + header_2->set_value("value2"); + // We re-add the same header, but do not expect that to show up in the translation because we + // always replace. 
+ headers->add_headers()->MergeFrom(*header_2); + expected_header_ = + Envoy::Http::TestRequestHeaderMapImpl{{"header1", "value1"}, {"header2", "value2"}}; + translateExpectingEqual(); +} + +// Test the generic header api we offer in the proto api using Envoy API v3 +// primitives. +TEST_F(ProtoRequestHelperTest, GenericHeaderFieldsUsingEnvoyV3Api) { + RequestSpecifier* request_specifier = response_.mutable_request_specifier(); + envoy::config::core::v3::HeaderMap* headers = request_specifier->mutable_v3_headers(); + envoy::config::core::v3::HeaderValue* header_1 = headers->add_headers(); header_1->set_key("header1"); header_1->set_value("value1"); - auto* header_2 = headers->add_headers(); + envoy::config::core::v3::HeaderValue* header_2 = headers->add_headers(); header_2->set_key("header2"); header_2->set_value("value2"); // We re-add the same header, but do not expect that to show up in the translation because we @@ -74,4 +104,5 @@ TEST_F(ProtoRequestHelperTest, AmbiguousHost) { translateExpectingEqual(); } +} // namespace } // namespace Nighthawk From 30bd6668b2d9696066b4b5ec4edb59464827b2f7 Mon Sep 17 00:00:00 2001 From: Jakub Sobon Date: Tue, 8 Dec 2020 14:42:23 -0500 Subject: [PATCH 48/63] Fail the integration tests on warnings or errors. (#581) Fails the integration tests if any unknown warnings or errors are found in the logs of the Nighthawk test server. Includes an ignore list that (for now) accepts known warnings found in the logs, the ignore list can have different content per each test case. Also: - moving the `Attributes:` docstring section on `IntegrationTestBase` into its class docstring. `__init__` is just a method and should only have the `Args:` section. 
Fixes #577 Signed-off-by: Jakub Sobon --- benchmarks/envoy_proxy.py | 30 ++-- test/integration/integration_test_fixtures.py | 109 ++++++++------ test/integration/nighthawk_test_server.py | 134 ++++++++++++++++- test/integration/unit_tests/BUILD | 11 ++ .../unit_tests/test_nighthawk_test_server.py | 139 ++++++++++++++++++ 5 files changed, 367 insertions(+), 56 deletions(-) create mode 100644 test/integration/unit_tests/BUILD create mode 100644 test/integration/unit_tests/test_nighthawk_test_server.py diff --git a/benchmarks/envoy_proxy.py b/benchmarks/envoy_proxy.py index 06ce8bc08..8a48ad362 100644 --- a/benchmarks/envoy_proxy.py +++ b/benchmarks/envoy_proxy.py @@ -28,13 +28,21 @@ class EnvoyProxyServer(NighthawkTestServer): See InjectHttpProxyIntegrationTestBase below for usage. """ - def __init__(self, config_template_path, server_ip, ip_version, parameters=dict(), tag=""): + def __init__(self, + config_template_path, + server_ip, + ip_version, + request, + parameters=dict(), + tag=""): """Initialize an EnvoyProxyServer instance. Arguments: config_template_path: Configuration template for the proxy. server_ip: IP address for the proxy to use. ip_version: IP version that the proxy should use when listening. + request: The pytest `request` test fixture used to determine information + about the currently executing test case. parameters: Dictionary. Supply this to provide template parameter replacement values (optional). tag: String. Supply this to get recognizeable output locations (optional). """ @@ -43,6 +51,7 @@ def __init__(self, config_template_path, server_ip, ip_version, parameters=dict( config_template_path, server_ip, ip_version, + request, parameters=parameters, tag=tag) self.docker_image = os.getenv("ENVOY_DOCKER_IMAGE_TO_TEST", "") @@ -61,15 +70,16 @@ class InjectHttpProxyIntegrationTestBase(HttpIntegrationTestBase): which directs traffic to that. Both will be listing for plain http traffic. 
""" - def __init__(self, ip_version, server_config, proxy_config): + def __init__(self, request, server_config, proxy_config): """Initialize an InjectHttpProxyIntegrationTestBase. Arguments: - ip_version: Use ipv4 or ipv6 + request: The pytest `request` test fixture used to determine information + about the currently executing test case. server_config: Path to the server configuration. proxy_config: Path to the proxy configuration. """ - super(InjectHttpProxyIntegrationTestBase, self).__init__(ip_version, server_config) + super(InjectHttpProxyIntegrationTestBase, self).__init__(request, server_config) self._proxy_config = proxy_config def setUp(self): @@ -85,6 +95,7 @@ def setUp(self): proxy_server = EnvoyProxyServer(self._proxy_config, self.server_ip, self.ip_version, + self.request, parameters=self.parameters, tag=self.tag) assert (proxy_server.start()) @@ -92,9 +103,9 @@ def setUp(self): port=proxy_server.server_port)) self.proxy_server = proxy_server - def tearDown(self): + def tearDown(self, caplog): """Tear down the proxy and test server. Assert that both exit succesfully.""" - super(InjectHttpProxyIntegrationTestBase, self).tearDown() + super(InjectHttpProxyIntegrationTestBase, self).tearDown(caplog) assert (self.proxy_server.stop() == 0) def getTestServerRootUri(self): @@ -106,7 +117,7 @@ def getTestServerRootUri(self): @pytest.fixture(params=determineIpVersionsFromEnvironment()) -def inject_envoy_http_proxy_fixture(request, server_config, proxy_config): +def inject_envoy_http_proxy_fixture(request, server_config, proxy_config, caplog): """Injects an Envoy proxy in front of the test server. NOTE: Depends on the proxy_config fixture, which must be explicitly imported @@ -116,10 +127,11 @@ def inject_envoy_http_proxy_fixture(request, server_config, proxy_config): request: supplies the ip version. server_config: path to the server configuration template. proxy_config: path to the proxy configuration template. 
+ caplog: The pytest `caplog` test fixture used to examine logged messages. Yields: a successfully set up InjectHttpProxyIntegrationTestBase instance. """ - fixture = InjectHttpProxyIntegrationTestBase(request.param, server_config, proxy_config) + fixture = InjectHttpProxyIntegrationTestBase(request, server_config, proxy_config) fixture.setUp() yield fixture - fixture.tearDown() + fixture.tearDown(caplog) diff --git a/test/integration/integration_test_fixtures.py b/test/integration/integration_test_fixtures.py index 34fea1d15..8e29a1940 100644 --- a/test/integration/integration_test_fixtures.py +++ b/test/integration/integration_test_fixtures.py @@ -54,30 +54,35 @@ class IntegrationTestBase(): work when there is only one test server. This class will be refactored (https://github.com/envoyproxy/nighthawk/issues/258). + + Attributes: + ip_version: IP version that the proxy should use when listening. + server_ip: string containing the server ip that will be used to listen + tag: String. Supply this to get recognizeable output locations. + parameters: Dictionary. Supply this to provide template parameter replacement values. + grpc_service: NighthawkGrpcService instance or None. Set by startNighthawkGrpcService(). + test_server: NighthawkTestServer instance, set during setUp(). + nighthawk_client_path: String, path to the nighthawk_client binary. + request: The pytest `request` test fixture used to determine information + about the currently executing test case. """ - def __init__(self, ip_version, server_config, backend_count=1, bootstrap_version_arg=None): + def __init__(self, request, server_config, backend_count=1, bootstrap_version_arg=None): """Initialize the IntegrationTestBase instance. Args: ip_version: a single IP mode that this instance will test: IpVersion.IPV4 or IpVersion.IPV6 + request: The pytest `request` test fixture used to determine information + about the currently executing test case. 
server_config: path to the server configuration backend_count: number of Nighthawk Test Server backends to run, to allow testing MultiTarget mode bootstrap_version_arg: An optional int, specify a bootstrap cli argument value for the test server binary. If None is specified, no bootstrap cli argment will be passed. - - Attributes: - ip_version: IP version that the proxy should use when listening. - server_ip: string containing the server ip that will be used to listen - tag: String. Supply this to get recognizeable output locations. - parameters: Dictionary. Supply this to provide template parameter replacement values. - grpc_service: NighthawkGrpcService instance or None. Set by startNighthawkGrpcService(). - test_server: NighthawkTestServer instance, set during setUp(). - nighthawk_client_path: String, path to the nighthawk_client binary. """ super(IntegrationTestBase, self).__init__() - assert ip_version != IpVersion.UNKNOWN - self.ip_version = ip_version - self.server_ip = "::" if ip_version == IpVersion.IPV6 else "0.0.0.0" + self.request = request + self.ip_version = request.param + assert self.ip_version != IpVersion.UNKNOWN + self.server_ip = "::" if self.ip_version == IpVersion.IPV6 else "0.0.0.0" self.server_ip = os.getenv("TEST_SERVER_EXTERNAL_IP", self.server_ip) self.tag = "" self.parameters = {} @@ -88,7 +93,7 @@ def __init__(self, ip_version, server_config, backend_count=1, bootstrap_version self._nighthawk_test_config_path = server_config self._nighthawk_service_path = "nighthawk_service" self._nighthawk_output_transform_path = "nighthawk_output_transform" - self._socket_type = socket.AF_INET6 if ip_version == IpVersion.IPV6 else socket.AF_INET + self._socket_type = socket.AF_INET6 if self.ip_version == IpVersion.IPV6 else socket.AF_INET self._test_servers = [] self._backend_count = backend_count self._test_id = "" @@ -126,8 +131,14 @@ def setUp(self): self.tag = "{timestamp}/{test_id}".format(timestamp=_TIMESTAMP, test_id=self._test_id) assert 
self._tryStartTestServers(), "Test server(s) failed to start" - def tearDown(self): - """Stop the server.""" + def tearDown(self, caplog): + """Stop the server. + + Fails the test if any warnings or errors were logged. + + Args: + caplog: The pytest `caplog` test fixture used to examine logged messages. + """ if self.grpc_service is not None: assert (self.grpc_service.stop() == 0) @@ -137,12 +148,22 @@ def tearDown(self): any_failed = True assert (not any_failed) + warnings_and_errors = [] + for when in ("setup", "call", "teardown"): + for record in caplog.get_records(when): + if record.levelno not in (logging.WARNING, logging.ERROR): + continue + warnings_and_errors.append(record.message) + if warnings_and_errors: + pytest.fail("warnings or errors encountered during testing:\n{}".format(warnings_and_errors)) + def _tryStartTestServers(self): for i in range(self._backend_count): test_server = NighthawkTestServer(self._nighthawk_test_server_path, self._nighthawk_test_config_path, self.server_ip, self.ip_version, + self.request, parameters=self.parameters, tag=self.tag, bootstrap_version_arg=self._bootstrap_version_arg) @@ -292,9 +313,9 @@ class HttpIntegrationTestBase(IntegrationTestBase): by pytest. """ - def __init__(self, ip_version, server_config): + def __init__(self, request, server_config): """See base class.""" - super(HttpIntegrationTestBase, self).__init__(ip_version, server_config) + super(HttpIntegrationTestBase, self).__init__(request, server_config) def getTestServerRootUri(self): """See base class.""" @@ -309,10 +330,10 @@ class HttpIntegrationTestBaseWithEnvoyDeprecatedV2Bootstrap(IntegrationTestBase) by pytest. 
""" - def __init__(self, ip_version, server_config): + def __init__(self, request, server_config): """See base class.""" super(HttpIntegrationTestBaseWithEnvoyDeprecatedV2Bootstrap, - self).__init__(ip_version, server_config, bootstrap_version_arg=2) + self).__init__(request, server_config, bootstrap_version_arg=2) def getTestServerRootUri(self): """See base class.""" @@ -323,10 +344,9 @@ def getTestServerRootUri(self): class MultiServerHttpIntegrationTestBase(IntegrationTestBase): """Base for running plain http tests against multiple Nighthawk test servers.""" - def __init__(self, ip_version, server_config, backend_count): + def __init__(self, request, server_config, backend_count): """See base class.""" - super(MultiServerHttpIntegrationTestBase, self).__init__(ip_version, server_config, - backend_count) + super(MultiServerHttpIntegrationTestBase, self).__init__(request, server_config, backend_count) def getTestServerRootUri(self): """See base class.""" @@ -340,9 +360,9 @@ def getAllTestServerRootUris(self): class HttpsIntegrationTestBase(IntegrationTestBase): """Base for https tests against the Nighthawk test server.""" - def __init__(self, ip_version, server_config): + def __init__(self, request, server_config): """See base class.""" - super(HttpsIntegrationTestBase, self).__init__(ip_version, server_config) + super(HttpsIntegrationTestBase, self).__init__(request, server_config) def getTestServerRootUri(self): """See base class.""" @@ -352,9 +372,9 @@ def getTestServerRootUri(self): class SniIntegrationTestBase(HttpsIntegrationTestBase): """Base for https/sni tests against the Nighthawk test server.""" - def __init__(self, ip_version, server_config): + def __init__(self, request, server_config): """See base class.""" - super(SniIntegrationTestBase, self).__init__(ip_version, server_config) + super(SniIntegrationTestBase, self).__init__(request, server_config) def getTestServerRootUri(self): """See base class.""" @@ -364,10 +384,9 @@ def 
getTestServerRootUri(self): class MultiServerHttpsIntegrationTestBase(IntegrationTestBase): """Base for https tests against multiple Nighthawk test servers.""" - def __init__(self, ip_version, server_config, backend_count): + def __init__(self, request, server_config, backend_count): """See base class.""" - super(MultiServerHttpsIntegrationTestBase, self).__init__(ip_version, server_config, - backend_count) + super(MultiServerHttpsIntegrationTestBase, self).__init__(request, server_config, backend_count) def getTestServerRootUri(self): """See base class.""" @@ -389,65 +408,65 @@ def server_config(): @pytest.fixture(params=determineIpVersionsFromEnvironment()) -def http_test_server_fixture(request, server_config): +def http_test_server_fixture(request, server_config, caplog): """Fixture for setting up a test environment with the stock http server configuration. Yields: HttpIntegrationTestBase: A fully set up instance. Tear down will happen automatically. """ - f = HttpIntegrationTestBase(request.param, server_config) + f = HttpIntegrationTestBase(request, server_config) f.setUp() yield f - f.tearDown() + f.tearDown(caplog) @pytest.fixture(params=determineIpVersionsFromEnvironment()) -def http_test_server_fixture_envoy_deprecated_v2_api(request, server_config): +def http_test_server_fixture_envoy_deprecated_v2_api(request, server_config, caplog): """Fixture for setting up a test environment with http server configuration that uses v2 configuration. Yields: HttpIntegrationTestBaseWithEnvoyDeprecatedV2Bootstrap: A fully set up instance. Tear down will happen automatically. 
""" - f = HttpIntegrationTestBaseWithEnvoyDeprecatedV2Bootstrap(request.param, server_config) + f = HttpIntegrationTestBaseWithEnvoyDeprecatedV2Bootstrap(request, server_config) f.setUp() yield f - f.tearDown() + f.tearDown(caplog) @pytest.fixture(params=determineIpVersionsFromEnvironment()) -def https_test_server_fixture(request, server_config): +def https_test_server_fixture(request, server_config, caplog): """Fixture for setting up a test environment with the stock https server configuration. Yields: HttpsIntegrationTestBase: A fully set up instance. Tear down will happen automatically. """ - f = HttpsIntegrationTestBase(request.param, server_config) + f = HttpsIntegrationTestBase(request, server_config) f.setUp() yield f - f.tearDown() + f.tearDown(caplog) @pytest.fixture(params=determineIpVersionsFromEnvironment()) -def multi_http_test_server_fixture(request, server_config): +def multi_http_test_server_fixture(request, server_config, caplog): """Fixture for setting up a test environment with multiple servers, using the stock http server configuration. Yields: MultiServerHttpIntegrationTestBase: A fully set up instance. Tear down will happen automatically. """ - f = MultiServerHttpIntegrationTestBase(request.param, server_config, backend_count=3) + f = MultiServerHttpIntegrationTestBase(request, server_config, backend_count=3) f.setUp() yield f - f.tearDown() + f.tearDown(caplog) @pytest.fixture(params=determineIpVersionsFromEnvironment()) -def multi_https_test_server_fixture(request, server_config): +def multi_https_test_server_fixture(request, server_config, caplog): """Fixture for setting up a test environment with multiple servers, using the stock https server configuration. Yields: MultiServerHttpsIntegrationTestBase: A fully set up instance. Tear down will happen automatically. 
""" - f = MultiServerHttpsIntegrationTestBase(request.param, server_config, backend_count=3) + f = MultiServerHttpsIntegrationTestBase(request, server_config, backend_count=3) f.setUp() yield f - f.tearDown() + f.tearDown(caplog) diff --git a/test/integration/nighthawk_test_server.py b/test/integration/nighthawk_test_server.py index b4ee5d056..e4c9ea6d4 100644 --- a/test/integration/nighthawk_test_server.py +++ b/test/integration/nighthawk_test_server.py @@ -1,14 +1,16 @@ """Contains the NighthawkTestServer class, which wraps the nighthawk_test_servern binary.""" +import collections import http.client import json import logging import os +import random +import re +import requests import socket import subprocess import sys -import random -import requests import tempfile import threading import time @@ -40,6 +42,65 @@ def _substitute_yaml_values(runfiles_instance, obj, params): return obj +class _TestCaseWarnErrorIgnoreList( + collections.namedtuple("_TestCaseWarnErrorIgnoreList", "test_case_regexp ignore_list")): + """Maps test case names to messages that should be ignored in the test server logs. + + If the name of the currently executing test case matches the test_case_regexp, + any messages logged by the test server as either a WARNING or an ERROR + will be checked against the ignore_list. If the logged messages contain any of + the messages in the ignore list as a substring, they will be ignored. + Any unmatched messages of either a WARNING or an ERROR severity will fail the + test case. + + Attributes: + test_case_regexp: A compiled regular expression as returned by re.compile(), + the regexp that will be used to match test case names. + ignore_list: A tuple of strings, messages to ignore for matching test cases. + """ + + +# A list of _TestCaseWarnErrorIgnoreList instances, message pieces that should +# be ignored even if logged by the test server at a WARNING or an ERROR +# severity. 
+# +# If multiple test_case_regexp entries match the current test case name, all the +# corresponding ignore lists will be used. +_TEST_SERVER_WARN_ERROR_IGNORE_LIST = frozenset([ + # This test case purposefully uses the deprecated Envoy v2 API which emits + # the following warnings. + _TestCaseWarnErrorIgnoreList( + re.compile('test_nighthawk_test_server_envoy_deprecated_v2_api'), + ( + "Configuration does not parse cleanly as v3. v2 configuration is deprecated", + "Deprecated field: type envoy.api.v2.listener.Filter", + "Deprecated field: type envoy.config.filter.network.http_connection_manager.v2.HttpFilter", + "Using deprecated extension name 'envoy.http_connection_manager'", + "Using deprecated extension name 'envoy.router'", + ), + ), + + # A catch-all that applies to all remaining test cases. + _TestCaseWarnErrorIgnoreList( + re.compile('.*'), + ( + # TODO(#582): Identify these and file issues or add explanation as necessary. + "Unable to use runtime singleton for feature envoy.http.headermap.lazy_map_min_size", + "Using deprecated extension name 'envoy.listener.tls_inspector' for 'envoy.filters.listener.tls_inspector'.", + "there is no configured limit to the number of allowed active connections. Set a limit via the runtime key overload.global_downstream_max_connections", + + # A few of our filters use the same typed configuration, specifically + # 'test-server', 'time-tracking' and 'dynamic-delay'. + # For now this is by design. + "Double registration for type: 'nighthawk.server.ResponseOptions'", + + # Logged for normal termination, not really a warning. + "caught SIGTERM", + ), + ), +]) + + class TestServerBase(object): """Base class for running a server in a separate process. @@ -56,6 +117,7 @@ def __init__(self, config_template_path, server_ip, ip_version, + request, server_binary_config_path_arg, parameters, tag, @@ -67,6 +129,7 @@ def __init__(self, config_template_path (str): specify the path to the test server configuration template. 
server_ip (str): Specify the ip address the test server should use to listen for traffic. ip_version (IPAddress): Specify the ip version the server should use to listen for traffic. + request: The pytest `request` fixture used to determin information about the currently executed test. server_binary_config_path_arg (str): Specify the name of the CLI argument the test server binary uses to accept a configuration path. parameters (dict): Supply to provide configuration template parameter replacement values. tag (str): Supply to get recognizeable output locations. @@ -92,6 +155,7 @@ def __init__(self, self._server_binary_config_path_arg = server_binary_config_path_arg self._bootstrap_version_arg = bootstrap_version_arg self._prepareForExecution() + self._request = request def _prepareForExecution(self): runfiles_instance = runfiles.Create() @@ -138,6 +202,13 @@ def _serverThreadRunner(self): stdout, stderr = self._server_process.communicate() logging.info("Process stdout: %s", stdout.decode("utf-8")) logging.info("Process stderr: %s", stderr.decode("utf-8")) + warnings, errors = _extractWarningsAndErrors(stdout.decode() + stderr.decode(), + self._request.node.name, + _TEST_SERVER_WARN_ERROR_IGNORE_LIST) + if warnings: + [logging.warn("Process logged a warning: %s", w) for w in warnings] + if errors: + [logging.error("Process logged an error: %s", e) for e in errors] def fetchJsonFromAdminInterface(self, path): """Fetch and parse json from the admin interface. @@ -239,6 +310,7 @@ def __init__(self, config_template_path, server_ip, ip_version, + request, parameters=dict(), tag="", bootstrap_version_arg=None): @@ -249,6 +321,7 @@ def __init__(self, config_template_path (String): Path to the nighthawk test server configuration template. server_ip (String): Ip address for the server to use when listening. ip_version (IPVersion): IPVersion enum member indicating the ip version that the server should use when listening. 
+ request: The pytest `request` fixture used to determin information about the currently executed test. parameters (dictionary, optional): Directionary with replacement values for substition purposes in the server configuration template. Defaults to dict(). tag (str, optional): Tags. Supply this to get recognizeable output locations. Defaults to "". bootstrap_version_arg (String, optional): Specify a cli argument value for --bootstrap-version when running the server. @@ -257,6 +330,7 @@ def __init__(self, config_template_path, server_ip, ip_version, + request, "--config-path", parameters, tag, @@ -273,3 +347,59 @@ def getCliVersionString(self): stdout, stderr = process.communicate() assert process.wait() == 0 return stdout.decode("utf-8").strip() + + +def _matchesAnyIgnoreListEntry(line, test_case_name, ignore_list): + """Determine if the line matches any of the ignore list entries for this test case. + + Args: + line: A string, the logged line. + test_case_name: A string, name of the currently executed test case. + ignore_list: A list of _TestCaseWarnErrorIgnoreList instances, the ignore + lists to match against. + + Returns: + A boolean, True if the logged line matches any of the ignore list entries, + False otherwise. + """ + for test_case_ignore_list in ignore_list: + if not test_case_ignore_list.test_case_regexp.match(test_case_name): + continue + for ignore_message in test_case_ignore_list.ignore_list: + if ignore_message in line: + return True + return False + + +def _extractWarningsAndErrors(process_output, test_case_name, ignore_list): + """Extract warnings and errors from the process_output. + + Args: + process_output: A string, the stdout or stderr after running a process. + test_case_name: A string, the name of the current test case. + ignore_list: A list of _TestCaseWarnErrorIgnoreList instances, the message + pieces to ignore. 
If a message that was logged either at a WARNING or at + an ERROR severity contains one of these message pieces and should be + ignored for the current test case, it will be excluded from the return + values. + + Returns: + A tuple of two lists of strings, the first list contains the warnings found + in the process_output and the second list contains the errors found in the + process_output. + """ + warnings = [] + errors = [] + for line in process_output.split('\n'): + # Optimization - no need to examine lines that aren't errors or warnings. + if "[warning]" not in line and "[error]" not in line: + continue + + if _matchesAnyIgnoreListEntry(line, test_case_name, ignore_list): + continue + + if "[warning]" in line: + warnings.append(line) + elif "[error]" in line: + errors.append(line) + return warnings, errors diff --git a/test/integration/unit_tests/BUILD b/test/integration/unit_tests/BUILD new file mode 100644 index 000000000..8da878e31 --- /dev/null +++ b/test/integration/unit_tests/BUILD @@ -0,0 +1,11 @@ +load("@rules_python//python:defs.bzl", "py_binary", "py_library") + +licenses(["notice"]) # Apache 2 + +py_test( + name = "test_nighthawk_test_server", + srcs = ["test_nighthawk_test_server.py"], + deps = [ + "//test/integration:integration_test_base_lean", + ], +) diff --git a/test/integration/unit_tests/test_nighthawk_test_server.py b/test/integration/unit_tests/test_nighthawk_test_server.py new file mode 100644 index 000000000..cd7a25e83 --- /dev/null +++ b/test/integration/unit_tests/test_nighthawk_test_server.py @@ -0,0 +1,139 @@ +"""Contains unit tests for functions in nighthawk_test_server.py.""" + +import pytest +import re + +from test.integration import nighthawk_test_server + + +def test_extractWarningsAndErrors_nothing_on_empty_output(): + """Test with an empty input.""" + warnings, errors = nighthawk_test_server._extractWarningsAndErrors("", "test_case", []) + assert not warnings + assert not errors + + +def 
test_extractWarningsAndErrors_ignores_info_logs(): + """Test where the process output doesn't contain any warnings or errors.""" + process_output = """ + [2020-12-01 04:41:57.219][126][info][misc] Message. + """ + warnings, errors = nighthawk_test_server._extractWarningsAndErrors(process_output, "test_case", + []) + assert not warnings + assert not errors + + +def test_extractWarningsAndErrors_extracts_a_warning(): + """Test where the process output contains a single warning.""" + process_output = "[2020-12-01 04:41:57.219][126][warning][misc] Message." + warnings, errors = nighthawk_test_server._extractWarningsAndErrors(process_output, "test_case", + []) + assert warnings == ["[2020-12-01 04:41:57.219][126][warning][misc] Message."] + assert not errors + + +def test_extractWarningsAndErrors_extracts_an_error(): + """Test where the process output contains a single error.""" + process_output = "[2020-12-01 04:41:57.219][126][error][misc] Message." + warnings, errors = nighthawk_test_server._extractWarningsAndErrors(process_output, "test_case", + []) + assert not warnings + assert errors == ["[2020-12-01 04:41:57.219][126][error][misc] Message."] + + +def test_extractWarningsAndErrors_extracts_multiple_messages(): + """Test where the process output contains multiple warnings and errors.""" + process_output = """[warning][misc] Warning1. +[error][misc] Error1. +[info][misc] Info1. +[error][runtime] Error2. +[warning][runtime] Warning2. + """ + warnings, errors = nighthawk_test_server._extractWarningsAndErrors(process_output, "test_case", + []) + assert warnings == ["[warning][misc] Warning1.", "[warning][runtime] Warning2."] + assert errors == ["[error][misc] Error1.", "[error][runtime] Error2."] + + +def test_extractWarningsAndErrors_skips_messages_matching_ignore_list_when_test_case_matched_with_a_glob( +): + """Test where the ignore list is used.""" + process_output = """[warning][misc] Warning1 foo. +[error][misc] Error1 bar. +[info][misc] Info1. 
+[error][runtime] Error2 baz. +[warning][runtime] Warning2 bar. + """ + + ignore_list = [ + nighthawk_test_server._TestCaseWarnErrorIgnoreList(re.compile(".*"), ("foo", "bar")), + ] + warnings, errors = nighthawk_test_server._extractWarningsAndErrors(process_output, "test_case", + ignore_list) + assert not warnings + assert errors == ["[error][runtime] Error2 baz."] + + +def test_extractWarningsAndErrors_skips_messages_matching_ignore_list_when_test_case_matched_exactly( +): + """Test where the ignore list is used.""" + process_output = """[warning][misc] Warning1 foo. +[error][misc] Error1 bar. +[info][misc] Info1. +[error][runtime] Error2 baz. +[warning][runtime] Warning2 bar. + """ + + ignore_list = [ + nighthawk_test_server._TestCaseWarnErrorIgnoreList(re.compile("test_case"), ("foo", "bar")), + ] + warnings, errors = nighthawk_test_server._extractWarningsAndErrors(process_output, "test_case", + ignore_list) + assert not warnings + assert errors == ["[error][runtime] Error2 baz."] + + +def test_extractWarningsAndErrors_does_not_apply_ignore_list_for_non_matching_test_case_name(): + """Test where the ignore list is used.""" + process_output = """[warning][misc] Warning1 foo. +[error][misc] Error1 bar. +[info][misc] Info1. +[error][runtime] Error2 baz. +[warning][runtime] Warning2 bar. + """ + + ignore_list = [ + nighthawk_test_server._TestCaseWarnErrorIgnoreList(re.compile("test_case1"), ("foo",)), + nighthawk_test_server._TestCaseWarnErrorIgnoreList(re.compile("test_case2"), ("bar",)), + ] + warnings, errors = nighthawk_test_server._extractWarningsAndErrors(process_output, "test_case1", + ignore_list) + assert warnings == ["[warning][runtime] Warning2 bar."] + assert errors == [ + "[error][misc] Error1 bar.", + "[error][runtime] Error2 baz.", + ] + + +def test_extractWarningsAndErrors_applies_all_matching_ignore_lists(): + """Test where the ignore list is used.""" + process_output = """[warning][misc] Warning1 foo. +[error][misc] Error1 bar. 
+[info][misc] Info1. +[error][runtime] Error2 baz. +[warning][runtime] Warning2 bar. + """ + + ignore_list = [ + nighthawk_test_server._TestCaseWarnErrorIgnoreList(re.compile("test_case1"), ("foo",)), + nighthawk_test_server._TestCaseWarnErrorIgnoreList(re.compile(".*"), ("bar",)), + ] + warnings, errors = nighthawk_test_server._extractWarningsAndErrors(process_output, "test_case1", + ignore_list) + assert not warnings + assert errors == ["[error][runtime] Error2 baz."] + + +if __name__ == "__main__": + raise SystemExit(pytest.main([__file__])) From 395422a1c03c83dbb070ef62829ca8a886ea644f Mon Sep 17 00:00:00 2001 From: Otto van der Schaaf Date: Wed, 9 Dec 2020 02:10:41 +0100 Subject: [PATCH 49/63] Use v3 response headers in configuration (#592) Signed-off-by: Otto van der Schaaf --- ci/docker/default-config.yaml | 2 +- source/server/README.md | 2 +- test/integration/configurations/nighthawk_http_origin.yaml | 2 +- .../nighthawk_http_origin_envoy_deprecated_v2_api.yaml | 2 +- test/integration/configurations/nighthawk_https_origin.yaml | 2 +- test/integration/configurations/sni_origin.yaml | 4 ++-- test/integration/unit_tests/BUILD | 2 +- 7 files changed, 8 insertions(+), 8 deletions(-) diff --git a/ci/docker/default-config.yaml b/ci/docker/default-config.yaml index 8c931f188..741c9800c 100644 --- a/ci/docker/default-config.yaml +++ b/ci/docker/default-config.yaml @@ -28,7 +28,7 @@ static_resources: - name: test-server config: response_body_size: 10 - response_headers: + v3_response_headers: - { header: { key: "x-nh", value: "1"}} - name: envoy.router config: diff --git a/source/server/README.md b/source/server/README.md index f1c27000d..e916feaa0 100644 --- a/source/server/README.md +++ b/source/server/README.md @@ -55,7 +55,7 @@ static_resources: typed_config: "@type": type.googleapis.com/nighthawk.server.ResponseOptions response_body_size: 10 - response_headers: + v3_response_headers: - { header: { key: "foo", value: "bar" } } - { header: { key: "foo", value: 
"bar2" }, diff --git a/test/integration/configurations/nighthawk_http_origin.yaml b/test/integration/configurations/nighthawk_http_origin.yaml index 618a11758..3dbf4392a 100644 --- a/test/integration/configurations/nighthawk_http_origin.yaml +++ b/test/integration/configurations/nighthawk_http_origin.yaml @@ -30,7 +30,7 @@ static_resources: typed_config: "@type": type.googleapis.com/nighthawk.server.ResponseOptions response_body_size: 10 - response_headers: + v3_response_headers: - { header: { key: "x-nh", value: "1"}} - name: envoy.filters.http.router typed_config: diff --git a/test/integration/configurations/nighthawk_http_origin_envoy_deprecated_v2_api.yaml b/test/integration/configurations/nighthawk_http_origin_envoy_deprecated_v2_api.yaml index 0e795b6ba..5e07954c7 100644 --- a/test/integration/configurations/nighthawk_http_origin_envoy_deprecated_v2_api.yaml +++ b/test/integration/configurations/nighthawk_http_origin_envoy_deprecated_v2_api.yaml @@ -27,7 +27,7 @@ static_resources: - name: test-server config: response_body_size: 10 - response_headers: + v3_response_headers: - { header: { key: "x-nh", value: "1"}} - name: envoy.router config: diff --git a/test/integration/configurations/nighthawk_https_origin.yaml b/test/integration/configurations/nighthawk_https_origin.yaml index 9bdc69805..3423897e3 100644 --- a/test/integration/configurations/nighthawk_https_origin.yaml +++ b/test/integration/configurations/nighthawk_https_origin.yaml @@ -28,7 +28,7 @@ static_resources: typed_config: "@type": type.googleapis.com/nighthawk.server.ResponseOptions response_body_size: 10 - response_headers: + v3_response_headers: - { header: { key: "x-nh", value: "1" } } - name: envoy.filters.http.router typed_config: diff --git a/test/integration/configurations/sni_origin.yaml b/test/integration/configurations/sni_origin.yaml index ba531fd7c..15dffaf3f 100644 --- a/test/integration/configurations/sni_origin.yaml +++ b/test/integration/configurations/sni_origin.yaml @@ -63,7 
+63,7 @@ static_resources: typed_config: "@type": type.googleapis.com/nighthawk.server.ResponseOptions response_body_size: 10 - response_headers: + v3_response_headers: - { header: { key: "x-nh", value: "1"}} - name: envoy.filters.http.router typed_config: @@ -87,7 +87,7 @@ static_resources: typed_config: "@type": type.googleapis.com/nighthawk.server.ResponseOptions response_body_size: 10 - response_headers: + v3_response_headers: - { header: { key: "x-nh", value: "1"}} - name: envoy.filters.http.router typed_config: diff --git a/test/integration/unit_tests/BUILD b/test/integration/unit_tests/BUILD index 8da878e31..faaefdd23 100644 --- a/test/integration/unit_tests/BUILD +++ b/test/integration/unit_tests/BUILD @@ -1,4 +1,4 @@ -load("@rules_python//python:defs.bzl", "py_binary", "py_library") +load("@rules_python//python:defs.bzl", "py_test") licenses(["notice"]) # Apache 2 From 172760a6fa4ee3cebdfbf7cb095397c696b19582 Mon Sep 17 00:00:00 2001 From: Otto van der Schaaf Date: Fri, 11 Dec 2020 16:11:20 +0100 Subject: [PATCH 50/63] Update Envoy to 8188e232a9e0b15111d30f4724cbc7bf77d3964a (#587) List of changes: - Update Envoy to 8188e232a9e0b15111d30f4724cbc7bf77d3964a (Dec 8th) - Sync .bazelrc - Modify extensions_build_config.bzl, add required DISABLED_BY_DEFAULT_EXTENSIONS - Rework our h1 connection pool derivation to keep our own prefetching feature working. Note: We enable the allow_prefetch runtime feature for this. - Changes to reflect changed Envoy constructors and method signatures: OnPoolReady(), ProdClusterManagerFactory(), allocateConnPool() - Modified log filtering in the integration test: `SIGTERM` -> `ENVOY_SIGTERM`. - Dropped include of `ares.h` as small cleanup, we no longer need that. - In `options_impl.cc` there's a small change to how we define `elapsed_since_epoch` to unbreak building with `libc++`: a regression of https://github.com/envoyproxy/nighthawk/issues/569. Filed https://github.com/envoyproxy/nighthawk/issues/594 to avoid regression. 
Signed-off-by: Otto van der Schaaf --- .bazelrc | 35 ++++++++++++----------- bazel/repositories.bzl | 4 +-- extensions_build_config.bzl | 3 ++ source/client/BUILD | 1 + source/client/benchmark_client_impl.cc | 10 +++---- source/client/options_impl.cc | 5 ++-- source/client/process_impl.cc | 23 ++++++++------- source/client/process_impl.h | 2 ++ source/client/stream_decoder.cc | 3 +- source/client/stream_decoder.h | 3 +- source/server/README.md | 6 +--- test/benchmark_http_client_test.cc | 3 +- test/integration/nighthawk_test_server.py | 2 +- test/process_test.cc | 6 ++++ test/stream_decoder_test.cc | 6 ++-- 15 files changed, 65 insertions(+), 47 deletions(-) diff --git a/.bazelrc b/.bazelrc index 525535336..ebe96668d 100644 --- a/.bazelrc +++ b/.bazelrc @@ -60,8 +60,8 @@ build:asan --config=sanitizer # ASAN install its signal handler, disable ours so the stacktrace will be printed by ASAN build:asan --define signal_trace=disabled build:asan --define ENVOY_CONFIG_ASAN=1 -build:asan --copt -fsanitize=address,undefined -build:asan --linkopt -fsanitize=address,undefined +build:asan --copt -fsanitize=address +build:asan --linkopt -fsanitize=address # vptr and function sanitizer are enabled in clang-asan if it is set up via bazel/setup_clang.sh. 
build:asan --copt -fno-sanitize=vptr,function build:asan --linkopt -fno-sanitize=vptr,function @@ -152,9 +152,9 @@ build:coverage --experimental_use_llvm_covmap build:coverage --collect_code_coverage build:coverage --test_tag_filters=-nocoverage build:coverage --instrumentation_filter="//source(?!/common/chromium_url|/extensions/quic_listeners/quiche/platform)[/:],//include[/:]" -coverage:test-coverage --test_arg="-l trace" -coverage:fuzz-coverage --config=plain-fuzzer -coverage:fuzz-coverage --run_under=@envoy//bazel/coverage:fuzz_coverage_wrapper.sh +build:test-coverage --test_arg="-l trace" +build:fuzz-coverage --config=plain-fuzzer +build:fuzz-coverage --run_under=@envoy//bazel/coverage:fuzz_coverage_wrapper.sh # Remote execution: https://docs.bazel.build/versions/master/remote-execution.html build:rbe-toolchain --action_env=BAZEL_DO_NOT_DETECT_CPP_TOOLCHAIN=1 @@ -285,22 +285,18 @@ build:remote-ci --remote_cache=grpcs://remotebuildexecution.googleapis.com build:remote-ci --remote_executor=grpcs://remotebuildexecution.googleapis.com # Fuzz builds -# -DFUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION is passed in in the bazel build target -# rules for fuzz tests. Passing it in the CLI will cause dependencies to be build -# with the macro. Causing issues in RouteMatcherTest.TestRoutes that expect prod -# behavior from RE2 library. -build:asan-fuzzer --config=asan -build:asan-fuzzer --define=FUZZING_ENGINE=libfuzzer -build:asan-fuzzer --copt=-fsanitize=fuzzer-no-link -build:asan-fuzzer --copt=-fno-omit-frame-pointer -# Remove UBSAN halt_on_error to avoid crashing on protobuf errors. -build:asan-fuzzer --test_env=UBSAN_OPTIONS=print_stacktrace=1 - # Fuzzing without ASAN. This is useful for profiling fuzzers without any ASAN artifacts. 
build:plain-fuzzer --define=FUZZING_ENGINE=libfuzzer build:plain-fuzzer --define ENVOY_CONFIG_ASAN=1 build:plain-fuzzer --copt=-fsanitize=fuzzer-no-link build:plain-fuzzer --linkopt=-fsanitize=fuzzer-no-link +build:plain-fuzzer --copt=-DFUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION + +build:asan-fuzzer --config=plain-fuzzer +build:asan-fuzzer --config=asan +build:asan-fuzzer --copt=-fno-omit-frame-pointer +# Remove UBSAN halt_on_error to avoid crashing on protobuf errors. +build:asan-fuzzer --test_env=UBSAN_OPTIONS=print_stacktrace=1 # Compile database generation config build:compdb --build_tag_filters=-nocompdb @@ -310,6 +306,7 @@ build:windows --action_env=TMPDIR build:windows --define signal_trace=disabled build:windows --define hot_restart=disabled build:windows --define tcmalloc=disabled +build:windows --define wasm=disabled build:windows --define manual_stamp=manual_stamp build:windows --cxxopt="/std:c++17" @@ -334,6 +331,12 @@ build:clang-cl --define clang_cl=1 # Override determinism flags (DATE etc) is valid on clang-cl compiler build:clang-cl --copt="-Wno-macro-redefined" build:clang-cl --copt="-Wno-builtin-macro-redefined" +# Workaround problematic missing override declarations of mocks +# TODO: resolve this class of problematic mocks, e.g. 
+# ./test/mocks/http/stream.h(16,21): error: 'addCallbacks' +# overrides a member function but is not marked 'override' +# MOCK_METHOD(void, addCallbacks, (StreamCallbacks & callbacks)); +build:clang-cl --copt="-Wno-inconsistent-missing-override" build:clang-cl --action_env=USE_CLANG_CL=1 # Defaults to 'auto' - Off for windows, so override to linux behavior diff --git a/bazel/repositories.bzl b/bazel/repositories.bzl index e3c7d62fe..b3b8b03b3 100644 --- a/bazel/repositories.bzl +++ b/bazel/repositories.bzl @@ -1,7 +1,7 @@ load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") -ENVOY_COMMIT = "588d9344b31e6544869547c4bcd359b3b0f1d4cf" # November 16th, 2020 -ENVOY_SHA = "45935eee5714b4d85e2eb264f6e1a922999ff8e5823a49fb0c4d1255494550a8" +ENVOY_COMMIT = "8188e232a9e0b15111d30f4724cbc7bf77d3964a" # December 8th, 2020 +ENVOY_SHA = "35478b1c133197a7dd1ea1349cd3e8a09ad0169614fa5de1e2336b37ea563c67" HDR_HISTOGRAM_C_VERSION = "0.11.2" # October 12th, 2020 HDR_HISTOGRAM_C_SHA = "637f28b5f64de2e268131e4e34e6eef0b91cf5ff99167db447d9b2825eae6bad" diff --git a/extensions_build_config.bzl b/extensions_build_config.bzl index 050877713..b77ba36a4 100644 --- a/extensions_build_config.bzl +++ b/extensions_build_config.bzl @@ -8,6 +8,9 @@ EXTENSIONS = { "envoy.transport_sockets.raw_buffer": "//source/extensions/transport_sockets/raw_buffer:config", } +DISABLED_BY_DEFAULT_EXTENSIONS = { +} + # These can be changed to ["//visibility:public"], for downstream builds which # need to directly reference Envoy extensions. 
EXTENSION_CONFIG_VISIBILITY = ["//visibility:public"] diff --git a/source/client/BUILD b/source/client/BUILD index 4fe97d170..affc901d1 100644 --- a/source/client/BUILD +++ b/source/client/BUILD @@ -92,6 +92,7 @@ envoy_cc_library( "@envoy//source/server/config_validation:admin_lib_with_external_headers", "@envoy//include/envoy/http:protocol_interface_with_external_headers", "@envoy//source/common/common:statusor_lib_with_external_headers", + "@envoy//source/common/router:context_lib_with_external_headers", ] + select({ "//bazel:zipkin_disabled": [], "//conditions:default": [ diff --git a/source/client/benchmark_client_impl.cc b/source/client/benchmark_client_impl.cc index 83008acad..28c3ac6e3 100644 --- a/source/client/benchmark_client_impl.cc +++ b/source/client/benchmark_client_impl.cc @@ -57,11 +57,11 @@ Http1PoolImpl::newStream(Envoy::Http::ResponseDecoder& response_decoder, // In prefetch mode we try to keep the amount of connections at the configured limit. if (prefetch_connections_) { while (host_->cluster().resourceManager(priority_).connections().canCreate()) { - // We cannot rely on ::tryCreateConnection here, because that might decline without - // updating connections().canCreate() above. We would risk an infinite loop. - Envoy::ConnectionPool::ActiveClientPtr client = instantiateActiveClient(); - connecting_stream_capacity_ += client->effectiveConcurrentStreamLimit(); - Envoy::LinkedList::moveIntoList(std::move(client), owningList(client->state_)); + // We pass in a high prefetch ratio, because we don't want to throttle the prefetched + // connection amount like Envoy does out of the box. 
+ if (!tryCreateNewConnection(10000.0)) { + break; + } } } diff --git a/source/client/options_impl.cc b/source/client/options_impl.cc index 0ed825fe7..935a423ff 100644 --- a/source/client/options_impl.cc +++ b/source/client/options_impl.cc @@ -662,8 +662,9 @@ OptionsImpl::OptionsImpl(const nighthawk::client::CommandLineOptions& options) { allow_envoy_deprecated_v2_api_ = PROTOBUF_GET_WRAPPED_OR_DEFAULT( options, allow_envoy_deprecated_v2_api, allow_envoy_deprecated_v2_api_); if (options.has_scheduled_start()) { - const auto elapsed_since_epoch = std::chrono::nanoseconds(options.scheduled_start().nanos()) + - std::chrono::seconds(options.scheduled_start().seconds()); + const auto elapsed_since_epoch = std::chrono::duration_cast( + std::chrono::nanoseconds(options.scheduled_start().nanos()) + + std::chrono::seconds(options.scheduled_start().seconds())); scheduled_start_ = Envoy::SystemTime(std::chrono::time_point(elapsed_since_epoch)); } diff --git a/source/client/process_impl.cc b/source/client/process_impl.cc index efef72125..a26c51acf 100644 --- a/source/client/process_impl.cc +++ b/source/client/process_impl.cc @@ -53,8 +53,6 @@ #include "client/options_impl.h" #include "client/sni_utility.h" -#include "ares.h" - using namespace std::chrono_literals; namespace Nighthawk { @@ -66,14 +64,16 @@ class ClusterManagerFactory : public Envoy::Upstream::ProdClusterManagerFactory public: using Envoy::Upstream::ProdClusterManagerFactory::ProdClusterManagerFactory; - Envoy::Http::ConnectionPool::InstancePtr allocateConnPool( - Envoy::Event::Dispatcher& dispatcher, Envoy::Upstream::HostConstSharedPtr host, - Envoy::Upstream::ResourcePriority priority, Envoy::Http::Protocol protocol, - const Envoy::Network::ConnectionSocket::OptionsSharedPtr& options, - const Envoy::Network::TransportSocketOptionsSharedPtr& transport_socket_options) override { + Envoy::Http::ConnectionPool::InstancePtr + allocateConnPool(Envoy::Event::Dispatcher& dispatcher, Envoy::Upstream::HostConstSharedPtr 
host, + Envoy::Upstream::ResourcePriority priority, Envoy::Http::Protocol protocol, + const Envoy::Network::ConnectionSocket::OptionsSharedPtr& options, + const Envoy::Network::TransportSocketOptionsSharedPtr& transport_socket_options, + Envoy::Upstream::ClusterConnectivityState& state) override { if (protocol == Envoy::Http::Protocol::Http11 || protocol == Envoy::Http::Protocol::Http10) { auto* h1_pool = new Http1PoolImpl( host, priority, dispatcher, options, transport_socket_options, api_.randomGenerator(), + state, [](Envoy::Http::HttpConnPoolImplBase* pool) { return std::make_unique(*pool); }, @@ -90,7 +90,7 @@ class ClusterManagerFactory : public Envoy::Upstream::ProdClusterManagerFactory return Envoy::Http::ConnectionPool::InstancePtr{h1_pool}; } return Envoy::Upstream::ProdClusterManagerFactory::allocateConnPool( - dispatcher, host, priority, protocol, options, transport_socket_options); + dispatcher, host, priority, protocol, options, transport_socket_options, state); } void setConnectionReuseStrategy( @@ -126,7 +126,8 @@ ProcessImpl::ProcessImpl(const Options& options, Envoy::Event::TimeSystem& time_ singleton_manager_(std::make_unique(api_->threadFactory())), access_log_manager_(std::chrono::milliseconds(1000), *api_, *dispatcher_, access_log_lock_, store_root_), - init_watcher_("Nighthawk", []() {}), validation_context_(false, false, false) { + init_watcher_("Nighthawk", []() {}), validation_context_(false, false, false), + router_context_(store_root_.symbolTable()) { // Any dispatchers created after the following call will use hr timers. 
setupForHRTimers(); std::string lower = absl::AsciiStrToLower( @@ -298,6 +299,8 @@ void ProcessImpl::allowEnvoyDeprecatedV2Api(envoy::config::bootstrap::v3::Bootst proto_true.set_string_value("true"); (*runtime_layer->mutable_static_layer() ->mutable_fields())["envoy.reloadable_features.enable_deprecated_v2_api"] = proto_true; + (*runtime_layer->mutable_static_layer() + ->mutable_fields())["envoy.reloadable_features.allow_prefetch"] = proto_true; } void ProcessImpl::createBootstrapConfiguration(envoy::config::bootstrap::v3::Bootstrap& bootstrap, @@ -517,7 +520,7 @@ bool ProcessImpl::runInternal(OutputCollector& collector, const std::vectorcreateDnsResolver({}, false), *ssl_context_manager_, *dispatcher_, *local_info_, secret_manager_, validation_context_, *api_, http_context_, grpc_context_, - access_log_manager_, *singleton_manager_); + router_context_, access_log_manager_, *singleton_manager_); cluster_manager_factory_->setConnectionReuseStrategy( options_.h1ConnectionReuseStrategy() == nighthawk::client::H1ConnectionReuseStrategy::LRU ? 
Http1PoolImpl::ConnectionReuseStrategy::LRU diff --git a/source/client/process_impl.h b/source/client/process_impl.h index 03a99a72a..0344262db 100644 --- a/source/client/process_impl.h +++ b/source/client/process_impl.h @@ -21,6 +21,7 @@ #include "external/envoy/source/common/grpc/context_impl.h" #include "external/envoy/source/common/http/context_impl.h" #include "external/envoy/source/common/protobuf/message_validator_impl.h" +#include "external/envoy/source/common/router/context_impl.h" #include "external/envoy/source/common/secret/secret_manager_impl.h" #include "external/envoy/source/common/stats/allocator_impl.h" #include "external/envoy/source/common/stats/thread_local_store.h" @@ -207,6 +208,7 @@ class ProcessImpl : public Process, public Envoy::Logger::Loggable flush_worker_; + Envoy::Router::ContextImpl router_context_; }; } // namespace Client diff --git a/source/client/stream_decoder.cc b/source/client/stream_decoder.cc index 4c935e959..21c09d74c 100644 --- a/source/client/stream_decoder.cc +++ b/source/client/stream_decoder.cc @@ -103,7 +103,8 @@ void StreamDecoder::onPoolFailure(Envoy::Http::ConnectionPool::PoolFailureReason void StreamDecoder::onPoolReady(Envoy::Http::RequestEncoder& encoder, Envoy::Upstream::HostDescriptionConstSharedPtr, - const Envoy::StreamInfo::StreamInfo&) { + const Envoy::StreamInfo::StreamInfo&, + absl::optional) { // Make sure we hear about stream resets on the encoder. encoder.getStream().addCallbacks(*this); upstream_timing_.onFirstUpstreamTxByteSent(time_source_); // XXX(oschaaf): is this correct? 
diff --git a/source/client/stream_decoder.h b/source/client/stream_decoder.h index f641d171f..cb0f454dc 100644 --- a/source/client/stream_decoder.h +++ b/source/client/stream_decoder.h @@ -84,7 +84,8 @@ class StreamDecoder : public Envoy::Http::ResponseDecoder, Envoy::Upstream::HostDescriptionConstSharedPtr host) override; void onPoolReady(Envoy::Http::RequestEncoder& encoder, Envoy::Upstream::HostDescriptionConstSharedPtr host, - const Envoy::StreamInfo::StreamInfo& stream_info) override; + const Envoy::StreamInfo::StreamInfo& stream_info, + absl::optional protocol) override; static Envoy::StreamInfo::ResponseFlag streamResetReasonToResponseFlag(Envoy::Http::StreamResetReason reset_reason); diff --git a/source/server/README.md b/source/server/README.md index e916feaa0..ad0fb9dcd 100644 --- a/source/server/README.md +++ b/source/server/README.md @@ -165,8 +165,7 @@ USAGE: bazel-bin/nighthawk_test_server [--socket-mode ] [--socket-path ] [--disable-extensions -] [--use-fake-symbol-table -] [--cpuset-threads] +] [--cpuset-threads] [--enable-mutex-tracing] [--disable-hot-restart] [--mode ] [--parent-shutdown-time-s @@ -208,9 +207,6 @@ Path to hot restart socket file --disable-extensions Comma-separated list of extensions to disable ---use-fake-symbol-table -Use fake symbol table implementation - --cpuset-threads Get the default # of worker threads from cpuset size diff --git a/test/benchmark_http_client_test.cc b/test/benchmark_http_client_test.cc index e92cbaf13..afe062372 100644 --- a/test/benchmark_http_client_test.cc +++ b/test/benchmark_http_client_test.cc @@ -69,7 +69,6 @@ class BenchmarkClientHttpTest : public Test { (std::make_shared(header_map_param)); EXPECT_CALL(cluster_manager(), httpConnPoolForCluster(_, _, _, _)) .WillRepeatedly(Return(&pool_)); - EXPECT_CALL(cluster_manager(), get(_)).WillRepeatedly(Return(&thread_local_cluster_)); EXPECT_CALL(thread_local_cluster_, info()).WillRepeatedly(Return(cluster_info_)); auto& tracer = 
static_cast(*http_tracer_); @@ -120,7 +119,7 @@ class BenchmarkClientHttpTest : public Test { decoders_.push_back(&decoder); NiceMock stream_info; callbacks.onPoolReady(stream_encoder_, Envoy::Upstream::HostDescriptionConstSharedPtr{}, - stream_info); + stream_info, {} /*absl::optional protocol*/); return nullptr; }); diff --git a/test/integration/nighthawk_test_server.py b/test/integration/nighthawk_test_server.py index e4c9ea6d4..3318afdc2 100644 --- a/test/integration/nighthawk_test_server.py +++ b/test/integration/nighthawk_test_server.py @@ -95,7 +95,7 @@ class _TestCaseWarnErrorIgnoreList( "Double registration for type: 'nighthawk.server.ResponseOptions'", # Logged for normal termination, not really a warning. - "caught SIGTERM", + "caught ENVOY_SIGTERM", ), ), ]) diff --git a/test/process_test.cc b/test/process_test.cc index 077f60ef4..4296ef770 100644 --- a/test/process_test.cc +++ b/test/process_test.cc @@ -195,6 +195,12 @@ TEST(RuntimeConfiguration, allowEnvoyDeprecatedV2Api) { layers { name: "static_layer" static_layer { + fields { + key: "envoy.reloadable_features.allow_prefetch" + value { + string_value: "true" + } + } fields { key: "envoy.reloadable_features.enable_deprecated_v2_api" value { diff --git a/test/stream_decoder_test.cc b/test/stream_decoder_test.cc index cd7630389..ee00310a9 100644 --- a/test/stream_decoder_test.cc +++ b/test/stream_decoder_test.cc @@ -118,7 +118,8 @@ TEST_F(StreamDecoderTest, LatencyIsNotMeasured) { NiceMock stream_info; EXPECT_CALL(stream_encoder, encodeHeaders(Envoy::HeaderMapEqualRef(request_headers_.get()), true)); - decoder->onPoolReady(stream_encoder, ptr, stream_info); + decoder->onPoolReady(stream_encoder, ptr, stream_info, + {} /*absl::optional protocol*/); decoder->decodeHeaders(std::move(test_header_), true); EXPECT_EQ(0, connect_statistic_.count()); EXPECT_EQ(0, latency_statistic_.count()); @@ -153,7 +154,8 @@ TEST_F(StreamDecoderTest, LatencyIsMeasured) { Envoy::Upstream::HostDescriptionConstSharedPtr ptr; 
NiceMock stream_info; EXPECT_CALL(stream_encoder, encodeHeaders(_, true)); - decoder->onPoolReady(stream_encoder, ptr, stream_info); + decoder->onPoolReady(stream_encoder, ptr, stream_info, + {} /*absl::optional protocol*/); EXPECT_EQ(1, connect_statistic_.count()); decoder->decodeHeaders(std::move(test_header_), false); EXPECT_EQ(0, stream_decoder_export_latency_callbacks_); From 3f9d6c269cae8404106b04b74b7d85232603a040 Mon Sep 17 00:00:00 2001 From: Otto van der Schaaf Date: Mon, 14 Dec 2020 03:58:18 +0100 Subject: [PATCH 51/63] Update Envoy to 424909395c90d7d68f1afeb3427c26c7c85f2672 (#595) - The way we can obtain a http connection pool changed. Amend. - Cleanup by fix_format: strip superfluous .Times(1) in tests. Signed-off-by: Otto van der Schaaf --- bazel/repositories.bzl | 4 +- source/client/benchmark_client_impl.h | 5 ++- test/benchmark_http_client_test.cc | 7 ++-- test/client_worker_test.cc | 16 ++++---- test/factories_test.cc | 56 +++++++++++++-------------- test/flush_worker_test.cc | 4 +- test/rate_limiter_test.cc | 10 ++--- test/sequencer_test.cc | 2 +- test/stream_decoder_test.cc | 4 +- test/worker_test.cc | 6 +-- 10 files changed, 58 insertions(+), 56 deletions(-) diff --git a/bazel/repositories.bzl b/bazel/repositories.bzl index b3b8b03b3..95a02b90c 100644 --- a/bazel/repositories.bzl +++ b/bazel/repositories.bzl @@ -1,7 +1,7 @@ load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") -ENVOY_COMMIT = "8188e232a9e0b15111d30f4724cbc7bf77d3964a" # December 8th, 2020 -ENVOY_SHA = "35478b1c133197a7dd1ea1349cd3e8a09ad0169614fa5de1e2336b37ea563c67" +ENVOY_COMMIT = "424909395c90d7d68f1afeb3427c26c7c85f2672" # December 11th, 2020 +ENVOY_SHA = "d80514bcb2ea0f124681d7a05535f724846d5cef2a455f9b2d1d9a29c3ab5740" HDR_HISTOGRAM_C_VERSION = "0.11.2" # October 12th, 2020 HDR_HISTOGRAM_C_SHA = "637f28b5f64de2e268131e4e34e6eef0b91cf5ff99167db447d9b2825eae6bad" diff --git a/source/client/benchmark_client_impl.h b/source/client/benchmark_client_impl.h index 
7254f83ae..48a174cdd 100644 --- a/source/client/benchmark_client_impl.h +++ b/source/client/benchmark_client_impl.h @@ -137,8 +137,9 @@ class BenchmarkClientHttpImpl : public BenchmarkClient, // Helpers Envoy::Http::ConnectionPool::Instance* pool() { auto proto = use_h2_ ? Envoy::Http::Protocol::Http2 : Envoy::Http::Protocol::Http11; - return cluster_manager_->httpConnPoolForCluster( - cluster_name_, Envoy::Upstream::ResourcePriority::Default, proto, nullptr); + const auto thread_local_cluster = cluster_manager_->getThreadLocalCluster(cluster_name_); + return thread_local_cluster->httpConnPool(Envoy::Upstream::ResourcePriority::Default, proto, + nullptr); } private: diff --git a/test/benchmark_http_client_test.cc b/test/benchmark_http_client_test.cc index afe062372..a429a1863 100644 --- a/test/benchmark_http_client_test.cc +++ b/test/benchmark_http_client_test.cc @@ -67,9 +67,10 @@ class BenchmarkClientHttpTest : public Test { {":scheme", "http"}, {":method", "GET"}, {":path", "/"}, {":host", "localhost"}}; default_header_map_ = (std::make_shared(header_map_param)); - EXPECT_CALL(cluster_manager(), httpConnPoolForCluster(_, _, _, _)) - .WillRepeatedly(Return(&pool_)); + EXPECT_CALL(cluster_manager(), getThreadLocalCluster(_)) + .WillRepeatedly(Return(&thread_local_cluster_)); EXPECT_CALL(thread_local_cluster_, info()).WillRepeatedly(Return(cluster_info_)); + EXPECT_CALL(thread_local_cluster_, httpConnPool(_, _, _)).WillRepeatedly(Return(&pool_)); auto& tracer = static_cast(*http_tracer_); EXPECT_CALL(tracer, startSpan_(_, _, _, _)) @@ -357,7 +358,7 @@ TEST_F(BenchmarkClientHttpTest, RequestMethodPost) { return std::make_unique(header); }; - EXPECT_CALL(stream_encoder_, encodeData(_, _)).Times(1); + EXPECT_CALL(stream_encoder_, encodeData(_, _)); auto client_setup_parameters = ClientSetupParameters(1, 1, 1, request_generator); verifyBenchmarkClientProcessesExpectedInflightRequests(client_setup_parameters); EXPECT_EQ(1, getCounter("http_2xx")); diff --git 
a/test/client_worker_test.cc b/test/client_worker_test.cc index 8ffdf6680..e143b9635 100644 --- a/test/client_worker_test.cc +++ b/test/client_worker_test.cc @@ -54,7 +54,7 @@ class ClientWorkerTest : public Test { EXPECT_CALL(request_generator_factory_, create(_, _, _, _)) .Times(1) .WillOnce(Return(ByMove(std::unique_ptr(request_generator_)))); - EXPECT_CALL(*request_generator_, initOnThread()).Times(1); + EXPECT_CALL(*request_generator_, initOnThread()); EXPECT_CALL(termination_predicate_factory_, create(_, _, _)) .WillOnce(Return(ByMove(createMockTerminationPredicate()))); @@ -105,13 +105,13 @@ TEST_F(ClientWorkerTest, BasicTest) { { InSequence dummy; - EXPECT_CALL(*benchmark_client_, setShouldMeasureLatencies(false)).Times(1); + EXPECT_CALL(*benchmark_client_, setShouldMeasureLatencies(false)); EXPECT_CALL(*benchmark_client_, tryStartRequest(_)) .WillOnce(Invoke(this, &ClientWorkerTest::CheckThreadChanged)); - EXPECT_CALL(*benchmark_client_, setShouldMeasureLatencies(true)).Times(1); - EXPECT_CALL(*sequencer_, start).Times(1); - EXPECT_CALL(*sequencer_, waitForCompletion).Times(1); - EXPECT_CALL(*benchmark_client_, terminate()).Times(1); + EXPECT_CALL(*benchmark_client_, setShouldMeasureLatencies(true)); + EXPECT_CALL(*sequencer_, start); + EXPECT_CALL(*sequencer_, waitForCompletion); + EXPECT_CALL(*benchmark_client_, terminate()); } int worker_number = 12345; @@ -123,8 +123,8 @@ TEST_F(ClientWorkerTest, BasicTest) { worker->start(); worker->waitForCompletion(); - EXPECT_CALL(*benchmark_client_, statistics()).Times(1).WillOnce(Return(createStatisticPtrMap())); - EXPECT_CALL(*sequencer_, statistics()).Times(1).WillOnce(Return(createStatisticPtrMap())); + EXPECT_CALL(*benchmark_client_, statistics()).WillOnce(Return(createStatisticPtrMap())); + EXPECT_CALL(*sequencer_, statistics()).WillOnce(Return(createStatisticPtrMap())); auto statistics = worker->statistics(); EXPECT_EQ(2, statistics.size()); diff --git a/test/factories_test.cc b/test/factories_test.cc index 
946ed1d45..b1f9cec12 100644 --- a/test/factories_test.cc +++ b/test/factories_test.cc @@ -36,15 +36,15 @@ class FactoriesTest : public Test { TEST_F(FactoriesTest, CreateBenchmarkClient) { BenchmarkClientFactoryImpl factory(options_); Envoy::Upstream::ClusterManagerPtr cluster_manager; - EXPECT_CALL(options_, connections()).Times(1); - EXPECT_CALL(options_, h2()).Times(1); - EXPECT_CALL(options_, maxPendingRequests()).Times(1); - EXPECT_CALL(options_, maxActiveRequests()).Times(1); - EXPECT_CALL(options_, maxRequestsPerConnection()).Times(1); - EXPECT_CALL(options_, openLoop()).Times(1); - EXPECT_CALL(options_, responseHeaderWithLatencyInput()).Times(1); + EXPECT_CALL(options_, connections()); + EXPECT_CALL(options_, h2()); + EXPECT_CALL(options_, maxPendingRequests()); + EXPECT_CALL(options_, maxActiveRequests()); + EXPECT_CALL(options_, maxRequestsPerConnection()); + EXPECT_CALL(options_, openLoop()); + EXPECT_CALL(options_, responseHeaderWithLatencyInput()); auto cmd = std::make_unique(); - EXPECT_CALL(options_, toCommandLineOptions()).Times(1).WillOnce(Return(ByMove(std::move(cmd)))); + EXPECT_CALL(options_, toCommandLineOptions()).WillOnce(Return(ByMove(std::move(cmd)))); StaticRequestSourceImpl request_generator( std::make_unique()); auto benchmark_client = @@ -71,10 +71,10 @@ TEST_F(FactoriesTest, CreateRequestSourcePluginWithWorkingJsonReturnsWorkingRequ Envoy::MessageUtil::loadFromJson(request_source_plugin_config_json, request_source_plugin_config.value(), Envoy::ProtobufMessage::getStrictValidationVisitor()); - EXPECT_CALL(options_, requestMethod()).Times(1); - EXPECT_CALL(options_, requestBodySize()).Times(1); + EXPECT_CALL(options_, requestMethod()); + EXPECT_CALL(options_, requestBodySize()); EXPECT_CALL(options_, uri()).Times(2).WillRepeatedly(Return("http://foo/")); - EXPECT_CALL(options_, requestSource()).Times(1); + EXPECT_CALL(options_, requestSource()); EXPECT_CALL(options_, requestSourcePluginConfig()) .Times(2) 
.WillRepeatedly(ReturnRef(request_source_plugin_config)); @@ -83,7 +83,7 @@ TEST_F(FactoriesTest, CreateRequestSourcePluginWithWorkingJsonReturnsWorkingRequ cmd->mutable_request_options()->add_request_headers(); request_headers->mutable_header()->set_key("foo"); request_headers->mutable_header()->set_value("bar"); - EXPECT_CALL(options_, toCommandLineOptions()).Times(1).WillOnce(Return(ByMove(std::move(cmd)))); + EXPECT_CALL(options_, toCommandLineOptions()).WillOnce(Return(ByMove(std::move(cmd)))); RequestSourceFactoryImpl factory(options_, *api_); Envoy::Upstream::ClusterManagerPtr cluster_manager; Nighthawk::RequestSourcePtr request_source = factory.create( @@ -111,10 +111,10 @@ TEST_F(FactoriesTest, CreateRequestSourcePluginWithNonWorkingJsonThrowsError) { Envoy::MessageUtil::loadFromJson(request_source_plugin_config_json, request_source_plugin_config.value(), Envoy::ProtobufMessage::getStrictValidationVisitor()); - EXPECT_CALL(options_, requestMethod()).Times(1); - EXPECT_CALL(options_, requestBodySize()).Times(1); + EXPECT_CALL(options_, requestMethod()); + EXPECT_CALL(options_, requestBodySize()); EXPECT_CALL(options_, uri()).Times(2).WillRepeatedly(Return("http://foo/")); - EXPECT_CALL(options_, requestSource()).Times(1); + EXPECT_CALL(options_, requestSource()); EXPECT_CALL(options_, requestSourcePluginConfig()) .Times(2) .WillRepeatedly(ReturnRef(request_source_plugin_config)); @@ -123,7 +123,7 @@ TEST_F(FactoriesTest, CreateRequestSourcePluginWithNonWorkingJsonThrowsError) { cmd->mutable_request_options()->add_request_headers(); request_headers->mutable_header()->set_key("foo"); request_headers->mutable_header()->set_value("bar"); - EXPECT_CALL(options_, toCommandLineOptions()).Times(1).WillOnce(Return(ByMove(std::move(cmd)))); + EXPECT_CALL(options_, toCommandLineOptions()).WillOnce(Return(ByMove(std::move(cmd)))); RequestSourceFactoryImpl factory(options_, *api_); Envoy::Upstream::ClusterManagerPtr cluster_manager; EXPECT_THROW_WITH_REGEX( @@ -135,10 
+135,10 @@ TEST_F(FactoriesTest, CreateRequestSourcePluginWithNonWorkingJsonThrowsError) { TEST_F(FactoriesTest, CreateRequestSource) { absl::optional request_source_plugin_config; - EXPECT_CALL(options_, requestMethod()).Times(1); - EXPECT_CALL(options_, requestBodySize()).Times(1); + EXPECT_CALL(options_, requestMethod()); + EXPECT_CALL(options_, requestBodySize()); EXPECT_CALL(options_, uri()).Times(2).WillRepeatedly(Return("http://foo/")); - EXPECT_CALL(options_, requestSource()).Times(1); + EXPECT_CALL(options_, requestSource()); EXPECT_CALL(options_, requestSourcePluginConfig()) .Times(1) .WillRepeatedly(ReturnRef(request_source_plugin_config)); @@ -147,7 +147,7 @@ TEST_F(FactoriesTest, CreateRequestSource) { cmd->mutable_request_options()->add_request_headers(); request_headers->mutable_header()->set_key("foo"); request_headers->mutable_header()->set_value("bar"); - EXPECT_CALL(options_, toCommandLineOptions()).Times(1).WillOnce(Return(ByMove(std::move(cmd)))); + EXPECT_CALL(options_, toCommandLineOptions()).WillOnce(Return(ByMove(std::move(cmd)))); RequestSourceFactoryImpl factory(options_, *api_); Envoy::Upstream::ClusterManagerPtr cluster_manager; RequestSourcePtr request_generator = factory.create( @@ -157,17 +157,17 @@ TEST_F(FactoriesTest, CreateRequestSource) { TEST_F(FactoriesTest, CreateRemoteRequestSource) { absl::optional request_source_plugin_config; - EXPECT_CALL(options_, requestMethod()).Times(1); - EXPECT_CALL(options_, requestBodySize()).Times(1); + EXPECT_CALL(options_, requestMethod()); + EXPECT_CALL(options_, requestBodySize()); EXPECT_CALL(options_, uri()).Times(2).WillRepeatedly(Return("http://foo/")); - EXPECT_CALL(options_, requestSource()).Times(1).WillRepeatedly(Return("http://bar/")); - EXPECT_CALL(options_, requestsPerSecond()).Times(1).WillRepeatedly(Return(5)); + EXPECT_CALL(options_, requestSource()).WillOnce(Return("http://bar/")); + EXPECT_CALL(options_, requestsPerSecond()).WillOnce(Return(5)); auto cmd = std::make_unique(); 
envoy::config::core::v3::HeaderValueOption* request_headers = cmd->mutable_request_options()->add_request_headers(); request_headers->mutable_header()->set_key("foo"); request_headers->mutable_header()->set_value("bar"); - EXPECT_CALL(options_, toCommandLineOptions()).Times(1).WillOnce(Return(ByMove(std::move(cmd)))); + EXPECT_CALL(options_, toCommandLineOptions()).WillOnce(Return(ByMove(std::move(cmd)))); RequestSourceFactoryImpl factory(options_, *api_); Envoy::Upstream::ClusterManagerPtr cluster_manager; RequestSourcePtr request_generator = factory.create( @@ -185,13 +185,13 @@ class SequencerFactoryTest sequencer_idle_strategy) { SequencerFactoryImpl factory(options_); MockBenchmarkClient benchmark_client; - EXPECT_CALL(options_, requestsPerSecond()).Times(1).WillOnce(Return(1)); - EXPECT_CALL(options_, burstSize()).Times(1).WillOnce(Return(2)); + EXPECT_CALL(options_, requestsPerSecond()).WillOnce(Return(1)); + EXPECT_CALL(options_, burstSize()).WillOnce(Return(2)); EXPECT_CALL(options_, sequencerIdleStrategy()) .Times(1) .WillOnce(Return(sequencer_idle_strategy)); EXPECT_CALL(dispatcher_, createTimer_(_)).Times(2); - EXPECT_CALL(options_, jitterUniform()).Times(1).WillOnce(Return(1ns)); + EXPECT_CALL(options_, jitterUniform()).WillOnce(Return(1ns)); Envoy::Event::SimulatedTimeSystem time_system; const SequencerTarget dummy_sequencer_target = [](const CompletionCallback&) -> bool { return true; diff --git a/test/flush_worker_test.cc b/test/flush_worker_test.cc index 42b390265..d9d2be38b 100644 --- a/test/flush_worker_test.cc +++ b/test/flush_worker_test.cc @@ -132,7 +132,7 @@ TEST_F(FlushWorkerTest, WorkerFlushStatsPeriodically) { thread.join(); // Stats flush should happen exactly once as the final flush is done in // FlushWorkerImpl::shutdownThread(). 
- EXPECT_CALL(*sink_, flush(_)).Times(1); + EXPECT_CALL(*sink_, flush(_)); worker.shutdown(); } @@ -147,7 +147,7 @@ TEST_F(FlushWorkerTest, FinalFlush) { worker.waitForCompletion(); // Stats flush should happen exactly once as the final flush is done in // FlushWorkerImpl::shutdownThread(). - EXPECT_CALL(*sink_, flush(_)).Times(1); + EXPECT_CALL(*sink_, flush(_)); worker.shutdown(); } diff --git a/test/rate_limiter_test.cc b/test/rate_limiter_test.cc index 935cdada6..98f253365 100644 --- a/test/rate_limiter_test.cc +++ b/test/rate_limiter_test.cc @@ -65,7 +65,7 @@ TEST_F(RateLimiterTest, BurstingRateLimiterTest) { rate_limiter->releaseOne(); EXPECT_TRUE(rate_limiter->tryAcquireOne()); EXPECT_TRUE(rate_limiter->tryAcquireOne()); - EXPECT_CALL(unsafe_mock_rate_limiter, tryAcquireOne).Times(1).WillOnce(Return(false)); + EXPECT_CALL(unsafe_mock_rate_limiter, tryAcquireOne).WillOnce(Return(false)); EXPECT_FALSE(rate_limiter->tryAcquireOne()); } @@ -260,7 +260,7 @@ TEST_F(DistributionSamplingRateLimiterTest, ReleaseOneFunctionsWhenAcquired) { EXPECT_CALL(mock_inner_rate_limiter_, tryAcquireOne).WillOnce(Return(true)); EXPECT_CALL(mock_discrete_numeric_distribution_sampler_, getValue).WillOnce(Return(0)); EXPECT_TRUE(rate_limiter_->tryAcquireOne()); - EXPECT_CALL(mock_inner_rate_limiter_, releaseOne).Times(1); + EXPECT_CALL(mock_inner_rate_limiter_, releaseOne); rate_limiter_->releaseOne(); } @@ -421,7 +421,7 @@ TEST_F(RateLimiterTest, GraduallyOpeningRateLimiterFilterInvalidArgumentTest) { // Pass in a badly configured distribution sampler. 
auto bad_distribution_sampler = std::make_unique(); - EXPECT_CALL(*bad_distribution_sampler, min).Times(1).WillOnce(Return(0)); + EXPECT_CALL(*bad_distribution_sampler, min).WillOnce(Return(0)); EXPECT_THROW( GraduallyOpeningRateLimiterFilter gorl(1s, std::move(bad_distribution_sampler), std::make_unique>()); @@ -429,8 +429,8 @@ TEST_F(RateLimiterTest, GraduallyOpeningRateLimiterFilterInvalidArgumentTest) { bad_distribution_sampler = std::make_unique(); // Correct min, but now introduce a bad max. - EXPECT_CALL(*bad_distribution_sampler, min).Times(1).WillOnce(Return(1)); - EXPECT_CALL(*bad_distribution_sampler, max).Times(1).WillOnce(Return(99)); + EXPECT_CALL(*bad_distribution_sampler, min).WillOnce(Return(1)); + EXPECT_CALL(*bad_distribution_sampler, max).WillOnce(Return(99)); EXPECT_THROW( GraduallyOpeningRateLimiterFilter gorl(1s, std::move(bad_distribution_sampler), std::make_unique>()); diff --git a/test/sequencer_test.cc b/test/sequencer_test.cc index 82ad3f69e..d87cb5462 100644 --- a/test/sequencer_test.cc +++ b/test/sequencer_test.cc @@ -201,7 +201,7 @@ TEST_F(SequencerTestWithTimerEmulation, RateLimiterSaturatedTargetInteraction) { EXPECT_CALL(*target(), callback(_)).Times(2).WillOnce(Return(true)).WillOnce(Return(false)); // The sequencer should call RateLimiter::releaseOne() when the target returns false. 
- EXPECT_CALL(rate_limiter_unsafe_ref_, releaseOne()).Times(1); + EXPECT_CALL(rate_limiter_unsafe_ref_, releaseOne()); expectDispatcherRun(); EXPECT_CALL(platform_util_, sleep(_)).Times(AtLeast(1)); diff --git a/test/stream_decoder_test.cc b/test/stream_decoder_test.cc index ee00310a9..8614f4f32 100644 --- a/test/stream_decoder_test.cc +++ b/test/stream_decoder_test.cc @@ -136,9 +136,9 @@ TEST_F(StreamDecoderTest, LatencyIsMeasured) { const Envoy::Tracing::Decision) -> Envoy::Tracing::Span* { EXPECT_EQ(Envoy::Tracing::OperationName::Egress, config.operationName()); auto* span = new Envoy::Tracing::MockSpan(); - EXPECT_CALL(*span, injectContext(_)).Times(1); + EXPECT_CALL(*span, injectContext(_)); EXPECT_CALL(*span, setTag(_, _)).Times(12); - EXPECT_CALL(*span, finishSpan()).Times(1); + EXPECT_CALL(*span, finishSpan()); return span; })); diff --git a/test/worker_test.cc b/test/worker_test.cc index c30a8b323..b0fb2a280 100644 --- a/test/worker_test.cc +++ b/test/worker_test.cc @@ -45,8 +45,8 @@ class WorkerTest : public Test { TEST_F(WorkerTest, WorkerExecutesOnThread) { InSequence in_sequence; - EXPECT_CALL(tls_, registerThread(_, false)).Times(1); - EXPECT_CALL(tls_, allocateSlot()).Times(1); + EXPECT_CALL(tls_, registerThread(_, false)); + EXPECT_CALL(tls_, allocateSlot()); TestWorker worker(*api_, tls_); NiceMock dispatcher; @@ -57,7 +57,7 @@ TEST_F(WorkerTest, WorkerExecutesOnThread) { worker.start(); worker.waitForCompletion(); - EXPECT_CALL(tls_, shutdownThread()).Times(1); + EXPECT_CALL(tls_, shutdownThread()); ASSERT_TRUE(worker.ran_); worker.shutdown(); } From 88d3bf411c1c5cb812b77997993c81a4d5475b36 Mon Sep 17 00:00:00 2001 From: yanavlasov Date: Wed, 16 Dec 2020 17:53:05 -0500 Subject: [PATCH 52/63] Optimize calls to std::string::find() and friends for a single char. (#598) The character literal overload is more efficient. 
Signed-off-by: Yan Avlasov --- source/common/utility.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/source/common/utility.cc b/source/common/utility.cc index 922aa5249..2e9430309 100644 --- a/source/common/utility.cc +++ b/source/common/utility.cc @@ -38,9 +38,9 @@ Utility::mapCountersFromStore(const Envoy::Stats::Store& store, size_t Utility::findPortSeparator(absl::string_view hostname) { if (hostname.size() > 0 && hostname[0] == '[') { - return hostname.find(":", hostname.find(']')); + return hostname.find(':', hostname.find(']')); } - return hostname.rfind(":"); + return hostname.rfind(':'); } Envoy::Network::DnsLookupFamily From 5ab9fadd8efbedbb4d8fae32a8191b54f0d68f69 Mon Sep 17 00:00:00 2001 From: Otto van der Schaaf Date: Mon, 21 Dec 2020 17:54:04 +0100 Subject: [PATCH 53/63] Update Envoy to 867b9e23d2e48350bd1b0d1fbc392a8355f20e35 (#599) - Unbreak changed allocateConnPool() method usage: changes with respect to ALPN allow negotiation of a protocol, which is backed in Envoy by a pool which supports >1 protocols. This update leaves a code-level comment plus a RELEASE_ASSERT when a multi-protocol pool is allocated. - Avoid MOCK_METHODn as per new check_format objections: Change our mocks to use MOCK_METHOD instead. 
Signed-off-by: Otto van der Schaaf --- bazel/repositories.bzl | 4 ++-- source/client/process_impl.cc | 14 +++++++++++--- test/mocks/client/mock_benchmark_client.h | 14 +++++++------- test/mocks/common/mock_rate_limiter.h | 16 ++++++++-------- test/mocks/common/mock_request_source.h | 4 ++-- test/mocks/common/mock_sequencer.h | 14 +++++++------- test/mocks/common/mock_termination_predicate.h | 8 ++++---- test/sequencer_test.cc | 2 +- 8 files changed, 42 insertions(+), 34 deletions(-) diff --git a/bazel/repositories.bzl b/bazel/repositories.bzl index 95a02b90c..b5f1df7c1 100644 --- a/bazel/repositories.bzl +++ b/bazel/repositories.bzl @@ -1,7 +1,7 @@ load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") -ENVOY_COMMIT = "424909395c90d7d68f1afeb3427c26c7c85f2672" # December 11th, 2020 -ENVOY_SHA = "d80514bcb2ea0f124681d7a05535f724846d5cef2a455f9b2d1d9a29c3ab5740" +ENVOY_COMMIT = "867b9e23d2e48350bd1b0d1fbc392a8355f20e35" # December 20th, 2020 +ENVOY_SHA = "b98a88bbff0c64ff08f88d2a4379dd708d1df012424d6b65c7e32773ce249a53" HDR_HISTOGRAM_C_VERSION = "0.11.2" # October 12th, 2020 HDR_HISTOGRAM_C_SHA = "637f28b5f64de2e268131e4e34e6eef0b91cf5ff99167db447d9b2825eae6bad" diff --git a/source/client/process_impl.cc b/source/client/process_impl.cc index a26c51acf..9350f2560 100644 --- a/source/client/process_impl.cc +++ b/source/client/process_impl.cc @@ -66,10 +66,18 @@ class ClusterManagerFactory : public Envoy::Upstream::ProdClusterManagerFactory Envoy::Http::ConnectionPool::InstancePtr allocateConnPool(Envoy::Event::Dispatcher& dispatcher, Envoy::Upstream::HostConstSharedPtr host, - Envoy::Upstream::ResourcePriority priority, Envoy::Http::Protocol protocol, + Envoy::Upstream::ResourcePriority priority, + std::vector& protocols, const Envoy::Network::ConnectionSocket::OptionsSharedPtr& options, const Envoy::Network::TransportSocketOptionsSharedPtr& transport_socket_options, Envoy::Upstream::ClusterConnectivityState& state) override { + // This changed in + // 
https://github.com/envoyproxy/envoy/commit/93ee668a690d297ab5e8bd2cbf03771d852ebbda ALPN may + // be set up to negotiate a protocol, in which case we'd need a HttpConnPoolImplMixed. However, + // our integration tests pass, and for now this might suffice. In case we do run into the need + // for supporting multiple protocols in a single pool, ensure we hear about it soon, by asserting. + RELEASE_ASSERT(protocols.size() == 1, "Expected a single protocol in protocols vector."); + const Envoy::Http::Protocol& protocol = protocols[0]; if (protocol == Envoy::Http::Protocol::Http11 || protocol == Envoy::Http::Protocol::Http10) { auto* h1_pool = new Http1PoolImpl( host, priority, dispatcher, options, transport_socket_options, api_.randomGenerator(), @@ -84,13 +92,13 @@ class ClusterManagerFactory : public Envoy::Upstream::ProdClusterManagerFactory data.host_description_, pool->dispatcher(), pool->randomGenerator())}; return codec; }, - std::vector{protocol}); + protocols); h1_pool->setConnectionReuseStrategy(connection_reuse_strategy_); h1_pool->setPrefetchConnections(prefetch_connections_); return Envoy::Http::ConnectionPool::InstancePtr{h1_pool}; } return Envoy::Upstream::ProdClusterManagerFactory::allocateConnPool( - dispatcher, host, priority, protocol, options, transport_socket_options, state); + dispatcher, host, priority, protocols, options, transport_socket_options, state); } void setConnectionReuseStrategy( diff --git a/test/mocks/client/mock_benchmark_client.h b/test/mocks/client/mock_benchmark_client.h index c8d2ba9a3..15b1babea 100644 --- a/test/mocks/client/mock_benchmark_client.h +++ b/test/mocks/client/mock_benchmark_client.h @@ -11,13 +11,13 @@ class MockBenchmarkClient : public BenchmarkClient { public: MockBenchmarkClient(); - MOCK_METHOD0(terminate, void()); - MOCK_METHOD1(setShouldMeasureLatencies, void(bool)); - MOCK_CONST_METHOD0(statistics, StatisticPtrMap()); - MOCK_METHOD1(tryStartRequest, bool(Client::CompletionCallback)); - 
MOCK_CONST_METHOD0(scope, Envoy::Stats::Scope&()); - MOCK_CONST_METHOD0(shouldMeasureLatencies, bool()); - MOCK_CONST_METHOD0(requestHeaders, const Envoy::Http::RequestHeaderMap&()); + MOCK_METHOD(void, terminate, ()); + MOCK_METHOD(void, setShouldMeasureLatencies, (bool)); + MOCK_METHOD(StatisticPtrMap, statistics, (), (const)); + MOCK_METHOD(bool, tryStartRequest, (Client::CompletionCallback)); + MOCK_METHOD(Envoy::Stats::Scope&, scope, (), (const)); + MOCK_METHOD(bool, shouldMeasureLatencies, (), (const)); + MOCK_METHOD(const Envoy::Http::RequestHeaderMap&, requestHeaders, (), (const)); }; } // namespace Client diff --git a/test/mocks/common/mock_rate_limiter.h b/test/mocks/common/mock_rate_limiter.h index a36bc154f..71e02a40a 100644 --- a/test/mocks/common/mock_rate_limiter.h +++ b/test/mocks/common/mock_rate_limiter.h @@ -10,19 +10,19 @@ class MockRateLimiter : public RateLimiter { public: MockRateLimiter(); - MOCK_METHOD0(tryAcquireOne, bool()); - MOCK_METHOD0(releaseOne, void()); - MOCK_METHOD0(timeSource, Envoy::TimeSource&()); - MOCK_METHOD0(elapsed, std::chrono::nanoseconds()); - MOCK_CONST_METHOD0(firstAcquisitionTime, absl::optional()); + MOCK_METHOD(bool, tryAcquireOne, ()); + MOCK_METHOD(void, releaseOne, ()); + MOCK_METHOD(Envoy::TimeSource&, timeSource, ()); + MOCK_METHOD(std::chrono::nanoseconds, elapsed, ()); + MOCK_METHOD(absl::optional, firstAcquisitionTime, (), (const)); }; class MockDiscreteNumericDistributionSampler : public DiscreteNumericDistributionSampler { public: MockDiscreteNumericDistributionSampler(); - MOCK_METHOD0(getValue, uint64_t()); - MOCK_CONST_METHOD0(min, uint64_t()); - MOCK_CONST_METHOD0(max, uint64_t()); + MOCK_METHOD(uint64_t, getValue, ()); + MOCK_METHOD(uint64_t, min, (), (const)); + MOCK_METHOD(uint64_t, max, (), (const)); }; } // namespace Nighthawk diff --git a/test/mocks/common/mock_request_source.h b/test/mocks/common/mock_request_source.h index ab3b7694a..268cc8ebd 100644 --- 
a/test/mocks/common/mock_request_source.h +++ b/test/mocks/common/mock_request_source.h @@ -9,8 +9,8 @@ namespace Nighthawk { class MockRequestSource : public RequestSource { public: MockRequestSource(); - MOCK_METHOD0(get, RequestGenerator()); - MOCK_METHOD0(initOnThread, void()); + MOCK_METHOD(RequestGenerator, get, ()); + MOCK_METHOD(void, initOnThread, ()); }; } // namespace Nighthawk \ No newline at end of file diff --git a/test/mocks/common/mock_sequencer.h b/test/mocks/common/mock_sequencer.h index 7d22434b7..dd93205a1 100644 --- a/test/mocks/common/mock_sequencer.h +++ b/test/mocks/common/mock_sequencer.h @@ -11,13 +11,13 @@ class MockSequencer : public Sequencer { public: MockSequencer(); - MOCK_METHOD0(start, void()); - MOCK_METHOD0(waitForCompletion, void()); - MOCK_CONST_METHOD0(completionsPerSecond, double()); - MOCK_CONST_METHOD0(executionDuration, std::chrono::nanoseconds()); - MOCK_CONST_METHOD0(statistics, StatisticPtrMap()); - MOCK_METHOD0(cancel, void()); - MOCK_CONST_METHOD0(rate_limiter, RateLimiter&()); + MOCK_METHOD(void, start, ()); + MOCK_METHOD(void, waitForCompletion, ()); + MOCK_METHOD(double, completionsPerSecond, (), (const)); + MOCK_METHOD(std::chrono::nanoseconds, executionDuration, (), (const)); + MOCK_METHOD(StatisticPtrMap, statistics, (), (const)); + MOCK_METHOD(void, cancel, ()); + MOCK_METHOD(RateLimiter&, rate_limiter, (), (const)); }; } // namespace Nighthawk \ No newline at end of file diff --git a/test/mocks/common/mock_termination_predicate.h b/test/mocks/common/mock_termination_predicate.h index 91077148e..da5c929ed 100644 --- a/test/mocks/common/mock_termination_predicate.h +++ b/test/mocks/common/mock_termination_predicate.h @@ -9,10 +9,10 @@ namespace Nighthawk { class MockTerminationPredicate : public TerminationPredicate { public: MockTerminationPredicate(); - MOCK_METHOD1(link, TerminationPredicate&(TerminationPredicatePtr&&)); - MOCK_METHOD1(appendToChain, TerminationPredicate&(TerminationPredicatePtr&&)); - 
MOCK_METHOD0(evaluateChain, TerminationPredicate::Status()); - MOCK_METHOD0(evaluate, TerminationPredicate::Status()); + MOCK_METHOD(TerminationPredicate&, link, (TerminationPredicatePtr && p)); + MOCK_METHOD(TerminationPredicate&, appendToChain, (TerminationPredicatePtr && p)); + MOCK_METHOD(TerminationPredicate::Status, evaluateChain, ()); + MOCK_METHOD(TerminationPredicate::Status, evaluate, ()); }; } // namespace Nighthawk \ No newline at end of file diff --git a/test/sequencer_test.cc b/test/sequencer_test.cc index d87cb5462..b87f1730f 100644 --- a/test/sequencer_test.cc +++ b/test/sequencer_test.cc @@ -34,7 +34,7 @@ class FakeSequencerTarget { class MockSequencerTarget : public FakeSequencerTarget { public: - MOCK_METHOD1(callback, bool(OperationCallback)); + MOCK_METHOD(bool, callback, (OperationCallback)); }; class SequencerTestBase : public testing::Test { From 308e6bea86df0c58c73d248cc2bcfde2fe6c3f0d Mon Sep 17 00:00:00 2001 From: Otto van der Schaaf Date: Wed, 6 Jan 2021 01:49:22 +0100 Subject: [PATCH 54/63] Update Envoy to 937f013 (Jan 5th 2021) (#601) - Transitively updates the grpc dependency, which eliminated the grpc_impl namespace. s/grpc_impl::/grpc::/ - Envoy::LocalInfo::LocalInfoImpl constructor now requires passing a symbol table. Pass that in. 
Signed-off-by: Otto van der Schaaf --- bazel/repositories.bzl | 4 +- source/client/process_impl.cc | 3 +- test/common/nighthawk_service_client_test.cc | 74 +++++++++----------- 3 files changed, 39 insertions(+), 42 deletions(-) diff --git a/bazel/repositories.bzl b/bazel/repositories.bzl index b5f1df7c1..b1be82202 100644 --- a/bazel/repositories.bzl +++ b/bazel/repositories.bzl @@ -1,7 +1,7 @@ load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") -ENVOY_COMMIT = "867b9e23d2e48350bd1b0d1fbc392a8355f20e35" # December 20th, 2020 -ENVOY_SHA = "b98a88bbff0c64ff08f88d2a4379dd708d1df012424d6b65c7e32773ce249a53" +ENVOY_COMMIT = "937f0133355cf2d8eb9e75d3f09e296bb63951be" # Jan 5th, 2021 +ENVOY_SHA = "5bdb98e2f2bad83ac672862e399e4ae2d0ca28c31740a3af16d154fc37401ca7" HDR_HISTOGRAM_C_VERSION = "0.11.2" # October 12th, 2020 HDR_HISTOGRAM_C_SHA = "637f28b5f64de2e268131e4e34e6eef0b91cf5ff99167db447d9b2825eae6bad" diff --git a/source/client/process_impl.cc b/source/client/process_impl.cc index 9350f2560..52523d3f5 100644 --- a/source/client/process_impl.cc +++ b/source/client/process_impl.cc @@ -127,7 +127,8 @@ ProcessImpl::ProcessImpl(const Options& options, Envoy::Event::TimeSystem& time_ request_generator_factory_(options, *api_), options_(options), init_manager_("nh_init_manager"), local_info_(new Envoy::LocalInfo::LocalInfoImpl( - {}, Envoy::Network::Utility::getLocalAddress(Envoy::Network::Address::IpVersion::v4), + store_root_.symbolTable(), {}, + Envoy::Network::Utility::getLocalAddress(Envoy::Network::Address::IpVersion::v4), "nighthawk_service_zone", "nighthawk_service_cluster", "nighthawk_service_node")), secret_manager_(config_tracker_), http_context_(store_root_.symbolTable()), grpc_context_(store_root_.symbolTable()), diff --git a/test/common/nighthawk_service_client_test.cc b/test/common/nighthawk_service_client_test.cc index b9a385857..f74e7d44f 100644 --- a/test/common/nighthawk_service_client_test.cc +++ 
b/test/common/nighthawk_service_client_test.cc @@ -33,7 +33,7 @@ TEST(PerformNighthawkBenchmark, UsesSpecifiedCommandLineOptions) { // Configure the mock Nighthawk Service stub to return an inner mock channel when the code under // test requests a channel. Set call expectations on the inner mock channel. EXPECT_CALL(mock_nighthawk_service_stub, ExecutionStreamRaw) - .WillOnce([&request](grpc_impl::ClientContext*) { + .WillOnce([&request](grpc::ClientContext*) { auto* mock_reader_writer = new grpc::testing::MockClientReaderWriter(); // PerformNighthawkBenchmark currently expects Read to return true exactly once. @@ -61,7 +61,7 @@ TEST(PerformNighthawkBenchmark, ReturnsNighthawkResponseSuccessfully) { // Configure the mock Nighthawk Service stub to return an inner mock channel when the code under // test requests a channel. Set call expectations on the inner mock channel. EXPECT_CALL(mock_nighthawk_service_stub, ExecutionStreamRaw) - .WillOnce([&expected_response](grpc_impl::ClientContext*) { + .WillOnce([&expected_response](grpc::ClientContext*) { auto* mock_reader_writer = new grpc::testing::MockClientReaderWriter(); // PerformNighthawkBenchmark currently expects Read to return true exactly once. @@ -88,15 +88,14 @@ TEST(PerformNighthawkBenchmark, ReturnsErrorIfNighthawkServiceDoesNotSendRespons nighthawk::client::MockNighthawkServiceStub mock_nighthawk_service_stub; // Configure the mock Nighthawk Service stub to return an inner mock channel when the code under // test requests a channel. Set call expectations on the inner mock channel. 
- EXPECT_CALL(mock_nighthawk_service_stub, ExecutionStreamRaw) - .WillOnce([](grpc_impl::ClientContext*) { - auto* mock_reader_writer = - new grpc::testing::MockClientReaderWriter(); - EXPECT_CALL(*mock_reader_writer, Read(_)).WillOnce(Return(false)); - EXPECT_CALL(*mock_reader_writer, Write(_, _)).WillOnce(Return(true)); - EXPECT_CALL(*mock_reader_writer, WritesDone()).WillOnce(Return(true)); - return mock_reader_writer; - }); + EXPECT_CALL(mock_nighthawk_service_stub, ExecutionStreamRaw).WillOnce([](grpc::ClientContext*) { + auto* mock_reader_writer = + new grpc::testing::MockClientReaderWriter(); + EXPECT_CALL(*mock_reader_writer, Read(_)).WillOnce(Return(false)); + EXPECT_CALL(*mock_reader_writer, Write(_, _)).WillOnce(Return(true)); + EXPECT_CALL(*mock_reader_writer, WritesDone()).WillOnce(Return(true)); + return mock_reader_writer; + }); NighthawkServiceClientImpl client; absl::StatusOr response_or = @@ -111,13 +110,12 @@ TEST(PerformNighthawkBenchmark, ReturnsErrorIfNighthawkServiceWriteFails) { nighthawk::client::MockNighthawkServiceStub mock_nighthawk_service_stub; // Configure the mock Nighthawk Service stub to return an inner mock channel when the code under // test requests a channel. Set call expectations on the inner mock channel. 
- EXPECT_CALL(mock_nighthawk_service_stub, ExecutionStreamRaw) - .WillOnce([](grpc_impl::ClientContext*) { - auto* mock_reader_writer = - new grpc::testing::MockClientReaderWriter(); - EXPECT_CALL(*mock_reader_writer, Write(_, _)).WillOnce(Return(false)); - return mock_reader_writer; - }); + EXPECT_CALL(mock_nighthawk_service_stub, ExecutionStreamRaw).WillOnce([](grpc::ClientContext*) { + auto* mock_reader_writer = + new grpc::testing::MockClientReaderWriter(); + EXPECT_CALL(*mock_reader_writer, Write(_, _)).WillOnce(Return(false)); + return mock_reader_writer; + }); NighthawkServiceClientImpl client; absl::StatusOr response_or = @@ -131,14 +129,13 @@ TEST(PerformNighthawkBenchmark, ReturnsErrorIfNighthawkServiceWritesDoneFails) { nighthawk::client::MockNighthawkServiceStub mock_nighthawk_service_stub; // Configure the mock Nighthawk Service stub to return an inner mock channel when the code under // test requests a channel. Set call expectations on the inner mock channel. - EXPECT_CALL(mock_nighthawk_service_stub, ExecutionStreamRaw) - .WillOnce([](grpc_impl::ClientContext*) { - auto* mock_reader_writer = - new grpc::testing::MockClientReaderWriter(); - EXPECT_CALL(*mock_reader_writer, Write(_, _)).WillOnce(Return(true)); - EXPECT_CALL(*mock_reader_writer, WritesDone()).WillOnce(Return(false)); - return mock_reader_writer; - }); + EXPECT_CALL(mock_nighthawk_service_stub, ExecutionStreamRaw).WillOnce([](grpc::ClientContext*) { + auto* mock_reader_writer = + new grpc::testing::MockClientReaderWriter(); + EXPECT_CALL(*mock_reader_writer, Write(_, _)).WillOnce(Return(true)); + EXPECT_CALL(*mock_reader_writer, WritesDone()).WillOnce(Return(false)); + return mock_reader_writer; + }); NighthawkServiceClientImpl client; absl::StatusOr response_or = @@ -152,19 +149,18 @@ TEST(PerformNighthawkBenchmark, PropagatesErrorIfNighthawkServiceGrpcStreamClose nighthawk::client::MockNighthawkServiceStub mock_nighthawk_service_stub; // Configure the mock Nighthawk Service stub to 
return an inner mock channel when the code under // test requests a channel. Set call expectations on the inner mock channel. - EXPECT_CALL(mock_nighthawk_service_stub, ExecutionStreamRaw) - .WillOnce([](grpc_impl::ClientContext*) { - auto* mock_reader_writer = - new grpc::testing::MockClientReaderWriter(); - // PerformNighthawkBenchmark currently expects Read to return true exactly once. - EXPECT_CALL(*mock_reader_writer, Read(_)).WillOnce(Return(true)).WillOnce(Return(false)); - EXPECT_CALL(*mock_reader_writer, Write(_, _)).WillOnce(Return(true)); - EXPECT_CALL(*mock_reader_writer, WritesDone()).WillOnce(Return(true)); - EXPECT_CALL(*mock_reader_writer, Finish()) - .WillOnce( - Return(::grpc::Status(::grpc::PERMISSION_DENIED, "Finish failure status message"))); - return mock_reader_writer; - }); + EXPECT_CALL(mock_nighthawk_service_stub, ExecutionStreamRaw).WillOnce([](grpc::ClientContext*) { + auto* mock_reader_writer = + new grpc::testing::MockClientReaderWriter(); + // PerformNighthawkBenchmark currently expects Read to return true exactly once. + EXPECT_CALL(*mock_reader_writer, Read(_)).WillOnce(Return(true)).WillOnce(Return(false)); + EXPECT_CALL(*mock_reader_writer, Write(_, _)).WillOnce(Return(true)); + EXPECT_CALL(*mock_reader_writer, WritesDone()).WillOnce(Return(true)); + EXPECT_CALL(*mock_reader_writer, Finish()) + .WillOnce( + Return(::grpc::Status(::grpc::PERMISSION_DENIED, "Finish failure status message"))); + return mock_reader_writer; + }); NighthawkServiceClientImpl client; absl::StatusOr response_or = From 6b04186df71ac47c80656137a578e0d971d5ad92 Mon Sep 17 00:00:00 2001 From: Otto van der Schaaf Date: Tue, 12 Jan 2021 03:37:21 +0100 Subject: [PATCH 55/63] Signal handler: don't fire on destruction (#602) Fixes a bug observed while working on horizontal scaling support: SignalHandler would fire on destruction. Fix & add tests. Note: pre-emptive, I don't think this is an issue in the current state of master. 
But some of the CI errors in #600 seemed familiar and hopefully merging this in there helps. (Split out from https://github.com/oschaaf/nighthawk/tree/horizontal-scaling) Signed-off-by: Otto van der Schaaf --- source/common/signal_handler.cc | 7 ++++-- source/common/signal_handler.h | 3 ++- test/common/BUILD | 9 ++++++++ test/common/signal_handler_test.cc | 35 ++++++++++++++++++++++++++++++ 4 files changed, 51 insertions(+), 3 deletions(-) create mode 100644 test/common/signal_handler_test.cc diff --git a/source/common/signal_handler.cc b/source/common/signal_handler.cc index aa9316e66..59c86bc11 100644 --- a/source/common/signal_handler.cc +++ b/source/common/signal_handler.cc @@ -23,7 +23,9 @@ SignalHandler::SignalHandler(const std::function& signal_callback) { RELEASE_ASSERT(close(pipe_fds_[0]) == 0, "read side close failed"); RELEASE_ASSERT(close(pipe_fds_[1]) == 0, "write side close failed"); pipe_fds_.clear(); - signal_callback(); + if (!destructing_) { + signal_callback(); + } }); signal_handler_delegate = [this](int) { onSignal(); }; @@ -32,6 +34,7 @@ SignalHandler::SignalHandler(const std::function& signal_callback) { } SignalHandler::~SignalHandler() { + destructing_ = true; initiateShutdown(); if (shutdown_thread_.joinable()) { shutdown_thread_.join(); @@ -47,4 +50,4 @@ void SignalHandler::initiateShutdown() { void SignalHandler::onSignal() { initiateShutdown(); } -} // namespace Nighthawk \ No newline at end of file +} // namespace Nighthawk diff --git a/source/common/signal_handler.h b/source/common/signal_handler.h index 3ff36374f..c69a45869 100644 --- a/source/common/signal_handler.h +++ b/source/common/signal_handler.h @@ -69,8 +69,9 @@ class SignalHandler final : public Envoy::Logger::Loggable pipe_fds_; + bool destructing_{false}; }; using SignalHandlerPtr = std::unique_ptr; -} // namespace Nighthawk \ No newline at end of file +} // namespace Nighthawk diff --git a/test/common/BUILD b/test/common/BUILD index 4c0547785..54c96ad4d 100644 --- 
a/test/common/BUILD +++ b/test/common/BUILD @@ -38,3 +38,12 @@ envoy_cc_test( "@com_github_grpc_grpc//:grpc++_test", ], ) + +envoy_cc_test( + name = "signal_handler_test", + srcs = ["signal_handler_test.cc"], + repository = "@envoy", + deps = [ + "//source/common:nighthawk_common_lib", + ], +) diff --git a/test/common/signal_handler_test.cc b/test/common/signal_handler_test.cc new file mode 100644 index 000000000..cabb2251b --- /dev/null +++ b/test/common/signal_handler_test.cc @@ -0,0 +1,35 @@ +#include +#include + +#include "common/signal_handler.h" + +#include "gtest/gtest.h" + +namespace Nighthawk { +namespace { + +TEST(SignalHandlerTest, SignalGetsHandled) { + for (const auto signal : {SIGTERM, SIGINT}) { + bool signal_handled = false; + std::promise signal_all_threads_running; + + SignalHandler signal_handler([&signal_handled, &signal_all_threads_running]() { + signal_handled = true; + signal_all_threads_running.set_value(); + }); + std::raise(signal); + signal_all_threads_running.get_future().wait(); + EXPECT_TRUE(signal_handled); + } +} + +TEST(SignalHandlerTest, DestructDoesNotFireHandler) { + bool signal_handled = false; + { + SignalHandler signal_handler([&signal_handled]() { signal_handled = true; }); + } + EXPECT_FALSE(signal_handled); +} + +} // namespace +} // namespace Nighthawk From e6f9af70d446ee018f1d72f8e46b8c1a22c186f4 Mon Sep 17 00:00:00 2001 From: Jakub Sobon Date: Wed, 13 Jan 2021 11:06:14 -0500 Subject: [PATCH 56/63] Update Envoy to c2522c69f (Jan 11th 2021). (#604) - `Envoy::StreamInfo::StreamInfoImpl` no longer contains methods that set local and remote socket addresses. It instead requires a `Envoy::Network::SocketAddressProviderSharedPtr` with the addresses already set. Pass in a `Envoy::Network::SocketAddressSetterImpl` so that we can continue setting the remote address for tests. - Copied `.bazelversion` and `.bazelrc` from Envoy. 
Signed-off-by: Jakub Sobon --- .bazelrc | 4 +++- .bazelversion | 2 +- bazel/repositories.bzl | 4 ++-- source/client/stream_decoder.cc | 6 +----- source/client/stream_decoder.h | 10 +++++++--- 5 files changed, 14 insertions(+), 12 deletions(-) diff --git a/.bazelrc b/.bazelrc index ebe96668d..1e3e53623 100644 --- a/.bazelrc +++ b/.bazelrc @@ -60,6 +60,8 @@ build:asan --config=sanitizer # ASAN install its signal handler, disable ours so the stacktrace will be printed by ASAN build:asan --define signal_trace=disabled build:asan --define ENVOY_CONFIG_ASAN=1 +# The following two lines were manually edited due to #593. +# Flag undefined was dropped from both the lines to allow CI/ASAN to pass. build:asan --copt -fsanitize=address build:asan --linkopt -fsanitize=address # vptr and function sanitizer are enabled in clang-asan if it is set up via bazel/setup_clang.sh. @@ -254,7 +256,7 @@ build:remote-clang-cl --config=rbe-toolchain-clang-cl # Docker sandbox # NOTE: Update this from https://github.com/envoyproxy/envoy-build-tools/blob/master/toolchains/rbe_toolchains_config.bzl#L8 -build:docker-sandbox --experimental_docker_image=envoyproxy/envoy-build-ubuntu:19a268cfe3d12625380e7c61d2467c8779b58b56 +build:docker-sandbox --experimental_docker_image=envoyproxy/envoy-build-ubuntu:11efa5680d987fff33fde4af3cc5ece105015d04 build:docker-sandbox --spawn_strategy=docker build:docker-sandbox --strategy=Javac=docker build:docker-sandbox --strategy=Closure=docker diff --git a/.bazelversion b/.bazelversion index 40c341bdc..0b2eb36f5 100644 --- a/.bazelversion +++ b/.bazelversion @@ -1 +1 @@ -3.6.0 +3.7.2 diff --git a/bazel/repositories.bzl b/bazel/repositories.bzl index b1be82202..e0ff6576b 100644 --- a/bazel/repositories.bzl +++ b/bazel/repositories.bzl @@ -1,7 +1,7 @@ load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") -ENVOY_COMMIT = "937f0133355cf2d8eb9e75d3f09e296bb63951be" # Jan 5th, 2021 -ENVOY_SHA = 
"5bdb98e2f2bad83ac672862e399e4ae2d0ca28c31740a3af16d154fc37401ca7" +ENVOY_COMMIT = "c2522c69f69b318e30927995468c7f440ad2cb5c" # Jan 11th, 2021 +ENVOY_SHA = "2d9b674e8bf249b38d338efe30c0b4e8c8b892f8c5d7f11d1654af0962002cf5" HDR_HISTOGRAM_C_VERSION = "0.11.2" # October 12th, 2020 HDR_HISTOGRAM_C_SHA = "637f28b5f64de2e268131e4e34e6eef0b91cf5ff99167db447d9b2825eae6bad" diff --git a/source/client/stream_decoder.cc b/source/client/stream_decoder.cc index 21c09d74c..c57555950 100644 --- a/source/client/stream_decoder.cc +++ b/source/client/stream_decoder.cc @@ -182,11 +182,7 @@ void StreamDecoder::setupForTracing() { // segfault without it. const auto remote_address = Envoy::Network::Address::InstanceConstSharedPtr{ new Envoy::Network::Address::Ipv4Instance("127.0.0.1")}; - stream_info_.setDownstreamDirectRemoteAddress(remote_address); - // For good measure, we also set DownstreamRemoteAddress, as the associated getter will crash - // if we don't. So this is just in case anyone calls that (or Envoy starts doing so in the - // future). 
- stream_info_.setDownstreamRemoteAddress(remote_address); + downstream_address_setter_->setDirectRemoteAddressForTest(remote_address); } } // namespace Client diff --git a/source/client/stream_decoder.h b/source/client/stream_decoder.h index cb0f454dc..cdb0653a0 100644 --- a/source/client/stream_decoder.h +++ b/source/client/stream_decoder.h @@ -57,9 +57,12 @@ class StreamDecoder : public Envoy::Http::ResponseDecoder, origin_latency_statistic_(origin_latency_statistic), request_headers_(std::move(request_headers)), connect_start_(time_source_.monotonicTime()), complete_(false), measure_latencies_(measure_latencies), - request_body_size_(request_body_size), stream_info_(time_source_), - random_generator_(random_generator), http_tracer_(http_tracer), - latency_response_header_name_(latency_response_header_name) { + request_body_size_(request_body_size), + downstream_address_setter_(std::make_shared( + // The two addresses aren't used in an execution of Nighthawk. + /* downstream_local_address = */ nullptr, /* downstream_remote_address = */ nullptr)), + stream_info_(time_source_, downstream_address_setter_), random_generator_(random_generator), + http_tracer_(http_tracer), latency_response_header_name_(latency_response_header_name) { if (measure_latencies_ && http_tracer_ != nullptr) { setupForTracing(); } @@ -117,6 +120,7 @@ class StreamDecoder : public Envoy::Http::ResponseDecoder, bool measure_latencies_; const uint32_t request_body_size_; Envoy::Tracing::EgressConfigImpl config_; + std::shared_ptr downstream_address_setter_; Envoy::StreamInfo::StreamInfoImpl stream_info_; Envoy::Random::RandomGenerator& random_generator_; Envoy::Tracing::HttpTracerSharedPtr& http_tracer_; From 566c3f351df8e8b9e4b8de9b39a2bf7fd549ff22 Mon Sep 17 00:00:00 2001 From: Otto van der Schaaf Date: Wed, 20 Jan 2021 04:28:36 +0100 Subject: [PATCH 57/63] Update Envoy to 17e8151 (Jan 16th 2021) (#606) Updates the Envoy dependency to revision: 17e815122ff53d0ac6cb2d64cdbf1bfc547bb7e8 Non 
mechanical change: - LocalinfoImpl constructor changed Signed-off-by: Otto van der Schaaf --- .bazelrc | 2 +- bazel/repositories.bzl | 4 ++-- source/client/process_impl.cc | 2 +- source/client/process_impl.h | 2 ++ 4 files changed, 6 insertions(+), 4 deletions(-) diff --git a/.bazelrc b/.bazelrc index 1e3e53623..3b8451ff5 100644 --- a/.bazelrc +++ b/.bazelrc @@ -255,7 +255,7 @@ build:remote-clang-cl --config=clang-cl build:remote-clang-cl --config=rbe-toolchain-clang-cl # Docker sandbox -# NOTE: Update this from https://github.com/envoyproxy/envoy-build-tools/blob/master/toolchains/rbe_toolchains_config.bzl#L8 +# NOTE: Update this from https://github.com/envoyproxy/envoy-build-tools/blob/main/toolchains/rbe_toolchains_config.bzl#L8 build:docker-sandbox --experimental_docker_image=envoyproxy/envoy-build-ubuntu:11efa5680d987fff33fde4af3cc5ece105015d04 build:docker-sandbox --spawn_strategy=docker build:docker-sandbox --strategy=Javac=docker diff --git a/bazel/repositories.bzl b/bazel/repositories.bzl index e0ff6576b..bb00901de 100644 --- a/bazel/repositories.bzl +++ b/bazel/repositories.bzl @@ -1,7 +1,7 @@ load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") -ENVOY_COMMIT = "c2522c69f69b318e30927995468c7f440ad2cb5c" # Jan 11th, 2021 -ENVOY_SHA = "2d9b674e8bf249b38d338efe30c0b4e8c8b892f8c5d7f11d1654af0962002cf5" +ENVOY_COMMIT = "17e815122ff53d0ac6cb2d64cdbf1bfc547bb7e8" # Jan 16th, 2021 +ENVOY_SHA = "9c5f6a2d01796f3aed25796d47a7e5d800db7633baf627e77cf6c7f1b45a965f" HDR_HISTOGRAM_C_VERSION = "0.11.2" # October 12th, 2020 HDR_HISTOGRAM_C_SHA = "637f28b5f64de2e268131e4e34e6eef0b91cf5ff99167db447d9b2825eae6bad" diff --git a/source/client/process_impl.cc b/source/client/process_impl.cc index 52523d3f5..f19f00cfc 100644 --- a/source/client/process_impl.cc +++ b/source/client/process_impl.cc @@ -127,7 +127,7 @@ ProcessImpl::ProcessImpl(const Options& options, Envoy::Event::TimeSystem& time_ request_generator_factory_(options, *api_), options_(options), 
init_manager_("nh_init_manager"), local_info_(new Envoy::LocalInfo::LocalInfoImpl( - store_root_.symbolTable(), {}, + store_root_.symbolTable(), node_, node_context_params_, Envoy::Network::Utility::getLocalAddress(Envoy::Network::Address::IpVersion::v4), "nighthawk_service_zone", "nighthawk_service_cluster", "nighthawk_service_node")), secret_manager_(config_tracker_), http_context_(store_root_.symbolTable()), diff --git a/source/client/process_impl.h b/source/client/process_impl.h index 0344262db..5936007fc 100644 --- a/source/client/process_impl.h +++ b/source/client/process_impl.h @@ -167,6 +167,8 @@ class ProcessImpl : public Process, public Envoy::Logger::Loggable node_context_params_; std::shared_ptr process_wide_; Envoy::PlatformImpl platform_impl_; Envoy::Event::TimeSystem& time_system_; From 9111749bf3b6ee0019f1066e6e86aab5a868a568 Mon Sep 17 00:00:00 2001 From: Jakub Sobon Date: Mon, 25 Jan 2021 12:47:02 -0500 Subject: [PATCH 58/63] Bumping up versions of docker and openrc. (#610) This fixes the broken docker ci workflow. Signed-off-by: Jakub Sobon --- ci/docker/Dockerfile-nighthawk-benchmark | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ci/docker/Dockerfile-nighthawk-benchmark b/ci/docker/Dockerfile-nighthawk-benchmark index b93d2ae1c..cdc1f07e6 100644 --- a/ci/docker/Dockerfile-nighthawk-benchmark +++ b/ci/docker/Dockerfile-nighthawk-benchmark @@ -8,7 +8,7 @@ WORKDIR /usr/local/bin/benchmarks COPY benchmarks /usr/local/bin/benchmarks/ -RUN apk add --no-cache docker=19.03.12-r0 openrc=0.42.1-r11 python3>=3.8.5 +RUN apk add --no-cache docker=20.10.2-r0 openrc=0.42.1-r19 python3>=3.8.5 RUN rc-update add docker boot RUN if [ ! -e /usr/bin/python ]; then ln -sf python3 /usr/bin/python; fi && \ From 81bf056be43627d5aff58319267783b17cb32084 Mon Sep 17 00:00:00 2001 From: Jakub Sobon Date: Tue, 26 Jan 2021 01:28:26 -0500 Subject: [PATCH 59/63] Update Envoy to 9753819 (Jan 24th 2021).
(#607) - tryCreateNewConnection now returns a ConnPoolImplBase::ConnectionResult instead of a bool. Signed-off-by: Jakub Sobon --- .bazelrc | 2 +- bazel/repositories.bzl | 4 ++-- source/client/benchmark_client_impl.cc | 3 ++- 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/.bazelrc b/.bazelrc index 3b8451ff5..02cf280f2 100644 --- a/.bazelrc +++ b/.bazelrc @@ -256,7 +256,7 @@ build:remote-clang-cl --config=rbe-toolchain-clang-cl # Docker sandbox # NOTE: Update this from https://github.com/envoyproxy/envoy-build-tools/blob/main/toolchains/rbe_toolchains_config.bzl#L8 -build:docker-sandbox --experimental_docker_image=envoyproxy/envoy-build-ubuntu:11efa5680d987fff33fde4af3cc5ece105015d04 +build:docker-sandbox --experimental_docker_image=envoyproxy/envoy-build-ubuntu:c8fa4235714003ba0896287ee2f91cae06e0e407 build:docker-sandbox --spawn_strategy=docker build:docker-sandbox --strategy=Javac=docker build:docker-sandbox --strategy=Closure=docker diff --git a/bazel/repositories.bzl b/bazel/repositories.bzl index bb00901de..1dc695f43 100644 --- a/bazel/repositories.bzl +++ b/bazel/repositories.bzl @@ -1,7 +1,7 @@ load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") -ENVOY_COMMIT = "17e815122ff53d0ac6cb2d64cdbf1bfc547bb7e8" # Jan 16th, 2021 -ENVOY_SHA = "9c5f6a2d01796f3aed25796d47a7e5d800db7633baf627e77cf6c7f1b45a965f" +ENVOY_COMMIT = "9753819331d1547c4b8294546a6461a3777958f5" # Jan 24th, 2021 +ENVOY_SHA = "f4d26c7e78c0a478d959ea8bc877f260d4658a8b44e294e3a400f20ad44d41a3" HDR_HISTOGRAM_C_VERSION = "0.11.2" # October 12th, 2020 HDR_HISTOGRAM_C_SHA = "637f28b5f64de2e268131e4e34e6eef0b91cf5ff99167db447d9b2825eae6bad" diff --git a/source/client/benchmark_client_impl.cc b/source/client/benchmark_client_impl.cc index 28c3ac6e3..53428424a 100644 --- a/source/client/benchmark_client_impl.cc +++ b/source/client/benchmark_client_impl.cc @@ -59,7 +59,8 @@ Http1PoolImpl::newStream(Envoy::Http::ResponseDecoder& response_decoder, while 
(host_->cluster().resourceManager(priority_).connections().canCreate()) { // We pass in a high prefetch ratio, because we don't want to throttle the prefetched // connection amount like Envoy does out of the box. - if (!tryCreateNewConnection(10000.0)) { + ConnPoolImplBase::ConnectionResult result = tryCreateNewConnection(10000.0); + if (result != ConnectionResult::CreatedNewConnection) { break; } } From 9448036de3dbe29a6bba4d58dff3703a95f36979 Mon Sep 17 00:00:00 2001 From: Jakub Sobon Date: Tue, 26 Jan 2021 14:05:14 -0500 Subject: [PATCH 60/63] Updating references to branch name master->main. (#608) * Updating references to branch name master->main. The branch was renamed recently. Signed-off-by: Jakub Sobon --- CONTRIBUTING.md | 10 +++++----- MAINTAINERS.md | 4 ++-- PROFILING.md | 4 ++-- README.md | 8 ++++---- RELEASE_PROCEDURE.md | 2 +- api/adaptive_load/adaptive_load.proto | 2 +- benchmarks/README.md | 2 +- ci/docker/docker_push.sh | 6 +++--- docs/root/overview.md | 6 +++--- docs/root/statistics.md | 8 ++++---- extensions_build_config.bzl | 2 +- source/client/options_impl.cc | 2 +- source/server/README.md | 2 +- 13 files changed, 29 insertions(+), 29 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 9eabaf0a3..4ca44db8f 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -8,7 +8,7 @@ maximize the chances of your PR being merged. # Coding style -* Coding style mirrors [Envoy's policy](https://github.com/envoyproxy/envoy/blob/master/STYLE.md) +* Coding style mirrors [Envoy's policy](https://github.com/envoyproxy/envoy/blob/main/STYLE.md) # Breaking change policy @@ -16,22 +16,22 @@ Both API and implementation stability are important to Nighthawk. Since the API # Submitting a PR -* Generally Nighthawk mirrors [Envoy's policy](https://github.com/envoyproxy/envoy/blob/master/CONTRIBUTING.md#submitting-a-pr) with respect to PR submission policy. 
+* Generally Nighthawk mirrors [Envoy's policy](https://github.com/envoyproxy/envoy/blob/main/CONTRIBUTING.md#submitting-a-pr) with respect to PR submission policy. * Any PR that changes user-facing behavior **must** have associated documentation in [docs](docs) as well as [release notes](docs/root/version_history.md). # PR review policy for maintainers -* Generally Nighthawk mirrors [Envoy's policy](https://github.com/envoyproxy/envoy/blob/master/CONTRIBUTING.md#pr-review-policy-for-maintainers) with respect to maintainer review policy. +* Generally Nighthawk mirrors [Envoy's policy](https://github.com/envoyproxy/envoy/blob/main/CONTRIBUTING.md#pr-review-policy-for-maintainers) with respect to maintainer review policy. * See [OWNERS.md](OWNERS.md) for the current list of maintainers. * It is helpful if you apply the label `waiting-for-review` to any PRs that are ready to be reviewed by a maintainer. * Reviewers will change the label to `waiting-for-changes` when responding. # DCO: Sign your work -Commits need to be signed off. See [here](https://github.com/envoyproxy/envoy/blob/master/CONTRIBUTING.md#dco-sign-your-work). +Commits need to be signed off. See [here](https://github.com/envoyproxy/envoy/blob/main/CONTRIBUTING.md#dco-sign-your-work). ## Triggering CI re-run without making changes -See [here](https://github.com/envoyproxy/envoy/blob/master/CONTRIBUTING.md#triggering-ci-re-run-without-making-changes). \ No newline at end of file +See [here](https://github.com/envoyproxy/envoy/blob/main/CONTRIBUTING.md#triggering-ci-re-run-without-making-changes). diff --git a/MAINTAINERS.md b/MAINTAINERS.md index 8535ce1a3..e66bac8c5 100644 --- a/MAINTAINERS.md +++ b/MAINTAINERS.md @@ -29,6 +29,6 @@ Envoy internals. We try to [regularly synchronize our Envoy dependency](https://github.com/envoyproxy/nighthawk/pulls?utf8=%E2%9C%93&q=is%3Apr+is%3Aclosed+%22update+envoy%22+) with the latest revision. Nighthawk reuses large parts of Envoy's build system and CI infrastructure. 
When we update, that looks like: - A change to [repositories.bzl](bazel/repositories.bzl) to update the commit and SHA. -- A sync of [.bazelrc](.bazelrc) with [Envoy's version](https://github.com/envoyproxy/envoy/blob/master/.bazelrc) to update our build configurations. -- A sync of the build image sha used in the [ci configuration](.circleci/config.yml) with [Envoy's version](https://github.com/envoyproxy/envoy/blob/master/.circleci/config.yml) to sync our CI testing environment. +- A sync of [.bazelrc](.bazelrc) with [Envoy's version](https://github.com/envoyproxy/envoy/blob/main/.bazelrc) to update our build configurations. +- A sync of the build image sha used in the [ci configuration](.circleci/config.yml) with [Envoy's version](https://github.com/envoyproxy/envoy/blob/main/.circleci/config.yml) to sync our CI testing environment. - Sometimes the dependency update comes with changes that break our build. We include any changes required to Nighthawk to fix that. diff --git a/PROFILING.md b/PROFILING.md index 23e5ddb6a..aa9a508d1 100644 --- a/PROFILING.md +++ b/PROFILING.md @@ -41,7 +41,7 @@ The interface served at localhost:8888 gives you various means to help with anal ### Envoy build -See [building Envoy with Bazel](https://github.com/envoyproxy/envoy/tree/master/bazel#building-envoy-with-bazel). +See [building Envoy with Bazel](https://github.com/envoyproxy/envoy/tree/main/bazel#building-envoy-with-bazel). 
Envoy’s static build is set up for profiling and can be build with: @@ -49,7 +49,7 @@ Envoy’s static build is set up for profiling and can be build with: bazel build //source/exe:envoy-static ``` -More context: https://github.com/envoyproxy/envoy/blob/master/bazel/PPROF.md +More context: https://github.com/envoyproxy/envoy/blob/main/bazel/PPROF.md ### Nighthawk build diff --git a/README.md b/README.md index 36df4aee8..dbbd6d238 100644 --- a/README.md +++ b/README.md @@ -15,7 +15,7 @@ Nighthawk currently offers: ### Ubuntu -First, follow steps 1 and 2 over at [Quick start Bazel build for developers](https://github.com/envoyproxy/envoy/blob/master/bazel/README.md#quick-start-bazel-build-for-developers). +First, follow steps 1 and 2 over at [Quick start Bazel build for developers](https://github.com/envoyproxy/envoy/blob/main/bazel/README.md#quick-start-bazel-build-for-developers). ## Building and using the Nighthawk client CLI @@ -117,9 +117,9 @@ Nighthawk writes to the output. Default is false. --request-source-plugin-config [Request -Source](https://github.com/envoyproxy/nighthawk/blob/master/docs/root/ -overview.md#requestsource) plugin configuration in json or compact -yaml. Mutually exclusive with --request-source. Example (json): +Source](https://github.com/envoyproxy/nighthawk/blob/main/docs/root/ov +erview.md#requestsource) plugin configuration in json or compact yaml. +Mutually exclusive with --request-source. Example (json): {name:"nighthawk.stub-request-source-plugin" ,typed_config:{"@type":"type.googleapis.com/nighthawk.request_source.S tubPluginConfig",test_value:"3"}} diff --git a/RELEASE_PROCEDURE.md b/RELEASE_PROCEDURE.md index e8a3542d0..2950da642 100644 --- a/RELEASE_PROCEDURE.md +++ b/RELEASE_PROCEDURE.md @@ -14,7 +14,7 @@ ## Release steps -1. Speculatively bump the version in [version_info.h](source/common/version_info.h) to the version you determined earlier. 
This may result in version gaps if a release attempt fails, but avoids having to freeze merges to master and/or having to work with release branches. In short it helps keeping the release procedure lean and mean and eliminates the need for blocking others while this procedure is in-flight. +1. Speculatively bump the version in [version_info.h](source/common/version_info.h) to the version you determined earlier. This may result in version gaps if a release attempt fails, but avoids having to freeze merges to main and/or having to work with release branches. In short it helps keeping the release procedure lean and mean and eliminates the need for blocking others while this procedure is in-flight. 2. Draft a [GitHub tagged release](https://github.com/envoyproxy/nighthawk/releases/new). Earlier releases are tagged like `v0.1`, but as of `0.3.0`we are using [semantic versioning](https://semver.org/spec/v2.0.0.html) 3. Perform thorough testing of the targeted revision to double down on stability [1] 4. Create an optimized build for comparing with the previous release. Changes in performance diff --git a/api/adaptive_load/adaptive_load.proto b/api/adaptive_load/adaptive_load.proto index 43d2456e7..2bf0c71ed 100644 --- a/api/adaptive_load/adaptive_load.proto +++ b/api/adaptive_load/adaptive_load.proto @@ -31,7 +31,7 @@ message AdaptiveLoadSessionSpec { // visualization. Optional. repeated MetricSpec informational_metric_specs = 3; // A proto describing Nighthawk Service traffic. See - // https://github.com/envoyproxy/nighthawk/blob/master/api/client/options.proto + // https://github.com/envoyproxy/nighthawk/blob/main/api/client/options.proto // // The adaptive load controller will return an error if the |duration| field is set within // |nighthawk_traffic_template|. 
diff --git a/benchmarks/README.md b/benchmarks/README.md index 2732a378b..65e3ca1e6 100644 --- a/benchmarks/README.md +++ b/benchmarks/README.md @@ -128,4 +128,4 @@ docker run -it --rm \ - Have a mode where nighthawk_test_server provides high-res control timings in its access logs - The ability to repeat the runs multiple times and obtain stats, e.g. how much variance there is, mean, etc. -- The ability to do A/B testing, similar to https://github.com/envoyproxy/envoy-perf/blob/master/siege/siege.py#L3. +- The ability to do A/B testing, similar to https://github.com/envoyproxy/envoy-perf/blob/main/siege/siege.py#L3. diff --git a/ci/docker/docker_push.sh b/ci/docker/docker_push.sh index 9fd065a87..e5091e523 100755 --- a/ci/docker/docker_push.sh +++ b/ci/docker/docker_push.sh @@ -12,8 +12,8 @@ fi DOCKER_IMAGE_PREFIX="${DOCKER_IMAGE_PREFIX:-envoyproxy/nighthawk}" -# push the nighthawk image on tags or merge to master -if [[ "$CIRCLE_BRANCH" = 'master' ]]; then +# push the nighthawk image on tags or merge to main +if [[ "$CIRCLE_BRANCH" = 'main' ]]; then docker login -u "$DOCKERHUB_USERNAME" -p "$DOCKERHUB_PASSWORD" docker push "${DOCKER_IMAGE_PREFIX}-dev:latest" docker tag "${DOCKER_IMAGE_PREFIX}-dev:latest" "${DOCKER_IMAGE_PREFIX}-dev:${CIRCLE_SHA1}" @@ -26,6 +26,6 @@ else docker tag "${DOCKER_IMAGE_PREFIX}:${TAG}" "${DOCKER_IMAGE_PREFIX}:${TAG}" docker push "${DOCKER_IMAGE_PREFIX}:${TAG}" else - echo 'Ignoring non-master branch for docker push.' + echo 'Ignoring non-main branch for docker push.' fi fi diff --git a/docs/root/overview.md b/docs/root/overview.md index 7d53c419f..46cb147ce 100644 --- a/docs/root/overview.md +++ b/docs/root/overview.md @@ -58,7 +58,7 @@ back reports per phase. ## Key concept descriptions -*The c++ interface definitions for the concepts below can be found [here](https://github.com/envoyproxy/nighthawk/tree/master/include/nighthawk)*. 
+*The c++ interface definitions for the concepts below can be found [here](https://github.com/envoyproxy/nighthawk/tree/main/include/nighthawk)*. ### Process @@ -195,5 +195,5 @@ Users of Nighthawk can specify custom format and destination (logging sink delegate) for all Nighthawk logging messages. Nighthawk utilizes the Envoy's logging mechanism by performing all logging via the **ENVOY_LOG** macro. To customize this mechanism, users need to perform two steps: -1. Create a logging sink delegate inherited from [Envoy SinkDelegate](https://github.com/envoyproxy/envoy/blob/master/source/common/common/logger.h). -2. Construct a ServiceImpl object with an [Envoy Logger Context](https://github.com/envoyproxy/envoy/blob/master/source/common/common/logger.h) which contains user-specified log level and format. +1. Create a logging sink delegate inherited from [Envoy SinkDelegate](https://github.com/envoyproxy/envoy/blob/main/source/common/common/logger.h). +2. Construct a ServiceImpl object with an [Envoy Logger Context](https://github.com/envoyproxy/envoy/blob/main/source/common/common/logger.h) which contains user-specified log level and format. diff --git a/docs/root/statistics.md b/docs/root/statistics.md index 932e5e53f..f8419419b 100644 --- a/docs/root/statistics.md +++ b/docs/root/statistics.md @@ -61,7 +61,7 @@ histogram values are sent directly to the sinks. A stat is an interface that takes generic stat data and translates it into a backend-specific wire format. Currently Envoy supports the TCP and UDP [statsd](https://github.com/b/statsd_spec) protocol (implemented in -[statsd.h](https://github.com/envoyproxy/envoy/blob/master/source/extensions/stat_sinks/common/statsd/statsd.h)). +[statsd.h](https://github.com/envoyproxy/envoy/blob/main/source/extensions/stat_sinks/common/statsd/statsd.h)). Users can create their own Sink subclass to translate Envoy metrics into backend-specific format. 
@@ -90,7 +90,7 @@ stats.upstream_cx_length_.recordValue(...); Currently Envoy metrics don't support key-value map. As a result, for metrics to be broken down by certain dimensions, we need to define a separate metric for each dimension. For example, currently Nighthawk defines -[separate counters](https://github.com/envoyproxy/nighthawk/blob/master/source/client/benchmark_client_impl.h#L35-L40) +[separate counters](https://github.com/envoyproxy/nighthawk/blob/main/source/client/benchmark_client_impl.h#L35-L40) to monitor the number of responses with corresponding response code. ## Envoy Metrics Flush @@ -128,7 +128,7 @@ key-value map. ## Reference - [Nighthawk: architecture and key - concepts](https://github.com/envoyproxy/nighthawk/blob/master/docs/root/overview.md) + concepts](https://github.com/envoyproxy/nighthawk/blob/main/docs/root/overview.md) - [Envoy Stats - System](https://github.com/envoyproxy/envoy/blob/master/source/docs/stats.md) + System](https://github.com/envoyproxy/envoy/blob/main/source/docs/stats.md) - [Envoy Stats blog](https://blog.envoyproxy.io/envoy-stats-b65c7f363342) diff --git a/extensions_build_config.bzl b/extensions_build_config.bzl index b77ba36a4..a15024419 100644 --- a/extensions_build_config.bzl +++ b/extensions_build_config.bzl @@ -1,4 +1,4 @@ -# See https://github.com/envoyproxy/envoy/blob/master/bazel/README.md#disabling-extensions for details on how this system works. +# See https://github.com/envoyproxy/envoy/blob/main/bazel/README.md#disabling-extensions for details on how this system works. 
EXTENSIONS = { "envoy.filters.http.router": "//source/extensions/filters/http/router:config", "envoy.filters.http.fault": "//source/extensions/filters/http/fault:config", diff --git a/source/client/options_impl.cc b/source/client/options_impl.cc index 935a423ff..4cf761a41 100644 --- a/source/client/options_impl.cc +++ b/source/client/options_impl.cc @@ -270,7 +270,7 @@ OptionsImpl::OptionsImpl(int argc, const char* const* argv) { TCLAP::ValueArg<std::string> request_source_plugin_config( "", "request-source-plugin-config", "[Request " - "Source](https://github.com/envoyproxy/nighthawk/blob/master/docs/root/" + "Source](https://github.com/envoyproxy/nighthawk/blob/main/docs/root/" "overview.md#requestsource) plugin configuration in json or compact yaml. " "Mutually exclusive with --request-source. Example (json): " "{name:\"nighthawk.stub-request-source-plugin\",typed_config:{" diff --git a/source/server/README.md b/source/server/README.md index ad0fb9dcd..81ada3815 100644 --- a/source/server/README.md +++ b/source/server/README.md @@ -15,7 +15,7 @@ bazel build -c opt :nighthawk_test_server ``` It is possible to -[enable additional envoy extension](https://github.com/envoyproxy/envoy/blob/master/source/extensions/extensions_build_config.bzl) by adding them [here](../../extensions_build_config.bzl) before the build. +[enable additional envoy extension](https://github.com/envoyproxy/envoy/blob/main/source/extensions/extensions_build_config.bzl) by adding them [here](../../extensions_build_config.bzl) before the build. By default, Nighthawk's test server is set up with the minimum extension set needed for it to operate as documented.
Signed-off-by: Jakub Sobon --- benchmarks/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/benchmarks/README.md b/benchmarks/README.md index 65e3ca1e6..33244c8bf 100644 --- a/benchmarks/README.md +++ b/benchmarks/README.md @@ -26,7 +26,7 @@ It will run a selection of an example [benchmarks](test/test_discovery.py) extracted from `/benchmarks`, which injects Envoy between the benchmark client and test server. ```bash -git clone https://github.com/oschaaf/nighthawk.git benchmark-test +git clone https://github.com/envoyproxy/nighthawk.git benchmark-test cd benchmark-test bazel build //benchmarks:benchmarks @@ -54,7 +54,7 @@ client and server. If not set, the benchmark suite will fall back to configuring Nighthawk's test server for that. Note that the build can be a lengthy process. ```bash -git clone https://github.com/oschaaf/nighthawk.git benchmark-test +git clone https://github.com/envoyproxy/nighthawk.git benchmark-test cd benchmark-test bazel test \ --test_summary=detailed \ From f55bc97d2cda041d1e75f5a4c02234a9c3b7a86f Mon Sep 17 00:00:00 2001 From: Otto van der Schaaf Date: Mon, 1 Feb 2021 19:39:54 +0100 Subject: [PATCH 62/63] Update Envoy to f6679d5 (Feb 1st 2021) (#614) Signed-off-by: Otto van der Schaaf --- bazel/repositories.bzl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/bazel/repositories.bzl b/bazel/repositories.bzl index 1dc695f43..e13ff81b5 100644 --- a/bazel/repositories.bzl +++ b/bazel/repositories.bzl @@ -1,7 +1,7 @@ load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") -ENVOY_COMMIT = "9753819331d1547c4b8294546a6461a3777958f5" # Jan 24th, 2021 -ENVOY_SHA = "f4d26c7e78c0a478d959ea8bc877f260d4658a8b44e294e3a400f20ad44d41a3" +ENVOY_COMMIT = "f6679d51cc7b2f0b5e05c883a035ad87d011f454" # Feb 1st, 2021 +ENVOY_SHA = "77a765b6f1063925ac53d09335dd23b546b4254a392755bddf5d81b4d299cd5f" HDR_HISTOGRAM_C_VERSION = "0.11.2" # October 12th, 2020 HDR_HISTOGRAM_C_SHA = 
"637f28b5f64de2e268131e4e34e6eef0b91cf5ff99167db447d9b2825eae6bad" From 58dbc8fbf443c7888f71145dec36cc1e1d972e09 Mon Sep 17 00:00:00 2001 From: eric846 <56563761+eric846@users.noreply.github.com> Date: Thu, 4 Feb 2021 15:54:06 -0500 Subject: [PATCH 63/63] Mock Adaptive Load Controller (#615) A mock for AdaptiveLoadController for use in unit tests of the upcoming adaptive load CLI executable. Part of #416 Signed-off-by: eric846 <56563761+eric846@users.noreply.github.com> --- test/mocks/adaptive_load/BUILD | 10 ++++++ .../mock_adaptive_load_controller.cc | 7 ++++ .../mock_adaptive_load_controller.h | 33 +++++++++++++++++++ 3 files changed, 50 insertions(+) create mode 100644 test/mocks/adaptive_load/mock_adaptive_load_controller.cc create mode 100644 test/mocks/adaptive_load/mock_adaptive_load_controller.h diff --git a/test/mocks/adaptive_load/BUILD b/test/mocks/adaptive_load/BUILD index ff68762f6..44402c141 100644 --- a/test/mocks/adaptive_load/BUILD +++ b/test/mocks/adaptive_load/BUILD @@ -8,6 +8,16 @@ licenses(["notice"]) # Apache 2 envoy_package() +envoy_cc_mock( + name = "mock_adaptive_load_controller", + srcs = ["mock_adaptive_load_controller.cc"], + hdrs = ["mock_adaptive_load_controller.h"], + repository = "@envoy", + deps = [ + "//include/nighthawk/adaptive_load:adaptive_load_controller", + ], +) + envoy_cc_mock( name = "mock_metrics_evaluator", srcs = ["mock_metrics_evaluator.cc"], diff --git a/test/mocks/adaptive_load/mock_adaptive_load_controller.cc b/test/mocks/adaptive_load/mock_adaptive_load_controller.cc new file mode 100644 index 000000000..a0f0578dd --- /dev/null +++ b/test/mocks/adaptive_load/mock_adaptive_load_controller.cc @@ -0,0 +1,7 @@ +#include "test/mocks/adaptive_load/mock_adaptive_load_controller.h" + +namespace Nighthawk { + +MockAdaptiveLoadController::MockAdaptiveLoadController() = default; + +} // namespace Nighthawk diff --git a/test/mocks/adaptive_load/mock_adaptive_load_controller.h 
b/test/mocks/adaptive_load/mock_adaptive_load_controller.h new file mode 100644 index 000000000..0a0be0d28 --- /dev/null +++ b/test/mocks/adaptive_load/mock_adaptive_load_controller.h @@ -0,0 +1,33 @@ +#pragma once + +#include "nighthawk/adaptive_load/adaptive_load_controller.h" + +#include "gmock/gmock.h" + +namespace Nighthawk { + +/** + * A mock AdaptiveLoadController that returns empty values or success from all methods + * by default. + * + * + * Typical usage: + * + * NiceMock<MockAdaptiveLoadController> mock_controller; + * EXPECT_CALL(mock_controller, PerformAdaptiveLoadSession(_)) + * .WillOnce(Return(AdaptiveLoadSessionOutput())); + */ +class MockAdaptiveLoadController : public AdaptiveLoadController { +public: + /** + * Empty constructor. + */ + MockAdaptiveLoadController(); + + MOCK_METHOD(absl::StatusOr<nighthawk::adaptive_load::AdaptiveLoadSessionOutput>, + PerformAdaptiveLoadSession, + (nighthawk::client::NighthawkService::StubInterface * nighthawk_service_stub, + const nighthawk::adaptive_load::AdaptiveLoadSessionSpec& spec)); +}; + +} // namespace Nighthawk