From 1301f1182bbbddf6f20d1b29acb04b54e75a5187 Mon Sep 17 00:00:00 2001 From: Dan Rosen Date: Fri, 22 Mar 2019 15:48:03 -0400 Subject: [PATCH 001/165] common: introduce new "safe" init manager (#6296) Introduce a new "safe" init manager, to replace the existing one that's prone to use-after-free issues (see e.g. #6116). Users of the existing init manager will be upgraded one-by-one in subsequent PRs if this design is approved. See also previous false starts in PRs #6136 and #6245. Risk Level: Low, no existing users of the existing init manager are changed in this PR. Testing: New unit tests added. Docs Changes: n/a Release Notes: n/a Signed-off-by: Dan Rosen --- include/envoy/init/init.h | 6 +- include/envoy/safe_init/BUILD | 31 ++++ include/envoy/safe_init/manager.h | 79 +++++++++ include/envoy/safe_init/target.h | 52 ++++++ include/envoy/safe_init/watcher.h | 53 ++++++ source/common/safe_init/BUILD | 40 +++++ source/common/safe_init/manager_impl.cc | 79 +++++++++ source/common/safe_init/manager_impl.h | 62 +++++++ source/common/safe_init/target_impl.cc | 54 ++++++ source/common/safe_init/target_impl.h | 89 ++++++++++ source/common/safe_init/watcher_impl.cc | 38 ++++ source/common/safe_init/watcher_impl.h | 73 ++++++++ source/server/init_manager_impl.h | 3 +- test/common/safe_init/BUILD | 34 ++++ test/common/safe_init/manager_impl_test.cc | 194 +++++++++++++++++++++ test/common/safe_init/target_impl_test.cc | 65 +++++++ test/common/safe_init/watcher_impl_test.cc | 36 ++++ test/mocks/safe_init/BUILD | 20 +++ test/mocks/safe_init/mocks.cc | 25 +++ test/mocks/safe_init/mocks.h | 66 +++++++ 20 files changed, 1096 insertions(+), 3 deletions(-) create mode 100644 include/envoy/safe_init/BUILD create mode 100644 include/envoy/safe_init/manager.h create mode 100644 include/envoy/safe_init/target.h create mode 100644 include/envoy/safe_init/watcher.h create mode 100644 source/common/safe_init/BUILD create mode 100644 source/common/safe_init/manager_impl.cc create mode 
100644 source/common/safe_init/manager_impl.h create mode 100644 source/common/safe_init/target_impl.cc create mode 100644 source/common/safe_init/target_impl.h create mode 100644 source/common/safe_init/watcher_impl.cc create mode 100644 source/common/safe_init/watcher_impl.h create mode 100644 test/common/safe_init/BUILD create mode 100644 test/common/safe_init/manager_impl_test.cc create mode 100644 test/common/safe_init/target_impl_test.cc create mode 100644 test/common/safe_init/watcher_impl_test.cc create mode 100644 test/mocks/safe_init/BUILD create mode 100644 test/mocks/safe_init/mocks.cc create mode 100644 test/mocks/safe_init/mocks.h diff --git a/include/envoy/init/init.h b/include/envoy/init/init.h index 824dbd01fac59..338511c3545b8 100644 --- a/include/envoy/init/init.h +++ b/include/envoy/init/init.h @@ -10,7 +10,8 @@ namespace Envoy { namespace Init { /** - * A single initialization target. + * A single initialization target. Deprecated, use SafeInit::Target instead. + * TODO(mergeconflict): convert all Init::Target implementations to SafeInit::TargetImpl. */ class Target { public: @@ -25,7 +26,8 @@ class Target { }; /** - * A manager that initializes multiple targets. + * A manager that initializes multiple targets. Deprecated, use SafeInit::Manager instead. + * TODO(mergeconflict): convert all Init::Manager uses to SafeInit::Manager. 
*/ class Manager { public: diff --git a/include/envoy/safe_init/BUILD b/include/envoy/safe_init/BUILD new file mode 100644 index 0000000000000..2229d7c7a12e4 --- /dev/null +++ b/include/envoy/safe_init/BUILD @@ -0,0 +1,31 @@ +licenses(["notice"]) # Apache 2 + +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_library", + "envoy_package", +) + +envoy_package() + +envoy_cc_library( + name = "watcher_interface", + hdrs = ["watcher.h"], +) + +envoy_cc_library( + name = "target_interface", + hdrs = ["target.h"], + deps = [ + ":watcher_interface", + ], +) + +envoy_cc_library( + name = "manager_interface", + hdrs = ["manager.h"], + deps = [ + ":target_interface", + ":watcher_interface", + ], +) diff --git a/include/envoy/safe_init/manager.h b/include/envoy/safe_init/manager.h new file mode 100644 index 0000000000000..a94718fbd2869 --- /dev/null +++ b/include/envoy/safe_init/manager.h @@ -0,0 +1,79 @@ +#pragma once + +#include "envoy/common/pure.h" +#include "envoy/safe_init/target.h" +#include "envoy/safe_init/watcher.h" + +namespace Envoy { +namespace SafeInit { + +/** + * SafeInit::Manager coordinates initialization of one or more "targets." A typical flow would be: + * + * - One or more initialization targets are registered with a manager using `add`. + * - The manager is told to `initialize` all its targets, given a Watcher to notify when all + * registered targets are initialized. + * - Each target will initialize, either immediately or asynchronously, and will signal + * `ready` to the manager when initialized. + * - When all targets are initialized, the manager signals `ready` to the watcher it was given + * previously. + * + * Since there are several entities involved in this flow -- the owner of the manager, the targets + * registered with the manager, and the manager itself -- it may be difficult or impossible in some + * cases to guarantee that their lifetimes line up correctly to avoid use-after-free errors. 
The + * interface design here in SafeInit allows implementations to avoid the issue: + * + * - A Target can only be initialized via a TargetHandle, which acts as a weak reference. + * Attempting to initialize a destroyed Target via its handle has no ill effects. + * - Likewise, a Watcher can only be notified that initialization was complete via a + * WatcherHandle, which acts as a weak reference as well. + * + * See target.h and watcher.h, as well as implementation in source/common/safe_init for details. + */ +struct Manager { + virtual ~Manager() = default; + + /** + * The manager's state, used e.g. for reporting in the admin server. + */ + enum class State { + /** + * Targets have not been initialized. + */ + Uninitialized, + /** + * Targets are currently being initialized. + */ + Initializing, + /** + * All targets have been initialized. + */ + Initialized + }; + + /** + * @return the current state of the manager. + */ + virtual State state() const PURE; + + /** + * Register an initialization target. If the manager's current state is uninitialized, the target + * will be saved for invocation later, when `initialize` is called. If the current state is + * initializing, the target will be invoked immediately. It is an error to register a target with + * a manager that is already in initialized state. + * @param target the target to be invoked when initialization begins. + */ + virtual void add(const Target& target) PURE; + + /** + * Start initialization of all previously registered targets, and notify the given Watcher when + * initialization is complete. It is an error to call initialize on a manager that is already in + * initializing or initialized state. If the manager contains no targets, initialization completes + * immediately. + * @param watcher the watcher to notify when initialization is complete. 
+ */ + virtual void initialize(const Watcher& watcher) PURE; +}; + +} // namespace SafeInit +} // namespace Envoy diff --git a/include/envoy/safe_init/target.h b/include/envoy/safe_init/target.h new file mode 100644 index 0000000000000..25dd958d3a646 --- /dev/null +++ b/include/envoy/safe_init/target.h @@ -0,0 +1,52 @@ +#pragma once + +#include + +#include "envoy/common/pure.h" +#include "envoy/safe_init/watcher.h" + +#include "absl/strings/string_view.h" + +namespace Envoy { +namespace SafeInit { + +/** + * A TargetHandle functions as a weak reference to a Target. It is how an implementation of + * SafeInit::Manager would safely tell a target to `initialize` with no guarantees about the + * target's lifetime. Typical usage (outside of SafeInit::ManagerImpl) does not require touching + * TargetHandles at all. + */ +struct TargetHandle { + virtual ~TargetHandle() = default; + + /** + * Tell the target to begin initialization, if it is still available. + * @param watcher A Watcher for the target to notify when it has initialized. + * @return true if the target received this call, false if the target was already destroyed. + */ + virtual bool initialize(const Watcher& watcher) const PURE; +}; +using TargetHandlePtr = std::unique_ptr; + +/** + * An initialization Target is an entity that can be registered with a Manager for initialization. + * It can only be invoked through a TargetHandle. + */ +struct Target { + virtual ~Target() = default; + + /** + * @return a human-readable target name, for logging / debugging. + */ + virtual absl::string_view name() const PURE; + + /** + * Create a new handle that can initialize this target. + * @param name a human readable handle name, for logging / debugging. + * @return a new handle that can initialize this target. 
+ */ + virtual TargetHandlePtr createHandle(absl::string_view name) const PURE; +}; + +} // namespace SafeInit +} // namespace Envoy diff --git a/include/envoy/safe_init/watcher.h b/include/envoy/safe_init/watcher.h new file mode 100644 index 0000000000000..b9eb0cf08959e --- /dev/null +++ b/include/envoy/safe_init/watcher.h @@ -0,0 +1,53 @@ +#pragma once + +#include + +#include "envoy/common/pure.h" + +#include "absl/strings/string_view.h" + +namespace Envoy { +namespace SafeInit { + +/** + * A WatcherHandle functions as a weak reference to a Watcher. It is how an implementation of + * SafeInit::Target would safely notify a Manager that it has initialized, and likewise it's how + * an implementation of SafeInit::Manager would safely tell its client that all registered targets + * have initialized, with no guarantees about the lifetimes of the manager or client. Typical usage + * (outside of SafeInit::TargetImpl and ManagerImpl) does not require touching WatcherHandles at + * all. + */ +struct WatcherHandle { + virtual ~WatcherHandle() = default; + + /** + * Tell the watcher that initialization has completed, if it is still available. + * @return true if the watcher received this call, false if the watcher was already destroyed. + */ + virtual bool ready() const PURE; +}; +using WatcherHandlePtr = std::unique_ptr; + +/** + * A Watcher is an entity that listens for notifications that either an initialization target or + * all targets registered with a manager have initialized. It can only be invoked through a + * WatcherHandle. + */ +struct Watcher { + virtual ~Watcher() = default; + + /** + * @return a human-readable target name, for logging / debugging. + */ + virtual absl::string_view name() const PURE; + + /** + * Create a new handle that can notify this watcher. + * @param name a human readable handle name, for logging / debugging. + * @return a new handle that can notify this watcher. 
+ */ + virtual WatcherHandlePtr createHandle(absl::string_view name) const PURE; +}; + +} // namespace SafeInit +} // namespace Envoy diff --git a/source/common/safe_init/BUILD b/source/common/safe_init/BUILD new file mode 100644 index 0000000000000..269cd9fbaace6 --- /dev/null +++ b/source/common/safe_init/BUILD @@ -0,0 +1,40 @@ +licenses(["notice"]) # Apache 2 + +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_library", + "envoy_package", +) + +envoy_package() + +envoy_cc_library( + name = "watcher_lib", + srcs = ["watcher_impl.cc"], + hdrs = ["watcher_impl.h"], + deps = [ + "//include/envoy/safe_init:watcher_interface", + "//source/common/common:logger_lib", + ], +) + +envoy_cc_library( + name = "target_lib", + srcs = ["target_impl.cc"], + hdrs = ["target_impl.h"], + deps = [ + "//include/envoy/safe_init:target_interface", + "//source/common/common:logger_lib", + ], +) + +envoy_cc_library( + name = "manager_lib", + srcs = ["manager_impl.cc"], + hdrs = ["manager_impl.h"], + deps = [ + ":watcher_lib", + "//include/envoy/safe_init:manager_interface", + "//source/common/common:logger_lib", + ], +) diff --git a/source/common/safe_init/manager_impl.cc b/source/common/safe_init/manager_impl.cc new file mode 100644 index 0000000000000..a21827c67f2f0 --- /dev/null +++ b/source/common/safe_init/manager_impl.cc @@ -0,0 +1,79 @@ +#include "common/safe_init/manager_impl.h" + +#include "common/common/assert.h" + +namespace Envoy { +namespace SafeInit { + +ManagerImpl::ManagerImpl(absl::string_view name) + : name_(fmt::format("init manager {}", name)), state_(State::Uninitialized), count_(0), + watcher_(name_, [this]() { onTargetReady(); }) {} + +Manager::State ManagerImpl::state() const { return state_; } + +void ManagerImpl::add(const Target& target) { + ++count_; + TargetHandlePtr target_handle(target.createHandle(name_)); + switch (state_) { + case State::Uninitialized: + // If the manager isn't initialized yet, save the target handle to be initialized later. 
+ ENVOY_LOG(debug, "added {} to {}", target.name(), name_); + target_handles_.push_back(std::move(target_handle)); + return; + case State::Initializing: + // If the manager is already initializing, initialize the new target immediately. Note that + // it's important in this case that count_ was incremented above before calling the target, + // because if the target calls the init manager back immediately, count_ will be decremented + // here (see the definition of watcher_ above). + target_handle->initialize(watcher_); + return; + case State::Initialized: + // If the manager has already completed initialization, consider this a programming error. + ASSERT(false, fmt::format("attempted to add {} to initialized {}", target.name(), name_)); + } +} + +void ManagerImpl::initialize(const Watcher& watcher) { + // If the manager is already initializing or initialized, consider this a programming error. + ASSERT(state_ == State::Uninitialized, fmt::format("attempted to initialize {} twice", name_)); + + // Create a handle to notify when initialization is complete. + watcher_handle_ = watcher.createHandle(name_); + + if (count_ == 0) { + // If we have no targets, initialization trivially completes. This can happen, and is fine. + ENVOY_LOG(debug, "{} contains no targets", name_); + ready(); + } else { + // If we have some targets, start initialization... + ENVOY_LOG(debug, "{} initializing", name_); + state_ = State::Initializing; + + // Attempt to initialize each target. If a target is unavailable, treat it as though it + // completed immediately. + for (const auto& target_handle : target_handles_) { + if (!target_handle->initialize(watcher_)) { + onTargetReady(); + } + } + } +} + +void ManagerImpl::onTargetReady() { + // If there are no remaining targets and one mysteriously calls us back, this manager is haunted. 
+ ASSERT(count_ != 0, fmt::format("{} called back by target after initialization complete")); + + // If there are no uninitialized targets remaining when called back by a target, that means it was + // the last. Signal `ready` to the handle we saved in `initialize`. + if (--count_ == 0) { + ready(); + } +} + +void ManagerImpl::ready() { + state_ = State::Initialized; + watcher_handle_->ready(); +} + +} // namespace SafeInit +} // namespace Envoy diff --git a/source/common/safe_init/manager_impl.h b/source/common/safe_init/manager_impl.h new file mode 100644 index 0000000000000..7a88572422ad7 --- /dev/null +++ b/source/common/safe_init/manager_impl.h @@ -0,0 +1,62 @@ +#pragma once + +#include + +#include "envoy/safe_init/manager.h" + +#include "common/common/logger.h" +#include "common/safe_init/watcher_impl.h" + +namespace Envoy { +namespace SafeInit { + +/** + * SafeInit::ManagerImpl coordinates initialization of one or more "targets." See comments in + * include/envoy/safe_init/manager.h for an overview. + * + * When the logging level is set to "debug" or "trace," the log will contain entries for all + * significant events in the initialization flow: + * + * - Targets added to the manager + * - Initialization started for the manager and for each target + * - Initialization completed for each target and for the manager + * - Destruction of targets and watchers + * - Callbacks to "unavailable" (deleted) targets, manager, or watchers + */ +class ManagerImpl : public Manager, Logger::Loggable { +public: + /** + * @param name a human-readable manager name, for logging / debugging. 
+ */ + ManagerImpl(absl::string_view name); + + // SafeInit::Manager + State state() const override; + void add(const Target& target) override; + void initialize(const Watcher& watcher) override; + +private: + void onTargetReady(); + void ready(); + + // Human-readable name for logging + const std::string name_; + + // Current state + State state_; + + // Current number of registered targets that have not yet initialized + uint32_t count_; + + // Handle to the watcher passed in `initialize`, to be called when initialization completes + WatcherHandlePtr watcher_handle_; + + // Watcher to receive ready notifications from each target + const WatcherImpl watcher_; + + // All registered targets + std::list target_handles_; +}; + +} // namespace SafeInit +} // namespace Envoy diff --git a/source/common/safe_init/target_impl.cc b/source/common/safe_init/target_impl.cc new file mode 100644 index 0000000000000..bdc839018e38e --- /dev/null +++ b/source/common/safe_init/target_impl.cc @@ -0,0 +1,54 @@ +#include "common/safe_init/target_impl.h" + +namespace Envoy { +namespace SafeInit { + +TargetHandleImpl::TargetHandleImpl(absl::string_view handle_name, absl::string_view name, + std::weak_ptr fn) + : handle_name_(handle_name), name_(name), fn_(std::move(fn)) {} + +bool TargetHandleImpl::initialize(const Watcher& watcher) const { + auto locked_fn(fn_.lock()); + if (locked_fn) { + // If we can "lock" a shared pointer to the target's callback function, call it + // with a new handle to the ManagerImpl's watcher that was passed in. + ENVOY_LOG(debug, "{} initializing {}", handle_name_, name_); + (*locked_fn)(watcher.createHandle(name_)); + return true; + } else { + // If not, the target was already destroyed. 
+ ENVOY_LOG(debug, "{} can't initialize {} (unavailable)", handle_name_, name_); + return false; + } +} + +TargetImpl::TargetImpl(absl::string_view name, InitializeFn fn) + : name_(fmt::format("target {}", name)), + fn_(std::make_shared([this, fn](WatcherHandlePtr watcher_handle) { + watcher_handle_ = std::move(watcher_handle); + fn(); + })) {} + +TargetImpl::~TargetImpl() { ENVOY_LOG(debug, "{} destroyed", name_); } + +absl::string_view TargetImpl::name() const { return name_; } + +TargetHandlePtr TargetImpl::createHandle(absl::string_view handle_name) const { + // Note: can't use std::make_unique here because TargetHandleImpl ctor is private. + return std::unique_ptr( + new TargetHandleImpl(handle_name, name_, std::weak_ptr(fn_))); +} + +bool TargetImpl::ready() { + if (watcher_handle_) { + // If we have a handle for the ManagerImpl's watcher, signal it and then reset so it can't be + // accidentally signaled again. + const bool result = watcher_handle_->ready(); + watcher_handle_.reset(); + return result; + } + return false; +} + +} // namespace SafeInit +} // namespace Envoy diff --git a/source/common/safe_init/target_impl.h b/source/common/safe_init/target_impl.h new file mode 100644 index 0000000000000..675cfceb91eec --- /dev/null +++ b/source/common/safe_init/target_impl.h @@ -0,0 +1,89 @@ +#pragma once + +#include + +#include "envoy/safe_init/target.h" + +#include "common/common/logger.h" + +namespace Envoy { +namespace SafeInit { + +/** + * A target is just a glorified callback function, called by the manager it was registered with. + */ +using InitializeFn = std::function; + +/** + * Internally, the callback is slightly more sophisticated: it actually takes a WatcherHandlePtr + * that it uses to notify the manager when the target is ready. It saves this pointer when invoked + * and resets it later in `ready`. Users needn't care about this implementation detail, they only + * need to provide an `InitializeFn` above when constructing a target. 
+ */ +using InternalInitalizeFn = std::function; + +/** + * A TargetHandleImpl functions as a weak reference to a TargetImpl. It is how a ManagerImpl safely + * tells a target to `initialize` with no guarantees about the target's lifetime. + */ +class TargetHandleImpl : public TargetHandle, Logger::Loggable { +private: + friend class TargetImpl; + TargetHandleImpl(absl::string_view handle_name, absl::string_view name, + std::weak_ptr fn); + +public: + // SafeInit::TargetHandle + bool initialize(const Watcher& watcher) const override; + +private: + // Name of the handle (almost always the name of the ManagerImpl calling the target) + const std::string handle_name_; + + // Name of the target + const std::string name_; + + // The target's callback function, only called if the weak pointer can be "locked" + const std::weak_ptr fn_; +}; + +/** + * A TargetImpl is an entity that can be registered with a Manager for initialization. It can only + * be invoked through a TargetHandle. + */ +class TargetImpl : public Target, Logger::Loggable { +public: + /** + * @param name a human-readable target name, for logging / debugging + * @fn a callback function to invoke when `initialize` is called on the handle. Note that this + * doesn't take a WatcherHandlePtr (like TargetFn does). Managing the watcher handle is done + * internally to simplify usage. + */ + TargetImpl(absl::string_view name, InitializeFn fn); + ~TargetImpl() override; + + // SafeInit::Target + absl::string_view name() const override; + TargetHandlePtr createHandle(absl::string_view handle_name) const override; + + /** + * Signal to the init manager that this target has finished initializing. This is safe to call + * any time. Calling it before initialization begins or after initialization has already ended + * will have no effect. + * @return true if the init manager received this call, false otherwise. 
+ */ + bool ready(); + +private: + // Human-readable name for logging + const std::string name_; + + // Handle to the ManagerImpl's internal watcher, to call when this target is initialized + WatcherHandlePtr watcher_handle_; + + // The callback function, called via TargetHandleImpl by the manager + const std::shared_ptr fn_; +}; + +} // namespace SafeInit +} // namespace Envoy diff --git a/source/common/safe_init/watcher_impl.cc b/source/common/safe_init/watcher_impl.cc new file mode 100644 index 0000000000000..ee7899f55637f --- /dev/null +++ b/source/common/safe_init/watcher_impl.cc @@ -0,0 +1,38 @@ +#include "common/safe_init/watcher_impl.h" + +namespace Envoy { +namespace SafeInit { + +WatcherHandleImpl::WatcherHandleImpl(absl::string_view handle_name, absl::string_view name, + std::weak_ptr fn) + : handle_name_(handle_name), name_(name), fn_(std::move(fn)) {} + +bool WatcherHandleImpl::ready() const { + auto locked_fn(fn_.lock()); + if (locked_fn) { + // If we can "lock" a shared pointer to the watcher's callback function, call it. + ENVOY_LOG(debug, "{} initialized, notifying {}", handle_name_, name_); + (*locked_fn)(); + return true; + } else { + // If not, the watcher was already destroyed. 
+ ENVOY_LOG(debug, "{} initialized, but can't notify {} (unavailable)", handle_name_, name_); + return false; + } +} + +WatcherImpl::WatcherImpl(absl::string_view name, ReadyFn fn) + : name_(name), fn_(std::make_shared(std::move(fn))) {} + +WatcherImpl::~WatcherImpl() { ENVOY_LOG(debug, "{} destroyed", name_); } + +absl::string_view WatcherImpl::name() const { return name_; } + +WatcherHandlePtr WatcherImpl::createHandle(absl::string_view handle_name) const { + // Note: can't use std::make_unique because WatcherHandleImpl ctor is private + return std::unique_ptr( + new WatcherHandleImpl(handle_name, name_, std::weak_ptr(fn_))); +} + +} // namespace SafeInit +} // namespace Envoy diff --git a/source/common/safe_init/watcher_impl.h b/source/common/safe_init/watcher_impl.h new file mode 100644 index 0000000000000..582fd64910816 --- /dev/null +++ b/source/common/safe_init/watcher_impl.h @@ -0,0 +1,73 @@ +#pragma once + +#include + +#include "envoy/safe_init/watcher.h" + +#include "common/common/logger.h" + +namespace Envoy { +namespace SafeInit { + +/** + * A watcher is just a glorified callback function, called by a target or a manager when + * initialization completes. + */ +using ReadyFn = std::function; + +/** + * A WatcherHandleImpl functions as a weak reference to a Watcher. It is how a TargetImpl safely + * notifies a ManagerImpl that it has initialized, and likewise it's how ManagerImpl safely tells + * its client that all registered targets have initialized, with no guarantees about the lifetimes + * of the manager or client. 
+ */ +class WatcherHandleImpl : public WatcherHandle, Logger::Loggable { +private: + friend class WatcherImpl; + WatcherHandleImpl(absl::string_view handle_name, absl::string_view name, + std::weak_ptr fn); + +public: + // SafeInit::WatcherHandle + bool ready() const override; + +private: + // Name of the handle (either the name of the target calling the manager, or the name of the + // manager calling the client) + const std::string handle_name_; + + // Name of the watcher (either the name of the manager, or the name of the client) + const std::string name_; + + // The watcher's callback function, only called if the weak pointer can be "locked" + const std::weak_ptr fn_; +}; + +/** + * A WatcherImpl is an entity that listens for notifications that either an initialization target or + * all targets registered with a manager have initialized. It can only be invoked through a + * WatcherHandleImpl. + */ +class WatcherImpl : public Watcher, Logger::Loggable { +public: + /** + * @param name a human-readable watcher name, for logging / debugging + * @param fn a callback function to invoke when `ready` is called on the handle + */ + WatcherImpl(absl::string_view name, ReadyFn fn); + ~WatcherImpl() override; + + // SafeInit::Watcher + absl::string_view name() const override; + WatcherHandlePtr createHandle(absl::string_view handle_name) const override; + +private: + // Human-readable name for logging + const std::string name_; + + // The callback function, called via WatcherHandleImpl by either the target or the manager + const std::shared_ptr fn_; +}; + +} // namespace SafeInit +} // namespace Envoy diff --git a/source/server/init_manager_impl.h b/source/server/init_manager_impl.h index 0f3ec0d2321f7..e84ec4fbd32d7 100644 --- a/source/server/init_manager_impl.h +++ b/source/server/init_manager_impl.h @@ -11,7 +11,8 @@ namespace Server { /** * Implementation of Init::Manager for use during post cluster manager init / pre listening. 
- * TODO(JimmyCYJ): Move InitManagerImpl into a new subdirectory in source/ called init/. + * Deprecated, use SafeInit::ManagerImpl instead. + * TODO(mergeconflict): convert all Init::ManagerImpl uses to SafeInit::ManagerImpl. */ class InitManagerImpl : public Init::Manager, Logger::Loggable { public: diff --git a/test/common/safe_init/BUILD b/test/common/safe_init/BUILD new file mode 100644 index 0000000000000..35dd33cd09b7c --- /dev/null +++ b/test/common/safe_init/BUILD @@ -0,0 +1,34 @@ +licenses(["notice"]) # Apache 2 + +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_test", + "envoy_package", +) + +envoy_package() + +envoy_cc_test( + name = "watcher_impl_test", + srcs = ["watcher_impl_test.cc"], + deps = [ + "//test/mocks/safe_init:safe_init_mocks", + ], +) + +envoy_cc_test( + name = "target_impl_test", + srcs = ["target_impl_test.cc"], + deps = [ + "//test/mocks/safe_init:safe_init_mocks", + ], +) + +envoy_cc_test( + name = "manager_impl_test", + srcs = ["manager_impl_test.cc"], + deps = [ + "//source/common/safe_init:manager_lib", + "//test/mocks/safe_init:safe_init_mocks", + ], +) diff --git a/test/common/safe_init/manager_impl_test.cc b/test/common/safe_init/manager_impl_test.cc new file mode 100644 index 0000000000000..88e6cc97c7d34 --- /dev/null +++ b/test/common/safe_init/manager_impl_test.cc @@ -0,0 +1,194 @@ +#include "common/safe_init/manager_impl.h" + +#include "test/mocks/safe_init/mocks.h" + +#include "gtest/gtest.h" + +using ::testing::InSequence; +using ::testing::InvokeWithoutArgs; + +namespace Envoy { +namespace SafeInit { +namespace { + +void expectUninitialized(const Manager& m) { EXPECT_EQ(Manager::State::Uninitialized, m.state()); } +void expectInitializing(const Manager& m) { EXPECT_EQ(Manager::State::Initializing, m.state()); } +void expectInitialized(const Manager& m) { EXPECT_EQ(Manager::State::Initialized, m.state()); } + +TEST(SafeInitManagerImplTest, AddImmediateTargetsWhenUninitialized) { + InSequence s; + + ManagerImpl 
m("test"); + expectUninitialized(m); + + ExpectableTargetImpl t1("t1"); + m.add(t1); + + ExpectableTargetImpl t2("t2"); + m.add(t2); + + ExpectableWatcherImpl w; + + // initialization should complete immediately + t1.expectInitializeWillCallReady(); + t2.expectInitializeWillCallReady(); + w.expectReady(); + m.initialize(w); + expectInitialized(m); +} + +TEST(SafeInitManagerImplTest, AddAsyncTargetsWhenUninitialized) { + InSequence s; + + ManagerImpl m("test"); + expectUninitialized(m); + + ExpectableTargetImpl t1("t1"); + m.add(t1); + + ExpectableTargetImpl t2("t2"); + m.add(t2); + + ExpectableWatcherImpl w; + + // initialization should begin + t1.expectInitialize(); + t2.expectInitialize(); + m.initialize(w); + expectInitializing(m); + + // should still be initializing after first target initializes + t1.ready(); + expectInitializing(m); + + // initialization should finish after second target initializes + w.expectReady(); + t2.ready(); + expectInitialized(m); +} + +TEST(SafeInitManagerImplTest, AddMixedTargetsWhenUninitialized) { + InSequence s; + + ManagerImpl m("test"); + expectUninitialized(m); + + ExpectableTargetImpl t1("t1"); + m.add(t1); + + ExpectableTargetImpl t2("t2"); + m.add(t2); + + ExpectableWatcherImpl w; + + // initialization should begin, and first target will initialize immediately + t1.expectInitializeWillCallReady(); + t2.expectInitialize(); + m.initialize(w); + expectInitializing(m); + + // initialization should finish after second target initializes + w.expectReady(); + t2.ready(); + expectInitialized(m); +} + +TEST(SafeInitManagerImplTest, AddImmediateTargetWhenInitializing) { + InSequence s; + + ManagerImpl m("test"); + expectUninitialized(m); + + ExpectableTargetImpl t1("t1"); + m.add(t1); + + ExpectableWatcherImpl w; + + // initialization should begin + t1.expectInitialize(); + m.initialize(w); + expectInitializing(m); + + // adding an immediate target shouldn't finish initialization + ExpectableTargetImpl t2("t2"); + 
t2.expectInitializeWillCallReady(); + m.add(t2); + expectInitializing(m); + + // initialization should finish after original target initializes + w.expectReady(); + t1.ready(); + expectInitialized(m); +} + +TEST(SafeInitManagerImplTest, UnavailableTarget) { + InSequence s; + + ManagerImpl m("test"); + expectUninitialized(m); + + // add a target and destroy it + { + ExpectableTargetImpl t("t"); + m.add(t); + t.expectInitialize().Times(0); + } + + ExpectableWatcherImpl w; + + // initialization should complete despite the destroyed target + w.expectReady(); + m.initialize(w); + expectInitialized(m); +} + +TEST(SafeInitManagerImplTest, UnavailableManager) { + InSequence s; + + ExpectableTargetImpl t("t"); + ExpectableWatcherImpl w; + + { + ManagerImpl m("test"); + expectUninitialized(m); + + m.add(t); + + // initialization should begin before destroying the manager + t.expectInitialize(); + m.initialize(w); + expectInitializing(m); + } + + // the watcher should not be notified when the target is initialized + w.expectReady().Times(0); + t.ready(); +} + +TEST(SafeInitManagerImplTest, UnavailableWatcher) { + InSequence s; + + ManagerImpl m("test"); + expectUninitialized(m); + + ExpectableTargetImpl t("t"); + m.add(t); + + { + ExpectableWatcherImpl w; + + // initialization should begin before destroying the watcher + t.expectInitialize(); + m.initialize(w); + expectInitializing(m); + + w.expectReady().Times(0); + } + + // initialization should finish without notifying the watcher + t.ready(); +} + +} // namespace +} // namespace SafeInit +} // namespace Envoy diff --git a/test/common/safe_init/target_impl_test.cc b/test/common/safe_init/target_impl_test.cc new file mode 100644 index 0000000000000..df0c41fad2f1e --- /dev/null +++ b/test/common/safe_init/target_impl_test.cc @@ -0,0 +1,65 @@ +#include "test/mocks/safe_init/mocks.h" + +#include "gtest/gtest.h" + +using ::testing::InSequence; + +namespace Envoy { +namespace SafeInit { +namespace { + 
+TEST(SafeInitTargetImplTest, Name) { + ExpectableTargetImpl target; + EXPECT_EQ("target test", target.name()); +} + +TEST(SafeInitTargetImplTest, InitializeWhenAvailable) { + InSequence s; + + ExpectableTargetImpl target; + ExpectableWatcherImpl watcher; + + // initializing the target through its handle should invoke initialize()... + target.expectInitialize(); + EXPECT_TRUE(target.createHandle("test")->initialize(watcher)); + + // calling ready() on the target should invoke the saved watcher handle... + watcher.expectReady(); + EXPECT_TRUE(target.ready()); + + // calling ready() a second time should have no effect. + watcher.expectReady().Times(0); + EXPECT_FALSE(target.ready()); +} + +TEST(SafeInitTargetImplTest, InitializeWhenUnavailable) { + ExpectableWatcherImpl watcher; + TargetHandlePtr handle; + { + ExpectableTargetImpl target; + + // initializing the target after it's been destroyed should do nothing. + handle = target.createHandle("test"); + target.expectInitialize().Times(0); + } + EXPECT_FALSE(handle->initialize(watcher)); +} + +TEST(SafeInitTargetImplTest, ReadyWhenWatcherUnavailable) { + ExpectableTargetImpl target; + { + ExpectableWatcherImpl watcher; + + // initializing the target through its handle should invoke initialize()... + target.expectInitialize(); + EXPECT_TRUE(target.createHandle("test")->initialize(watcher)); + + // calling ready() on the target after the watcher has been destroyed should do nothing. 
+ watcher.expectReady().Times(0); + } + EXPECT_FALSE(target.ready()); +} + +} // namespace +} // namespace SafeInit +} // namespace Envoy diff --git a/test/common/safe_init/watcher_impl_test.cc b/test/common/safe_init/watcher_impl_test.cc new file mode 100644 index 0000000000000..39abccbf40f93 --- /dev/null +++ b/test/common/safe_init/watcher_impl_test.cc @@ -0,0 +1,36 @@ +#include "test/mocks/safe_init/mocks.h" + +#include "gtest/gtest.h" + +namespace Envoy { +namespace SafeInit { +namespace { + +TEST(SafeInitWatcherImplTest, Name) { + ExpectableWatcherImpl watcher; + EXPECT_EQ("test", watcher.name()); +} + +TEST(SafeInitWatcherImplTest, ReadyWhenAvailable) { + ExpectableWatcherImpl watcher; + + // notifying the watcher through its handle should invoke ready(). + watcher.expectReady(); + EXPECT_TRUE(watcher.createHandle("test")->ready()); +} + +TEST(SafeInitWatcherImplTest, ReadyWhenUnavailable) { + WatcherHandlePtr handle; + { + ExpectableWatcherImpl watcher; + + // notifying the watcher after it's been destroyed should do nothing. 
+ handle = watcher.createHandle("test"); + watcher.expectReady().Times(0); + } + EXPECT_FALSE(handle->ready()); +} + +} // namespace +} // namespace SafeInit +} // namespace Envoy diff --git a/test/mocks/safe_init/BUILD b/test/mocks/safe_init/BUILD new file mode 100644 index 0000000000000..fbb24c52d1861 --- /dev/null +++ b/test/mocks/safe_init/BUILD @@ -0,0 +1,20 @@ +licenses(["notice"]) # Apache 2 + +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_mock", + "envoy_package", +) + +envoy_package() + +envoy_cc_mock( + name = "safe_init_mocks", + srcs = ["mocks.cc"], + hdrs = ["mocks.h"], + deps = [ + "//include/envoy/safe_init:manager_interface", + "//source/common/safe_init:target_lib", + "//source/common/safe_init:watcher_lib", + ], +) diff --git a/test/mocks/safe_init/mocks.cc b/test/mocks/safe_init/mocks.cc new file mode 100644 index 0000000000000..1ef93da2f3074 --- /dev/null +++ b/test/mocks/safe_init/mocks.cc @@ -0,0 +1,25 @@ +#include "test/mocks/safe_init/mocks.h" + +namespace Envoy { +namespace SafeInit { + +using ::testing::Invoke; + +ExpectableWatcherImpl::ExpectableWatcherImpl(absl::string_view name) + : WatcherImpl(name, {[this]() { ready(); }}) {} +::testing::internal::TypedExpectation& ExpectableWatcherImpl::expectReady() const { + return EXPECT_CALL(*this, ready()); +} + +ExpectableTargetImpl::ExpectableTargetImpl(absl::string_view name) + : TargetImpl(name, {[this]() { initialize(); }}) {} +::testing::internal::TypedExpectation& ExpectableTargetImpl::expectInitialize() { + return EXPECT_CALL(*this, initialize()); +} +::testing::internal::TypedExpectation& +ExpectableTargetImpl::expectInitializeWillCallReady() { + return expectInitialize().WillOnce(Invoke([this]() { ready(); })); +} + +} // namespace SafeInit +} // namespace Envoy diff --git a/test/mocks/safe_init/mocks.h b/test/mocks/safe_init/mocks.h new file mode 100644 index 0000000000000..92a41cf4d7389 --- /dev/null +++ b/test/mocks/safe_init/mocks.h @@ -0,0 +1,66 @@ +#pragma once + 
+#include "envoy/safe_init/manager.h" + +#include "common/safe_init/target_impl.h" +#include "common/safe_init/watcher_impl.h" + +#include "gmock/gmock.h" + +namespace Envoy { +namespace SafeInit { + +/** + * ExpectableWatcherImpl is a real WatcherImpl, subclassed to add a mock `ready` method that you can + * set expectations on in tests. Tests should never want a watcher with different behavior than the + * real implementation. + */ +class ExpectableWatcherImpl : public WatcherImpl { +public: + ExpectableWatcherImpl(absl::string_view name = "test"); + MOCK_CONST_METHOD0(ready, void()); + + /** + * Convenience method to provide a shorthand for EXPECT_CALL(watcher, ready()). Can be chained, + * for example: watcher.expectReady().Times(0); + */ + ::testing::internal::TypedExpectation& expectReady() const; +}; + +/** + * ExpectableTargetImpl is a real TargetImpl, subclassed to add a mock `initialize` method that you + * can set expectations on in tests. Tests should never want a target with a different behavior than + * the real implementation. + */ +class ExpectableTargetImpl : public TargetImpl { +public: + ExpectableTargetImpl(absl::string_view name = "test"); + MOCK_METHOD0(initialize, void()); + + /** + * Convenience method to provide a shorthand for EXPECT_CALL(target, initialize()). Can be + * chained, for example: target.expectInitialize().Times(0); + */ + ::testing::internal::TypedExpectation& expectInitialize(); + + /** + * Convenience method to provide a shorthand for expectInitialize() with mocked behavior of + * calling `ready` immediately. + */ + ::testing::internal::TypedExpectation& expectInitializeWillCallReady(); +}; + +/** + * MockManager is a typical mock. In many cases, it won't be necessary to mock any of its methods. 
+ * In cases where its `add` and `initialize` methods are actually called in a test, it's usually + * sufficient to mock `add` by saving the target argument locally, and to mock `initialize` by + * invoking the saved target with the watcher argument. + */ +struct MockManager : Manager { + MOCK_CONST_METHOD0(state, Manager::State()); + MOCK_METHOD1(add, void(const Target&)); + MOCK_METHOD1(initialize, void(const Watcher&)); +}; + +} // namespace SafeInit +} // namespace Envoy From 7b1909bc9fce255169c4c548cb692707fbc4e1f3 Mon Sep 17 00:00:00 2001 From: Derek Argueta Date: Fri, 22 Mar 2019 15:46:28 -0700 Subject: [PATCH 002/165] convert HCM test configs to v2 YAML (#6354) Signed-off-by: Derek Argueta --- .../network/http_connection_manager/BUILD | 1 - .../http_connection_manager/config_test.cc | 443 +++++++----------- 2 files changed, 181 insertions(+), 263 deletions(-) diff --git a/test/extensions/filters/network/http_connection_manager/BUILD b/test/extensions/filters/network/http_connection_manager/BUILD index 80dc8b5955d94..71873e28e9023 100644 --- a/test/extensions/filters/network/http_connection_manager/BUILD +++ b/test/extensions/filters/network/http_connection_manager/BUILD @@ -17,7 +17,6 @@ envoy_extension_cc_test( extension_name = "envoy.filters.network.http_connection_manager", deps = [ "//source/common/buffer:buffer_lib", - "//source/common/config:filter_json_lib", "//source/common/event:dispatcher_lib", "//source/extensions/filters/http/dynamo:config", "//source/extensions/filters/http/router:config", diff --git a/test/extensions/filters/network/http_connection_manager/config_test.cc b/test/extensions/filters/network/http_connection_manager/config_test.cc index d47ac26fd3bb5..d11761322838b 100644 --- a/test/extensions/filters/network/http_connection_manager/config_test.cc +++ b/test/extensions/filters/network/http_connection_manager/config_test.cc @@ -1,7 +1,6 @@ #include 
"envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.pb.validate.h" #include "common/buffer/buffer_impl.h" -#include "common/config/filter_json.h" #include "common/http/date_provider_impl.h" #include "extensions/filters/network/http_connection_manager/config.h" @@ -24,17 +23,6 @@ namespace Extensions { namespace NetworkFilters { namespace HttpConnectionManager { -envoy::config::filter::network::http_connection_manager::v2::HttpConnectionManager -parseHttpConnectionManagerFromJson(const std::string& json_string) { - envoy::config::filter::network::http_connection_manager::v2::HttpConnectionManager - http_connection_manager; - auto json_object_ptr = Json::Factory::loadFromString(json_string); - NiceMock scope; - Config::FilterJson::translateHttpConnectionManager(*json_object_ptr, http_connection_manager, - scope.statsOptions()); - return http_connection_manager; -} - envoy::config::filter::network::http_connection_manager::v2::HttpConnectionManager parseHttpConnectionManagerFromV2Yaml(const std::string& yaml) { envoy::config::filter::network::http_connection_manager::v2::HttpConnectionManager @@ -59,69 +47,55 @@ TEST_F(HttpConnectionManagerConfigTest, ValidateFail) { } TEST_F(HttpConnectionManagerConfigTest, InvalidFilterName) { - const std::string json_string = R"EOF( - { - "codec_type": "http1", - "stat_prefix": "router", - "route_config": - { - "virtual_hosts": [ - { - "name": "service", - "domains": [ "*" ], - "routes": [ - { - "prefix": "/", - "cluster": "cluster" - } - ] - } - ] - }, - "filters": [ - { "name": "foo", "config": {} } - ] - } + const std::string yaml_string = R"EOF( +codec_type: http1 +stat_prefix: router +route_config: + virtual_hosts: + - name: service + domains: + - "*" + routes: + - match: + prefix: "/" + route: + cluster: cluster +http_filters: +- name: foo + config: {} )EOF"; EXPECT_THROW_WITH_MESSAGE( - HttpConnectionManagerConfig(parseHttpConnectionManagerFromJson(json_string), context_, + 
HttpConnectionManagerConfig(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, date_provider_, route_config_provider_manager_), EnvoyException, "Didn't find a registered implementation for name: 'foo'"); } TEST_F(HttpConnectionManagerConfigTest, MiscConfig) { - const std::string json_string = R"EOF( - { - "codec_type": "http1", - "server_name": "foo", - "stat_prefix": "router", - "route_config": - { - "virtual_hosts": [ - { - "name": "service", - "domains": [ "*" ], - "routes": [ - { - "prefix": "/", - "cluster": "cluster" - } - ] - } - ] - }, - "tracing": { - "operation_name": "ingress", - "request_headers_for_tags": [ "foo" ] - }, - "filters": [ - { "name": "http_dynamo_filter", "config": {} } - ] - } + const std::string yaml_string = R"EOF( +codec_type: http1 +server_name: foo +stat_prefix: router +route_config: + virtual_hosts: + - name: service + domains: + - "*" + routes: + - match: + prefix: "/" + route: + cluster: cluster +tracing: + operation_name: ingress + request_headers_for_tags: + - foo +http_filters: +- name: envoy.router + config: {} )EOF"; - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromJson(json_string), context_, + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, date_provider_, route_config_provider_manager_); EXPECT_THAT(std::vector({Http::LowerCaseString("foo")}), @@ -257,233 +231,178 @@ TEST_F(HttpConnectionManagerConfigTest, UnconfiguredRequestTimeout) { } TEST_F(HttpConnectionManagerConfigTest, SingleDateProvider) { - const std::string json_string = R"EOF( - { - "codec_type": "http1", - "stat_prefix": "router", - "route_config": - { - "virtual_hosts": [ - { - "name": "service", - "domains": [ "*" ], - "routes": [ - { - "prefix": "/", - "cluster": "cluster" - } - ] - } - ] - }, - "filters": [ - { "name": "http_dynamo_filter", "config": {} } - ] - } + const std::string yaml_string = R"EOF( +codec_type: http1 +stat_prefix: router +route_config: + virtual_hosts: + - 
name: service + domains: + - "*" + routes: + - match: + prefix: "/" + route: + cluster: cluster +http_filters: +- name: envoy.http_dynamo_filter + config: {} + )EOF"; - Json::ObjectSharedPtr json_config = Json::Factory::loadFromString(json_string); + auto proto_config = parseHttpConnectionManagerFromV2Yaml(yaml_string); HttpConnectionManagerFilterConfigFactory factory; // We expect a single slot allocation vs. multiple. EXPECT_CALL(context_.thread_local_, allocateSlot()); - Network::FilterFactoryCb cb1 = factory.createFilterFactory(*json_config, context_); - Network::FilterFactoryCb cb2 = factory.createFilterFactory(*json_config, context_); + Network::FilterFactoryCb cb1 = factory.createFilterFactoryFromProto(proto_config, context_); + Network::FilterFactoryCb cb2 = factory.createFilterFactoryFromProto(proto_config, context_); } TEST_F(HttpConnectionManagerConfigTest, BadHttpConnectionMangerConfig) { - std::string json_string = R"EOF( - { - "codec_type" : "http1", - "stat_prefix" : "my_stat_prefix", - "route_config" : { - "virtual_hosts" : [ - { - "name" : "default", - "domains" : ["*"], - "routes" : [ - { - "prefix" : "/", - "cluster": "fake_cluster" - } - ] - } - ] - }, - "filter" : [{}] - } + std::string yaml_string = R"EOF( +codec_type: http1 +stat_prefix: my_stat_prefix +route_config: + virtual_hosts: + - name: default + domains: + - "*" + routes: + - match: + prefix: "/" + route: + cluster: fake_cluster +filter: +- {} )EOF"; - Json::ObjectSharedPtr json_config = Json::Factory::loadFromString(json_string); - HttpConnectionManagerFilterConfigFactory factory; - EXPECT_THROW(factory.createFilterFactory(*json_config, context_), Json::Exception); + EXPECT_THROW(parseHttpConnectionManagerFromV2Yaml(yaml_string), EnvoyException); } TEST_F(HttpConnectionManagerConfigTest, BadAccessLogConfig) { - std::string json_string = R"EOF( - { - "codec_type" : "http1", - "stat_prefix" : "my_stat_prefix", - "route_config" : { - "virtual_hosts" : [ - { - "name" : "default", - 
"domains" : ["*"], - "routes" : [ - { - "prefix" : "/", - "cluster": "fake_cluster" - } - ] - } - ] - }, - "filters" : [ - { - "type" : "both", - "name" : "http_dynamo_filter", - "config" : {} - } - ], - "access_log" :[ - { - "path" : "mypath", - "filter" : [] - } - ] - } + std::string yaml_string = R"EOF( +codec_type: http1 +stat_prefix: my_stat_prefix +route_config: + virtual_hosts: + - name: default + domains: + - "*" + routes: + - match: + prefix: "/" + route: + cluster: fake_cluster +http_filters: +- name: envoy.http_dynamo_filter + config: {} +access_log: +- name: envoy.file_access_log + typed_config: + "@type": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog + path: "/dev/null" + filter: [] )EOF"; - Json::ObjectSharedPtr json_config = Json::Factory::loadFromString(json_string); - HttpConnectionManagerFilterConfigFactory factory; - EXPECT_THROW(factory.createFilterFactory(*json_config, context_), Json::Exception); + EXPECT_THROW_WITH_REGEX(parseHttpConnectionManagerFromV2Yaml(yaml_string), EnvoyException, + "filter: Proto field is not repeating, cannot start list."); } TEST_F(HttpConnectionManagerConfigTest, BadAccessLogType) { - std::string json_string = R"EOF( - { - "codec_type" : "http1", - "stat_prefix" : "my_stat_prefix", - "route_config" : { - "virtual_hosts" : [ - { - "name" : "default", - "domains" : ["*"], - "routes" : [ - { - "prefix" : "/", - "cluster": "fake_cluster" - } - ] - } - ] - }, - "filters" : [ - { - "type" : "both", - "name" : "http_dynamo_filter", - "config" : {} - } - ], - "access_log" :[ - { - "path" : "mypath", - "filter" : { - "type" : "bad_type" - } - } - ] - } + std::string yaml_string = R"EOF( +codec_type: http1 +stat_prefix: my_stat_prefix +route_config: + virtual_hosts: + - name: default + domains: + - "*" + routes: + - match: + prefix: "/" + route: + cluster: fake_cluster +http_filters: +- name: envoy.http_dynamo_filter + config: {} +access_log: +- name: envoy.file_access_log + typed_config: + "@type": 
type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog + path: "/dev/null" + filter: + bad_type: {} )EOF"; - Json::ObjectSharedPtr json_config = Json::Factory::loadFromString(json_string); - HttpConnectionManagerFilterConfigFactory factory; - EXPECT_THROW(factory.createFilterFactory(*json_config, context_), Json::Exception); + EXPECT_THROW_WITH_REGEX(parseHttpConnectionManagerFromV2Yaml(yaml_string), EnvoyException, + "bad_type: Cannot find field"); } TEST_F(HttpConnectionManagerConfigTest, BadAccessLogNestedTypes) { - std::string json_string = R"EOF( - { - "codec_type" : "http1", - "stat_prefix" : "my_stat_prefix", - "route_config" : { - "virtual_hosts" : [ - { - "name" : "default", - "domains" : ["*"], - "routes" : [ - { - "prefix" : "/", - "cluster": "fake_cluster" - } - ] - } - ] - }, - "filters" : [ - { - "type" : "both", - "name" : "http_dynamo_filter", - "config" : {} - } - ], - "access_log" :[ - { - "path": "/dev/null", - "filter": { - "type": "logical_and", - "filters": [ - { - "type": "logical_or", - "filters": [ - {"type": "duration", "op": ">=", "value": 10000}, - {"type": "bad_type"} - ] - }, - {"type": "not_healthcheck"} - ] - } - } - ] - } + std::string yaml_string = R"EOF( +codec_type: http1 +stat_prefix: my_stat_prefix +route_config: + virtual_hosts: + - name: default + domains: + - "*" + routes: + - match: + prefix: "/" + route: + cluster: fake_cluster +http_filters: +- name: envoy.http_dynamo_filter + config: {} +access_log: +- name: envoy.file_access_log + typed_config: + "@type": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog + path: "/dev/null" + filter: + and_filter: + filters: + - or_filter: + filters: + - duration_filter: + op: ">=" + value: 10000 + - bad_type: {} + - not_health_check_filter: {} )EOF"; - Json::ObjectSharedPtr json_config = Json::Factory::loadFromString(json_string); - HttpConnectionManagerFilterConfigFactory factory; - EXPECT_THROW(factory.createFilterFactory(*json_config, context_), Json::Exception); + 
EXPECT_THROW_WITH_REGEX(parseHttpConnectionManagerFromV2Yaml(yaml_string), EnvoyException, + "bad_type: Cannot find field"); } class FilterChainTest : public HttpConnectionManagerConfigTest { public: const std::string basic_config_ = R"EOF( - { - "codec_type": "http1", - "server_name": "foo", - "stat_prefix": "router", - "route_config": - { - "virtual_hosts": [ - { - "name": "service", - "domains": [ "*" ], - "routes": [ - { - "prefix": "/", - "cluster": "cluster" - } - ] - } - ] - }, - "filters": [ - { "name": "http_dynamo_filter", "config": {} }, - { "name": "router", "config": {} } - ] - } +codec_type: http1 +server_name: foo +stat_prefix: router +route_config: + virtual_hosts: + - name: service + domains: + - "*" + routes: + - match: + prefix: "/" + route: + cluster: cluster +http_filters: +- name: envoy.http_dynamo_filter + config: {} +- name: envoy.router + config: {} + )EOF"; }; TEST_F(FilterChainTest, createFilterChain) { - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromJson(basic_config_), context_, + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(basic_config_), context_, date_provider_, route_config_provider_manager_); Http::MockFilterChainFactoryCallbacks callbacks; @@ -494,7 +413,7 @@ TEST_F(FilterChainTest, createFilterChain) { // Tests where upgrades are configured on via the HCM. TEST_F(FilterChainTest, createUpgradeFilterChain) { - auto hcm_config = parseHttpConnectionManagerFromJson(basic_config_); + auto hcm_config = parseHttpConnectionManagerFromV2Yaml(basic_config_); hcm_config.add_upgrade_configs()->set_upgrade_type("websocket"); HttpConnectionManagerConfig config(hcm_config, context_, date_provider_, @@ -539,7 +458,7 @@ TEST_F(FilterChainTest, createUpgradeFilterChain) { // Tests where upgrades are configured off via the HCM. 
TEST_F(FilterChainTest, createUpgradeFilterChainHCMDisabled) { - auto hcm_config = parseHttpConnectionManagerFromJson(basic_config_); + auto hcm_config = parseHttpConnectionManagerFromV2Yaml(basic_config_); hcm_config.add_upgrade_configs()->set_upgrade_type("websocket"); hcm_config.mutable_upgrade_configs(0)->mutable_enabled()->set_value(false); @@ -576,7 +495,7 @@ TEST_F(FilterChainTest, createUpgradeFilterChainHCMDisabled) { } TEST_F(FilterChainTest, createCustomUpgradeFilterChain) { - auto hcm_config = parseHttpConnectionManagerFromJson(basic_config_); + auto hcm_config = parseHttpConnectionManagerFromV2Yaml(basic_config_); auto websocket_config = hcm_config.add_upgrade_configs(); websocket_config->set_upgrade_type("websocket"); @@ -617,7 +536,7 @@ TEST_F(FilterChainTest, createCustomUpgradeFilterChain) { } TEST_F(FilterChainTest, invalidConfig) { - auto hcm_config = parseHttpConnectionManagerFromJson(basic_config_); + auto hcm_config = parseHttpConnectionManagerFromV2Yaml(basic_config_); hcm_config.add_upgrade_configs()->set_upgrade_type("WEBSOCKET"); hcm_config.add_upgrade_configs()->set_upgrade_type("websocket"); From 260e42a16ddba6d6c325b8fab0dc76595da9c686 Mon Sep 17 00:00:00 2001 From: Matt Klein Date: Fri, 22 Mar 2019 15:55:56 -0700 Subject: [PATCH 003/165] docs/configs/examples: use envoy-dev images (#6357) We need to think about whether we want to have all of these somehow reference some type of environment variable that would point to the right image in the context of the tree the user is looking at, but given that the trunk documentation may require a master build, this is more correct. 
Signed-off-by: Matt Klein --- ci/README.md | 2 +- configs/Dockerfile | 2 +- docs/root/start/start.rst | 8 ++++---- examples/cors/backend/Dockerfile-frontenvoy | 2 +- examples/cors/frontend/Dockerfile-frontenvoy | 2 +- examples/fault-injection/Dockerfile-envoy | 2 +- examples/front-proxy/Dockerfile-frontenvoy | 2 +- examples/grpc-bridge/Dockerfile-grpc | 2 +- examples/grpc-bridge/Dockerfile-python | 2 +- examples/lua/Dockerfile-proxy | 2 +- examples/mysql/Dockerfile-proxy | 2 +- examples/redis/Dockerfile-proxy | 2 +- 12 files changed, 15 insertions(+), 15 deletions(-) diff --git a/ci/README.md b/ci/README.md index 05402fe1b88df..9a6bcb776c11e 100644 --- a/ci/README.md +++ b/ci/README.md @@ -8,7 +8,7 @@ where `` is specified in [`envoy_build_sha.sh`](https://github.com/envoypr may work with `envoyproxy/envoy-build:latest` to provide a self-contained environment for building Envoy binaries and running tests that reflects the latest built Ubuntu Envoy image. Moreover, the Docker image at [`envoyproxy/envoy:`](https://hub.docker.com/r/envoyproxy/envoy/) is an image that has an Envoy binary at `/usr/local/bin/envoy`. The `` -corresponds to the master commit at which the binary was compiled. Lastly, `envoyproxy/envoy:latest` contains an Envoy +corresponds to the master commit at which the binary was compiled. Lastly, `envoyproxy/envoy-dev:latest` contains an Envoy binary built from the latest tip of master that passed tests. ## Alpine Envoy image diff --git a/configs/Dockerfile b/configs/Dockerfile index e81237686687b..2d7b7a6a5e3bf 100644 --- a/configs/Dockerfile +++ b/configs/Dockerfile @@ -1,7 +1,7 @@ # This configuration will build a Docker container containing # an Envoy proxy that routes to Google. 
-FROM envoyproxy/envoy:latest +FROM envoyproxy/envoy-dev:latest RUN apt-get update COPY google_com_proxy.v2.yaml /etc/envoy.yaml CMD /usr/local/bin/envoy -c /etc/envoy.yaml diff --git a/docs/root/start/start.rst b/docs/root/start/start.rst index 92bfb36f1c3a5..b3da8ce4b2543 100644 --- a/docs/root/start/start.rst +++ b/docs/root/start/start.rst @@ -24,8 +24,8 @@ A very minimal Envoy configuration that can be used to validate basic plain HTTP proxying is available in :repo:`configs/google_com_proxy.v2.yaml`. This is not intended to represent a realistic Envoy deployment:: - $ docker pull envoyproxy/envoy:latest - $ docker run --rm -d -p 10000:10000 envoyproxy/envoy:latest + $ docker pull envoyproxy/envoy-dev:latest + $ docker run --rm -d -p 10000:10000 envoyproxy/envoy-dev:latest $ curl -v localhost:10000 The Docker image used will contain the latest version of Envoy @@ -115,7 +115,7 @@ You can refer to the :ref:`Command line options `. .. code-block:: none - FROM envoyproxy/envoy:latest + FROM envoyproxy/envoy-dev:latest COPY envoy.yaml /etc/envoy/envoy.yaml Build the Docker image that runs your configuration using:: @@ -138,7 +138,7 @@ by using a volume. 
version: '3' services: envoy: - image: envoyproxy/envoy:latest + image: envoyproxy/envoy-dev:latest ports: - "10000:10000" volumes: diff --git a/examples/cors/backend/Dockerfile-frontenvoy b/examples/cors/backend/Dockerfile-frontenvoy index 98413046a146d..83b5ba806c6a1 100644 --- a/examples/cors/backend/Dockerfile-frontenvoy +++ b/examples/cors/backend/Dockerfile-frontenvoy @@ -1,4 +1,4 @@ -FROM envoyproxy/envoy:latest +FROM envoyproxy/envoy-dev:latest RUN apt-get update && apt-get -q install -y \ curl diff --git a/examples/cors/frontend/Dockerfile-frontenvoy b/examples/cors/frontend/Dockerfile-frontenvoy index 98413046a146d..83b5ba806c6a1 100644 --- a/examples/cors/frontend/Dockerfile-frontenvoy +++ b/examples/cors/frontend/Dockerfile-frontenvoy @@ -1,4 +1,4 @@ -FROM envoyproxy/envoy:latest +FROM envoyproxy/envoy-dev:latest RUN apt-get update && apt-get -q install -y \ curl diff --git a/examples/fault-injection/Dockerfile-envoy b/examples/fault-injection/Dockerfile-envoy index 421547df5a887..f4c09bae67c5e 100644 --- a/examples/fault-injection/Dockerfile-envoy +++ b/examples/fault-injection/Dockerfile-envoy @@ -1,4 +1,4 @@ -FROM envoyproxy/envoy:latest +FROM envoyproxy/envoy-dev:latest RUN apt-get update && apt-get install -y curl tree COPY enable_delay_fault_injection.sh disable_delay_fault_injection.sh enable_abort_fault_injection.sh disable_abort_fault_injection.sh send_request.sh / diff --git a/examples/front-proxy/Dockerfile-frontenvoy b/examples/front-proxy/Dockerfile-frontenvoy index 98413046a146d..83b5ba806c6a1 100644 --- a/examples/front-proxy/Dockerfile-frontenvoy +++ b/examples/front-proxy/Dockerfile-frontenvoy @@ -1,4 +1,4 @@ -FROM envoyproxy/envoy:latest +FROM envoyproxy/envoy-dev:latest RUN apt-get update && apt-get -q install -y \ curl diff --git a/examples/grpc-bridge/Dockerfile-grpc b/examples/grpc-bridge/Dockerfile-grpc index 80d2b12e8ab18..f8e3cb3ad27a4 100644 --- a/examples/grpc-bridge/Dockerfile-grpc +++ b/examples/grpc-bridge/Dockerfile-grpc 
@@ -1,4 +1,4 @@ -FROM envoyproxy/envoy:latest +FROM envoyproxy/envoy-dev:latest RUN mkdir /var/log/envoy/ COPY ./bin/service /usr/local/bin/srv diff --git a/examples/grpc-bridge/Dockerfile-python b/examples/grpc-bridge/Dockerfile-python index 02aa308c2acb1..84d468d2b3d2f 100644 --- a/examples/grpc-bridge/Dockerfile-python +++ b/examples/grpc-bridge/Dockerfile-python @@ -1,4 +1,4 @@ -FROM envoyproxy/envoy:latest +FROM envoyproxy/envoy-dev:latest RUN apt-get update RUN apt-get -q install -y python-dev \ diff --git a/examples/lua/Dockerfile-proxy b/examples/lua/Dockerfile-proxy index 26aaebb9ab6dc..92b320ea14879 100644 --- a/examples/lua/Dockerfile-proxy +++ b/examples/lua/Dockerfile-proxy @@ -1,2 +1,2 @@ -FROM envoyproxy/envoy:latest +FROM envoyproxy/envoy-dev:latest CMD /usr/local/bin/envoy -c /etc/envoy.yaml -l debug --service-cluster proxy diff --git a/examples/mysql/Dockerfile-proxy b/examples/mysql/Dockerfile-proxy index c250ec540e830..ad18604cd0c78 100644 --- a/examples/mysql/Dockerfile-proxy +++ b/examples/mysql/Dockerfile-proxy @@ -1,3 +1,3 @@ -FROM envoyproxy/envoy:latest +FROM envoyproxy/envoy-dev:latest CMD /usr/local/bin/envoy -c /etc/envoy.yaml -l debug diff --git a/examples/redis/Dockerfile-proxy b/examples/redis/Dockerfile-proxy index 26aaebb9ab6dc..92b320ea14879 100644 --- a/examples/redis/Dockerfile-proxy +++ b/examples/redis/Dockerfile-proxy @@ -1,2 +1,2 @@ -FROM envoyproxy/envoy:latest +FROM envoyproxy/envoy-dev:latest CMD /usr/local/bin/envoy -c /etc/envoy.yaml -l debug --service-cluster proxy From bea9cd0c801e6bb600773efc2f2aef96a54fa080 Mon Sep 17 00:00:00 2001 From: Derek Argueta Date: Sat, 23 Mar 2019 09:34:29 -0700 Subject: [PATCH 004/165] remove v1 Redis HC tests (#6367) Signed-off-by: Derek Argueta --- .../health_checkers/redis/config_test.cc | 51 ------------------- 1 file changed, 51 deletions(-) diff --git a/test/extensions/health_checkers/redis/config_test.cc b/test/extensions/health_checkers/redis/config_test.cc index 
b33c6d5e03ed7..c595c11029404 100644 --- a/test/extensions/health_checkers/redis/config_test.cc +++ b/test/extensions/health_checkers/redis/config_test.cc @@ -112,57 +112,6 @@ TEST(HealthCheckerFactoryTest, createRedisViaUpstreamHealthCheckerFactory) { dispatcher, log_manager) .get())); } - -TEST(HealthCheckerFactoryTest, createRedisWithDeprecatedV1JsonConfig) { - const std::string json = R"EOF( - { - "type": "redis", - "timeout_ms": 1000, - "interval_ms": 1000, - "unhealthy_threshold": 1, - "healthy_threshold": 1 - } - )EOF"; - - NiceMock cluster; - Runtime::MockLoader runtime; - Runtime::MockRandomGenerator random; - Event::MockDispatcher dispatcher; - AccessLog::MockAccessLogManager log_manager; - EXPECT_NE(nullptr, dynamic_cast( - // Always use Upstream's HealthCheckerFactory when creating instance using - // deprecated config. - Upstream::HealthCheckerFactory::create( - Upstream::parseHealthCheckFromV1Json(json), cluster, runtime, random, - dispatcher, log_manager) - .get())); -} - -TEST(HealthCheckerFactoryTest, createRedisWithDeprecatedV1JsonConfigWithKey) { - const std::string json = R"EOF( - { - "type": "redis", - "timeout_ms": 1000, - "interval_ms": 1000, - "unhealthy_threshold": 1, - "healthy_threshold": 1, - "redis_key": "foo" - } - )EOF"; - - NiceMock cluster; - Runtime::MockLoader runtime; - Runtime::MockRandomGenerator random; - Event::MockDispatcher dispatcher; - AccessLog::MockAccessLogManager log_manager; - EXPECT_NE(nullptr, dynamic_cast( - // Always use Upstream's HealthCheckerFactory when creating instance using - // deprecated config. 
- Upstream::HealthCheckerFactory::create( - Upstream::parseHealthCheckFromV1Json(json), cluster, runtime, random, - dispatcher, log_manager) - .get())); -} } // namespace } // namespace RedisHealthChecker } // namespace HealthCheckers From 80ff3651fc7e21bf1153be6f499136412146a105 Mon Sep 17 00:00:00 2001 From: Spencer Lewis Date: Sat, 23 Mar 2019 09:35:23 -0700 Subject: [PATCH 005/165] upstream: consistent H1 and H2 rq_total stats (#6352) Previously, we incremented rq_total and upstream_rq_total in the HTTP/1 conn pool even if the request ended up being circuit broken. The stats were not incremented for HTTP/2 requests. This change no longer increments the stats for HTTP/1 circuit broken requests for consistency between the two. Signed-off-by: Spencer Lewis --- docs/root/intro/version_history.rst | 1 + source/common/http/http1/conn_pool.cc | 4 ++-- test/common/http/http1/conn_pool_test.cc | 9 +++++++-- 3 files changed, 10 insertions(+), 4 deletions(-) diff --git a/docs/root/intro/version_history.rst b/docs/root/intro/version_history.rst index 1987acbadeb8a..ec3b38d8353b7 100644 --- a/docs/root/intro/version_history.rst +++ b/docs/root/intro/version_history.rst @@ -72,6 +72,7 @@ Version history * tracing: added :ref:`verbose ` to support logging annotations on spans. * upstream: added support for host weighting and :ref:`locality weighting ` in the :ref:`ring hash load balancer `, and added a :ref:`maximum_ring_size` config parameter to strictly bound the ring size. * upstream: added configuration option to select any host when the fallback policy fails. +* upstream: stopped incrementing upstream_rq_total for HTTP/1 conn pool when request is circuit broken. 
1.9.0 (Dec 20, 2018) ==================== diff --git a/source/common/http/http1/conn_pool.cc b/source/common/http/http1/conn_pool.cc index 139c814448495..119711fc1505a 100644 --- a/source/common/http/http1/conn_pool.cc +++ b/source/common/http/http1/conn_pool.cc @@ -66,6 +66,8 @@ bool ConnPoolImpl::hasActiveConnections() const { void ConnPoolImpl::attachRequestToClient(ActiveClient& client, StreamDecoder& response_decoder, ConnectionPool::Callbacks& callbacks) { ASSERT(!client.stream_wrapper_); + host_->cluster().stats().upstream_rq_total_.inc(); + host_->stats().rq_total_.inc(); client.stream_wrapper_ = std::make_unique(response_decoder, client); callbacks.onPoolReady(*client.stream_wrapper_, client.real_host_description_); } @@ -90,8 +92,6 @@ void ConnPoolImpl::createNewConnection() { ConnectionPool::Cancellable* ConnPoolImpl::newStream(StreamDecoder& response_decoder, ConnectionPool::Callbacks& callbacks) { - host_->cluster().stats().upstream_rq_total_.inc(); - host_->stats().rq_total_.inc(); if (!ready_clients_.empty()) { ready_clients_.front()->moveBetweenLists(ready_clients_, busy_clients_); ENVOY_CONN_LOG(debug, "using existing connection", *busy_clients_.front()->codec_client_); diff --git a/test/common/http/http1/conn_pool_test.cc b/test/common/http/http1/conn_pool_test.cc index 3d5ba3d40c73b..c66a2d9a41d55 100644 --- a/test/common/http/http1/conn_pool_test.cc +++ b/test/common/http/http1/conn_pool_test.cc @@ -169,7 +169,9 @@ struct ActiveTestRequest { parent.conn_pool_.test_clients_[client_index_].connection_->raiseEvent( Network::ConnectionEvent::Connected); } - EXPECT_EQ(current_rq_total + 1, parent_.cluster_->stats_.upstream_rq_total_.value()); + if (type != Type::Pending) { + EXPECT_EQ(current_rq_total + 1, parent_.cluster_->stats_.upstream_rq_total_.value()); + } } void completeResponse(bool with_body) { @@ -368,7 +370,7 @@ TEST_F(Http1ConnPoolImplTest, ConnectTimeout) { EXPECT_CALL(conn_pool_, onClientDestroy()).Times(2); 
dispatcher_.clearDeferredDeleteList(); - EXPECT_EQ(2U, cluster_->stats_.upstream_rq_total_.value()); + EXPECT_EQ(0U, cluster_->stats_.upstream_rq_total_.value()); EXPECT_EQ(2U, cluster_->stats_.upstream_cx_connect_fail_.value()); EXPECT_EQ(2U, cluster_->stats_.upstream_cx_connect_timeout_.value()); } @@ -630,6 +632,7 @@ TEST_F(Http1ConnPoolImplTest, ConcurrentConnections) { r1.completeResponse(false); conn_pool_.expectAndRunUpstreamReady(); r3.startRequest(); + EXPECT_EQ(3U, cluster_->stats_.upstream_rq_total_.value()); r2.completeResponse(false); r3.completeResponse(false); @@ -651,6 +654,7 @@ TEST_F(Http1ConnPoolImplTest, DrainCallback) { ActiveTestRequest r1(*this, 0, ActiveTestRequest::Type::CreateConnection); ActiveTestRequest r2(*this, 0, ActiveTestRequest::Type::Pending); r2.handle_->cancel(); + EXPECT_EQ(1U, cluster_->stats_.upstream_rq_total_.value()); EXPECT_CALL(drained, ready()); r1.startRequest(); @@ -756,6 +760,7 @@ TEST_F(Http1ConnPoolImplTest, PendingRequestIsConsideredActive) { EXPECT_CALL(conn_pool_, onClientDestroy()); r1.handle_->cancel(); + EXPECT_EQ(0U, cluster_->stats_.upstream_rq_total_.value()); conn_pool_.drainConnections(); conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); dispatcher_.clearDeferredDeleteList(); From 2b6218c8f19949b8dc53ecd892c179a9bc3702b3 Mon Sep 17 00:00:00 2001 From: danzh Date: Mon, 25 Mar 2019 11:03:57 -0400 Subject: [PATCH 006/165] quiche: add macro DCHECK in quic_logging_impl.h (#6358) Address one TOTO in that file that (D)CHECK is not explicit listed in platform API, but is supposed to be defined in some impl. Define them in quic_logging_impl.h seems appropriate. 
Risk Level: low, not in use Part of #2557 Signed-off-by: Dan Zhang --- .../quiche/platform/quic_logging_impl.h | 7 +++++-- .../quiche/platform/quic_platform_test.cc | 13 +++++++++++++ 2 files changed, 18 insertions(+), 2 deletions(-) diff --git a/source/extensions/quic_listeners/quiche/platform/quic_logging_impl.h b/source/extensions/quic_listeners/quiche/platform/quic_logging_impl.h index 813a9bb0f9631..b71dba7b39259 100644 --- a/source/extensions/quic_listeners/quiche/platform/quic_logging_impl.h +++ b/source/extensions/quic_listeners/quiche/platform/quic_logging_impl.h @@ -17,8 +17,6 @@ #include "absl/base/optimization.h" #include "absl/synchronization/mutex.h" -// TODO(wub): Add CHECK/DCHECK and variants, which are not explicitly exposed by quic_logging.h. - // If |condition| is true, use |logstream| to stream the log message and send it to spdlog. // If |condition| is false, |logstream| will not be instantiated. // The switch(0) is used to suppress a compiler warning on ambiguous "else". @@ -55,8 +53,12 @@ #define QUIC_LOG_WARNING_IS_ON_IMPL() quic::IsLogLevelEnabled(quic::WARNING) #define QUIC_LOG_ERROR_IS_ON_IMPL() quic::IsLogLevelEnabled(quic::ERROR) +#define CHECK(condition) \ + QUIC_LOG_IF_IMPL(FATAL, ABSL_PREDICT_FALSE(!(condition))) << "CHECK failed: " #condition "." 
+ #ifdef NDEBUG // Release build +#define DCHECK(condition) QUIC_COMPILED_OUT_LOG() #define QUIC_COMPILED_OUT_LOG() QUIC_LOG_IMPL_INTERNAL(false, quic::NullLogStream().stream()) #define QUIC_DVLOG_IMPL(verbosity) QUIC_COMPILED_OUT_LOG() #define QUIC_DVLOG_IF_IMPL(verbosity, condition) QUIC_COMPILED_OUT_LOG() @@ -66,6 +68,7 @@ #define QUIC_NOTREACHED_IMPL() #else // Debug build +#define DCHECK(condition) CHECK(condition) #define QUIC_DVLOG_IMPL(verbosity) QUIC_VLOG_IMPL(verbosity) #define QUIC_DVLOG_IF_IMPL(verbosity, condition) QUIC_VLOG_IF_IMPL(verbosity, condition) #define QUIC_DLOG_IMPL(severity) QUIC_LOG_IMPL(severity) diff --git a/test/extensions/quic_listeners/quiche/platform/quic_platform_test.cc b/test/extensions/quic_listeners/quiche/platform/quic_platform_test.cc index cd8f513e8f9c6..10557ac8b91bb 100644 --- a/test/extensions/quic_listeners/quiche/platform/quic_platform_test.cc +++ b/test/extensions/quic_listeners/quiche/platform/quic_platform_test.cc @@ -357,6 +357,19 @@ TEST(QuicPlatformTest, QuicDLog) { #undef VALUE_BY_COMPILE_MODE +TEST(QuicPlatformTest, QuicCHECK) { + CHECK(1 == 1); + CHECK(1 == 1) << " 1 == 1 is forever true."; + + EXPECT_DEBUG_DEATH({ DCHECK(false) << " Supposed to fail in debug mode."; }, + "CHECK failed:.* Supposed to fail in debug mode."); + EXPECT_DEBUG_DEATH({ DCHECK(false); }, "CHECK failed"); + + EXPECT_DEATH({ CHECK(false) << " Supposed to fail in all modes."; }, + "CHECK failed:.* Supposed to fail in all modes."); + EXPECT_DEATH({ CHECK(false); }, "CHECK failed"); +} + // Test the behaviors of the cross products of // // {QUIC_LOG, QUIC_DLOG} x {FATAL, DFATAL} x {debug, release} From dcf55449c9c97c5e3d3ff588cbf6e9ea3dc39d3d Mon Sep 17 00:00:00 2001 From: Yuval Kohavi Date: Mon, 25 Mar 2019 12:33:06 -0400 Subject: [PATCH 007/165] fix NPE In refreshCachedRoute (#6359) Signed-off-by: Yuval Kohavi --- source/common/http/conn_manager_impl.cc | 5 ++- test/common/http/conn_manager_impl_test.cc | 47 ++++++++++++++++++++++ 2 files 
changed, 51 insertions(+), 1 deletion(-) diff --git a/source/common/http/conn_manager_impl.cc b/source/common/http/conn_manager_impl.cc index 2e146102930c3..9c23eb39b6d1a 100644 --- a/source/common/http/conn_manager_impl.cc +++ b/source/common/http/conn_manager_impl.cc @@ -1061,7 +1061,10 @@ void ConnectionManagerImpl::startDrainSequence() { } void ConnectionManagerImpl::ActiveStream::refreshCachedRoute() { - Router::RouteConstSharedPtr route = snapped_route_config_->route(*request_headers_, stream_id_); + Router::RouteConstSharedPtr route; + if (request_headers_ != nullptr) { + route = snapped_route_config_->route(*request_headers_, stream_id_); + } stream_info_.route_entry_ = route ? route->routeEntry() : nullptr; cached_route_ = std::move(route); if (nullptr == stream_info_.route_entry_) { diff --git a/test/common/http/conn_manager_impl_test.cc b/test/common/http/conn_manager_impl_test.cc index 959bd05eb94e9..cbec81157ee1f 100644 --- a/test/common/http/conn_manager_impl_test.cc +++ b/test/common/http/conn_manager_impl_test.cc @@ -1248,6 +1248,53 @@ TEST_F(HttpConnectionManagerImplTest, PerStreamIdleTimeoutGlobal) { EXPECT_EQ(1U, stats_.named_.downstream_rq_idle_timeout_.value()); } +TEST_F(HttpConnectionManagerImplTest, AccessEncoderRouteBeforeHeadersArriveOnIdleTimeout) { + stream_idle_timeout_ = std::chrono::milliseconds(10); + setup(false, ""); + + std::shared_ptr filter(new NiceMock()); + + EXPECT_CALL(filter_factory_, createFilterChain(_)) + .WillOnce(Invoke([&](FilterChainFactoryCallbacks& callbacks) -> void { + callbacks.addStreamEncoderFilter(filter); + })); + + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { + Event::MockTimer* idle_timer = setUpTimer(); + EXPECT_CALL(*idle_timer, enableTimer(std::chrono::milliseconds(10))); + conn_manager_->newStream(response_encoder_); + + // Expect resetIdleTimer() to be called for the response + // encodeHeaders()/encodeData(). 
+ EXPECT_CALL(*idle_timer, enableTimer(_)).Times(2); + EXPECT_CALL(*idle_timer, disableTimer()); + // Simulate and idle timeout so that the filter chain gets created. + idle_timer->callback_(); + })); + + // This should not be called as we don't have request headers. + EXPECT_CALL(*route_config_provider_.route_config_, route(_, _)).Times(0); + + EXPECT_CALL(*filter, encodeHeaders(_, _)) + .WillOnce(InvokeWithoutArgs([&]() -> FilterHeadersStatus { + // Under heavy load it is possible that stream timeout will be reached before any headers + // were received. Envoy will create a local reply that will go through the encoder filter + // chain. We want to make sure that encoder filters get a null route object. + auto route = filter->callbacks_->route(); + EXPECT_EQ(route.get(), nullptr); + return FilterHeadersStatus::Continue; + })); + EXPECT_CALL(*filter, encodeData(_, _)); + EXPECT_CALL(*filter, encodeComplete()); + EXPECT_CALL(*filter, onDestroy()); + + EXPECT_CALL(response_encoder_, encodeHeaders(_, _)); + EXPECT_CALL(response_encoder_, encodeData(_, _)); + + Buffer::OwnedImpl fake_input; + conn_manager_->onData(fake_input, false); +} + TEST_F(HttpConnectionManagerImplTest, TestStreamIdleAccessLog) { stream_idle_timeout_ = std::chrono::milliseconds(10); setup(false, ""); From 0ac3706ddf0fda091521bb90cffac7bca8530197 Mon Sep 17 00:00:00 2001 From: Luke Shumaker Date: Mon, 25 Mar 2019 12:48:21 -0400 Subject: [PATCH 008/165] docs: correct api/envoy/service/auth/v2 docs (#6211) Update some documentation comments in api/envoy/service/auth/v2/*.proto to more accurately describe the *current* behavior (without making any judgment on whether that behavior is "correct" or desirable). 
Signed-off-by: Luke Shumaker --- .../service/auth/v2/attribute_context.proto | 16 ++++++++++------ api/envoy/service/auth/v2/external_auth.proto | 3 ++- 2 files changed, 12 insertions(+), 7 deletions(-) diff --git a/api/envoy/service/auth/v2/attribute_context.proto b/api/envoy/service/auth/v2/attribute_context.proto index a012a17cd9ad7..7cf7e18eae4c0 100644 --- a/api/envoy/service/auth/v2/attribute_context.proto +++ b/api/envoy/service/auth/v2/attribute_context.proto @@ -86,7 +86,8 @@ message AttributeContext { // lowercased, because HTTP header keys are case-insensitive. map headers = 3; - // The HTTP URL path. + // The request target, as it appears in the first line of the HTTP request. This includes + // the URL path and query-string. No decoding is performed. string path = 4; // The HTTP request `Host` or 'Authority` header value. @@ -95,18 +96,21 @@ message AttributeContext { // The HTTP URL scheme, such as `http` and `https`. string scheme = 6; - // The HTTP URL query in the format of `name1=value`&name2=value2`, as it - // appears in the first line of the HTTP request. No decoding is performed. + // This field is always empty, and exists for compatibility reasons. The HTTP URL query is + // included in `path` field. string query = 7; - // The HTTP URL fragment, excluding leading `#`. No URL decoding is performed. + // This field is always empty, and exists for compatibility reasons. The URL fragment is + // not submitted as part of HTTP requests; it is unknowable. string fragment = 8; // The HTTP request size in bytes. If unknown, it must be -1. int64 size = 9; - // The network protocol used with the request, such as - // "http/1.1", "spdy/3", "h2", "h2c" + // The network protocol used with the request, such as "HTTP/1.0", "HTTP/1.1", or "HTTP/2". + // + // See :repo:`headers.h:ProtocolStrings ` for a list of all + // possible values. 
string protocol = 10; } diff --git a/api/envoy/service/auth/v2/external_auth.proto b/api/envoy/service/auth/v2/external_auth.proto index 0f723c98e46c2..ce28506cadfaa 100644 --- a/api/envoy/service/auth/v2/external_auth.proto +++ b/api/envoy/service/auth/v2/external_auth.proto @@ -61,7 +61,8 @@ message OkHttpResponse { // Intended for gRPC and Network Authorization servers `only`. message CheckResponse { - // Status `OK` allows the request. Any other status indicates the request should be denied. + // Status `OK` allows the request. Status `UNKNOWN` causes Envoy to abort. Any other status + // indicates the request should be denied. google.rpc.Status status = 1; // An message that contains HTTP response attributes. This message is From b771f99f53142bbb1496dbcddccc5defaaaa8dd8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Guti=C3=A9rrez=20Segal=C3=A9s?= Date: Mon, 25 Mar 2019 10:40:35 -0700 Subject: [PATCH 009/165] ZooKeeper proxy filter (#5991) This filter decodes the ZooKeeper wire protocol and emits stats & metadata about requests, responses and events. This wire protocol parsing is based on: https://github.com/twitter/zktraffic https://github.com/rgs1/zktraffic-cpp The actual filter structure is based on the Mysql proxy filter. 
Signed-off-by: Raul Gutierrez Segales --- CODEOWNERS | 2 + .../network/zookeeper_proxy/v1alpha1/BUILD | 8 + .../v1alpha1/zookeeper_proxy.proto | 33 + .../network_filters/network_filters.rst | 1 + .../zookeeper_proxy_filter.rst | 92 ++ .../well_known_dynamic_metadata.rst | 1 + docs/root/intro/version_history.rst | 2 + source/common/common/enum_to_int.h | 7 +- source/extensions/extensions_build_config.bzl | 2 + .../filters/network/well_known_names.h | 2 + .../filters/network/zookeeper_proxy/BUILD | 47 + .../zookeeper_proxy/zookeeper_config.cc | 47 + .../zookeeper_proxy/zookeeper_config.h | 33 + .../zookeeper_proxy/zookeeper_decoder.cc | 415 +++++++++ .../zookeeper_proxy/zookeeper_decoder.h | 150 +++ .../zookeeper_proxy/zookeeper_filter.cc | 239 +++++ .../zookeeper_proxy/zookeeper_filter.h | 141 +++ .../zookeeper_proxy/zookeeper_utils.cc | 71 ++ .../network/zookeeper_proxy/zookeeper_utils.h | 45 + .../filters/network/zookeeper_proxy/BUILD | 27 + .../zookeeper_proxy/zookeeper_filter_test.cc | 874 ++++++++++++++++++ tools/spelling_dictionary.txt | 3 + 22 files changed, 2241 insertions(+), 1 deletion(-) create mode 100644 api/envoy/config/filter/network/zookeeper_proxy/v1alpha1/BUILD create mode 100644 api/envoy/config/filter/network/zookeeper_proxy/v1alpha1/zookeeper_proxy.proto create mode 100644 docs/root/configuration/network_filters/zookeeper_proxy_filter.rst create mode 100644 source/extensions/filters/network/zookeeper_proxy/BUILD create mode 100644 source/extensions/filters/network/zookeeper_proxy/zookeeper_config.cc create mode 100644 source/extensions/filters/network/zookeeper_proxy/zookeeper_config.h create mode 100644 source/extensions/filters/network/zookeeper_proxy/zookeeper_decoder.cc create mode 100644 source/extensions/filters/network/zookeeper_proxy/zookeeper_decoder.h create mode 100644 source/extensions/filters/network/zookeeper_proxy/zookeeper_filter.cc create mode 100644 source/extensions/filters/network/zookeeper_proxy/zookeeper_filter.h create 
mode 100644 source/extensions/filters/network/zookeeper_proxy/zookeeper_utils.cc create mode 100644 source/extensions/filters/network/zookeeper_proxy/zookeeper_utils.h create mode 100644 test/extensions/filters/network/zookeeper_proxy/BUILD create mode 100644 test/extensions/filters/network/zookeeper_proxy/zookeeper_filter_test.cc diff --git a/CODEOWNERS b/CODEOWNERS index abdf25c16f860..0f81b447d285f 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -22,3 +22,5 @@ /*/extensions/filters/network/mysql_proxy @rshriram @venilnoronha @mattklein123 # quic extension /*/extensions/quic_listeners/ @alyssawilk @danzh2010 @mattklein123 @mpwarres @wu-bin +# zookeeper_proxy extension +/*/extensions/filters/network/zookeeper_proxy @rgs1 @snowp diff --git a/api/envoy/config/filter/network/zookeeper_proxy/v1alpha1/BUILD b/api/envoy/config/filter/network/zookeeper_proxy/v1alpha1/BUILD new file mode 100644 index 0000000000000..a29ebf3a88484 --- /dev/null +++ b/api/envoy/config/filter/network/zookeeper_proxy/v1alpha1/BUILD @@ -0,0 +1,8 @@ +load("//bazel:api_build_system.bzl", "api_proto_library_internal") + +licenses(["notice"]) # Apache 2 + +api_proto_library_internal( + name = "zookeeper_proxy", + srcs = ["zookeeper_proxy.proto"], +) diff --git a/api/envoy/config/filter/network/zookeeper_proxy/v1alpha1/zookeeper_proxy.proto b/api/envoy/config/filter/network/zookeeper_proxy/v1alpha1/zookeeper_proxy.proto new file mode 100644 index 0000000000000..6a8afdd12ec07 --- /dev/null +++ b/api/envoy/config/filter/network/zookeeper_proxy/v1alpha1/zookeeper_proxy.proto @@ -0,0 +1,33 @@ +syntax = "proto3"; + +package envoy.config.filter.network.zookeeper_proxy.v1alpha1; + +option java_outer_classname = "ZookeeperProxyProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.config.filter.network.zookeeper_proxy.v1alpha1"; +option go_package = "v1alpha1"; + +import "validate/validate.proto"; +import "google/protobuf/wrappers.proto"; + +// [#protodoc-title: ZooKeeper proxy] 
+// ZooKeeper Proxy :ref:`configuration overview `. +message ZooKeeperProxy { + // The human readable prefix to use when emitting :ref:`statistics + // `. + string stat_prefix = 1 [(validate.rules).string.min_bytes = 1]; + + // [#not-implemented-hide:] The optional path to use for writing ZooKeeper access logs. + // If the access log field is empty, access logs will not be written. + string access_log = 2; + + // Messages — requests, responses and events — that are bigger than this value will + // be ignored. If it is not set, the default value is 1Mb. + // + // The value here should match the jute.maxbuffer property in your cluster configuration: + // + // https://zookeeper.apache.org/doc/r3.4.10/zookeeperAdmin.html#Unsafe+Options + // + // if that is set. If it isn't, ZooKeeper's default is also 1Mb. + google.protobuf.UInt32Value max_packet_bytes = 3; +} diff --git a/docs/root/configuration/network_filters/network_filters.rst b/docs/root/configuration/network_filters/network_filters.rst index dd559ddd66890..91693bc40ab05 100644 --- a/docs/root/configuration/network_filters/network_filters.rst +++ b/docs/root/configuration/network_filters/network_filters.rst @@ -21,3 +21,4 @@ filters. tcp_proxy_filter thrift_proxy_filter sni_cluster_filter + zookeeper_proxy_filter diff --git a/docs/root/configuration/network_filters/zookeeper_proxy_filter.rst b/docs/root/configuration/network_filters/zookeeper_proxy_filter.rst new file mode 100644 index 0000000000000..cf8e1c9716a72 --- /dev/null +++ b/docs/root/configuration/network_filters/zookeeper_proxy_filter.rst @@ -0,0 +1,92 @@ +.. _config_network_filters_zookeeper_proxy: + +ZooKeeper proxy +=============== + +The ZooKeeper proxy filter decodes the client protocol for +`Apache ZooKeeper `_. It decodes the requests, +responses and events in the payload. Most opcodes known in +`ZooKeeper 3.5 `_ +are supported. The unsupported ones are related to SASL authentication. + +.. 
attention:: + + The zookeeper_proxy filter is experimental and is currently under active + development. Capabilities will be expanded over time and the + configuration structures are likely to change. + +.. _config_network_filters_zookeeper_proxy_config: + +Configuration +------------- + +The ZooKeeper proxy filter should be chained with the TCP proxy filter as shown +in the configuration snippet below: + +.. code-block:: yaml + + filter_chains: + - filters: + - name: envoy.filters.network.zookeeper_proxy + config: + stat_prefix: zookeeper + - name: envoy.tcp_proxy + config: + stat_prefix: tcp + cluster: ... + + +.. _config_network_filters_zookeeper_proxy_stats: + +Statistics +---------- + +Every configured ZooKeeper proxy filter has statistics rooted at *zookeeper..* with the +following statistics: + +.. csv-table:: + :header: Name, Type, Description + :widths: 1, 1, 2 + + decoder_error, Counter, Number of times a message wasn't decoded + request_bytes, Counter, Number of bytes in decoded request messages + connect_rq, Counter, Number of regular connect (non-readonly) requests + connect_readonly_rq, Counter, Number of connect requests with the readonly flag set + ping_rq, Counter, Number of ping requests + auth._rq, Counter, Number of auth requests for a given type + getdata_rq, Counter, Number of getdata requests + create_rq, Counter, Number of create requests + create2_rq, Counter, Number of create2 requests + setdata_rq, Counter, Number of setdata requests + getchildren_rq, Counter, Number of getchildren requests + getchildren2_rq, Counter, Number of getchildren2 requests + remove_rq, Counter, Number of delete requests + exists_rq, Counter, Number of stat requests + getacl_rq, Counter, Number of getacl requests + setacl_rq, Counter, Number of setacl requests + sync_rq, Counter, Number of sync requests + multi_rq, Counter, Number of multi transaction requests + reconfig_rq, Counter, Number of reconfig requests + close_rq, Counter, Number of close requests + 
setwatches_rq, Counter, Number of setwatches requests + checkwatches_rq, Counter, Number of checkwatches requests + removewatches_rq, Counter, Number of removewatches requests + check_rq, Counter, Number of check requests + +.. _config_network_filters_zookeeper_proxy_dynamic_metadata: + +Dynamic Metadata +---------------- + +The ZooKeeper filter emits the following dynamic metadata for each message parsed: + +.. csv-table:: + :header: Name, Type, Description + :widths: 1, 1, 2 + + , string, "The path associated with the request, response or event" + , string, "The opname for the request, response or event" + , string, "The string representation of the flags applied to the znode" + , string, "The size of the request message in bytes" + , string, "True if a watch is being set, false otherwise" + , string, "The version parameter, if any, given with the request" diff --git a/docs/root/configuration/well_known_dynamic_metadata.rst b/docs/root/configuration/well_known_dynamic_metadata.rst index dd11866a42a02..73215617e46db 100644 --- a/docs/root/configuration/well_known_dynamic_metadata.rst +++ b/docs/root/configuration/well_known_dynamic_metadata.rst @@ -17,3 +17,4 @@ The following Envoy filters emit dynamic metadata that other filters can leverag * :ref:`MySQL Proxy Filter ` * :ref:`Role Based Access Control (RBAC) Filter ` * :ref:`Role Based Access Control (RBAC) Network Filter ` +* :ref:`ZooKeeper Proxy Filter ` diff --git a/docs/root/intro/version_history.rst b/docs/root/intro/version_history.rst index ec3b38d8353b7..b307ddc5d1bdb 100644 --- a/docs/root/intro/version_history.rst +++ b/docs/root/intro/version_history.rst @@ -71,6 +71,8 @@ Version history * upstream: add cluster factory to allow creating and registering :ref:`custom cluster type`. * tracing: added :ref:`verbose ` to support logging annotations on spans. 
* upstream: added support for host weighting and :ref:`locality weighting ` in the :ref:`ring hash load balancer `, and added a :ref:`maximum_ring_size` config parameter to strictly bound the ring size. +* zookeeper: added a ZooKeeper proxy filter that parses ZooKeeper messages (requests/responses/events). + Refer to ::ref:`ZooKeeper proxy` for more details. * upstream: added configuration option to select any host when the fallback policy fails. * upstream: stopped incrementing upstream_rq_total for HTTP/1 conn pool when request is circuit broken. diff --git a/source/common/common/enum_to_int.h b/source/common/common/enum_to_int.h index a9c77b59419ee..ec613ef67df9b 100644 --- a/source/common/common/enum_to_int.h +++ b/source/common/common/enum_to_int.h @@ -6,5 +6,10 @@ namespace Envoy { /** * Convert an int based enum to an int. */ -template uint32_t enumToInt(T val) { return static_cast(val); } +template constexpr uint32_t enumToInt(T val) { return static_cast(val); } + +/** + * Convert an int based enum to a signed int. 
+ */ +template constexpr int32_t enumToSignedInt(T val) { return static_cast(val); } } // namespace Envoy diff --git a/source/extensions/extensions_build_config.bzl b/source/extensions/extensions_build_config.bzl index bd3793cc9d16a..f973d63fad0a7 100644 --- a/source/extensions/extensions_build_config.bzl +++ b/source/extensions/extensions_build_config.bzl @@ -77,6 +77,7 @@ EXTENSIONS = { "envoy.filters.network.tcp_proxy": "//source/extensions/filters/network/tcp_proxy:config", "envoy.filters.network.thrift_proxy": "//source/extensions/filters/network/thrift_proxy:config", "envoy.filters.network.sni_cluster": "//source/extensions/filters/network/sni_cluster:config", + "envoy.filters.network.zookeeper_proxy": "//source/extensions/filters/network/zookeeper_proxy:config", # # Resource monitors @@ -194,6 +195,7 @@ WINDOWS_EXTENSIONS = { "envoy.filters.network.tcp_proxy": "//source/extensions/filters/network/tcp_proxy:config", #"envoy.filters.network.thrift_proxy": "//source/extensions/filters/network/thrift_proxy:config", #"envoy.filters.network.sni_cluster": "//source/extensions/filters/network/sni_cluster:config", + #"envoy.filters.network.zookeeper_proxy": "//source/extensions/filters/network/zookeeper_proxy:config", # # Stat sinks diff --git a/source/extensions/filters/network/well_known_names.h b/source/extensions/filters/network/well_known_names.h index f540aab0d36f7..a1d435f4e7b2a 100644 --- a/source/extensions/filters/network/well_known_names.h +++ b/source/extensions/filters/network/well_known_names.h @@ -38,6 +38,8 @@ class NetworkFilterNameValues { const std::string Rbac = "envoy.filters.network.rbac"; // SNI Cluster filter const std::string SniCluster = "envoy.filters.network.sni_cluster"; + // ZooKeeper proxy filter + const std::string ZooKeeperProxy = "envoy.filters.network.zookeeper_proxy"; // Converts names from v1 to v2 const Config::V1Converter v1_converter_; diff --git a/source/extensions/filters/network/zookeeper_proxy/BUILD 
b/source/extensions/filters/network/zookeeper_proxy/BUILD new file mode 100644 index 0000000000000..4fae6bda72674 --- /dev/null +++ b/source/extensions/filters/network/zookeeper_proxy/BUILD @@ -0,0 +1,47 @@ +licenses(["notice"]) # Apache 2 + +# ZooKeeper proxy L7 network filter. +# Public docs: docs/root/configuration/network_filters/zookeeper_proxy_filter.rst + +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_library", + "envoy_package", +) + +envoy_package() + +envoy_cc_library( + name = "proxy_lib", + srcs = [ + "zookeeper_decoder.cc", + "zookeeper_filter.cc", + "zookeeper_utils.cc", + ], + hdrs = [ + "zookeeper_decoder.h", + "zookeeper_filter.h", + "zookeeper_utils.h", + ], + deps = [ + "//include/envoy/network:filter_interface", + "//include/envoy/server:filter_config_interface", + "//include/envoy/stats:stats_interface", + "//include/envoy/stats:stats_macros", + "//source/common/config:filter_json_lib", + "//source/common/network:filter_lib", + "//source/extensions/filters/network:well_known_names", + ], +) + +envoy_cc_library( + name = "config", + srcs = ["zookeeper_config.cc"], + hdrs = ["zookeeper_config.h"], + deps = [ + ":proxy_lib", + "//source/extensions/filters/network:well_known_names", + "//source/extensions/filters/network/common:factory_base_lib", + "@envoy_api//envoy/config/filter/network/zookeeper_proxy/v1alpha1:zookeeper_proxy_cc", + ], +) diff --git a/source/extensions/filters/network/zookeeper_proxy/zookeeper_config.cc b/source/extensions/filters/network/zookeeper_proxy/zookeeper_config.cc new file mode 100644 index 0000000000000..7a2bda7a7bcbd --- /dev/null +++ b/source/extensions/filters/network/zookeeper_proxy/zookeeper_config.cc @@ -0,0 +1,47 @@ +#include "extensions/filters/network/zookeeper_proxy/zookeeper_config.h" + +#include + +#include "envoy/config/filter/network/zookeeper_proxy/v1alpha1/zookeeper_proxy.pb.validate.h" +#include "envoy/registry/registry.h" +#include "envoy/server/filter_config.h" + +#include 
"common/common/logger.h" + +#include "extensions/filters/network/zookeeper_proxy/zookeeper_filter.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace ZooKeeperProxy { + +/** + * Config registration for the ZooKeeper proxy filter. @see NamedNetworkFilterConfigFactory. + */ +Network::FilterFactoryCb +NetworkFilters::ZooKeeperProxy::ZooKeeperConfigFactory::createFilterFactoryFromProtoTyped( + const envoy::config::filter::network::zookeeper_proxy::v1alpha1::ZooKeeperProxy& proto_config, + Server::Configuration::FactoryContext& context) { + + ASSERT(!proto_config.stat_prefix().empty()); + + const std::string stat_prefix = fmt::format("{}.zookeeper.", proto_config.stat_prefix()); + const uint32_t max_packet_bytes = + PROTOBUF_GET_WRAPPED_OR_DEFAULT(proto_config, max_packet_bytes, 1024 * 1024); + + ZooKeeperFilterConfigSharedPtr filter_config( + std::make_shared(stat_prefix, max_packet_bytes, context.scope())); + return [filter_config](Network::FilterManager& filter_manager) -> void { + filter_manager.addFilter(std::make_shared(filter_config)); + }; +} + +/** + * Static registration for the ZooKeeper proxy filter. @see RegisterFactory. 
+ */ +REGISTER_FACTORY(ZooKeeperConfigFactory, Server::Configuration::NamedNetworkFilterConfigFactory); + +} // namespace ZooKeeperProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/network/zookeeper_proxy/zookeeper_config.h b/source/extensions/filters/network/zookeeper_proxy/zookeeper_config.h new file mode 100644 index 0000000000000..2dc1f86ba332c --- /dev/null +++ b/source/extensions/filters/network/zookeeper_proxy/zookeeper_config.h @@ -0,0 +1,33 @@ +#pragma once + +#include "envoy/config/filter/network/zookeeper_proxy/v1alpha1/zookeeper_proxy.pb.h" +#include "envoy/config/filter/network/zookeeper_proxy/v1alpha1/zookeeper_proxy.pb.validate.h" + +#include "extensions/filters/network/common/factory_base.h" +#include "extensions/filters/network/well_known_names.h" +#include "extensions/filters/network/zookeeper_proxy/zookeeper_filter.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace ZooKeeperProxy { + +/** + * Config registration for the ZooKeeper proxy filter. 
+ */ +class ZooKeeperConfigFactory + : public Common::FactoryBase< + envoy::config::filter::network::zookeeper_proxy::v1alpha1::ZooKeeperProxy> { +public: + ZooKeeperConfigFactory() : FactoryBase(NetworkFilterNames::get().ZooKeeperProxy) {} + +private: + Network::FilterFactoryCb createFilterFactoryFromProtoTyped( + const envoy::config::filter::network::zookeeper_proxy::v1alpha1::ZooKeeperProxy& proto_config, + Server::Configuration::FactoryContext& context) override; +}; + +} // namespace ZooKeeperProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/network/zookeeper_proxy/zookeeper_decoder.cc b/source/extensions/filters/network/zookeeper_proxy/zookeeper_decoder.cc new file mode 100644 index 0000000000000..dddd22a0ef634 --- /dev/null +++ b/source/extensions/filters/network/zookeeper_proxy/zookeeper_decoder.cc @@ -0,0 +1,415 @@ +#include "extensions/filters/network/zookeeper_proxy/zookeeper_decoder.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace ZooKeeperProxy { + +constexpr uint32_t BOOL_LENGTH = 1; +constexpr uint32_t INT_LENGTH = 4; +constexpr uint32_t LONG_LENGTH = 8; +constexpr uint32_t XID_LENGTH = 4; +constexpr uint32_t OPCODE_LENGTH = 4; +constexpr uint32_t ZXID_LENGTH = 8; +constexpr uint32_t TIMEOUT_LENGTH = 4; +constexpr uint32_t SESSION_LENGTH = 8; +constexpr uint32_t MULTI_HEADER_LENGTH = 9; + +const char* createFlagsToString(CreateFlags flags) { + switch (flags) { + case CreateFlags::PERSISTENT: + return "persistent"; + case CreateFlags::PERSISTENT_SEQUENTIAL: + return "persistent_sequential"; + case CreateFlags::EPHEMERAL: + return "ephemeral"; + case CreateFlags::EPHEMERAL_SEQUENTIAL: + return "ephemeral_sequential"; + case CreateFlags::CONTAINER: + return "container"; + case CreateFlags::PERSISTENT_WITH_TTL: + return "persistent_with_ttl"; + case CreateFlags::PERSISTENT_SEQUENTIAL_WITH_TTL: + return "persistent_sequential_with_ttl"; + } + + 
return "unknown"; +} + +void DecoderImpl::decode(Buffer::Instance& data, uint64_t& offset) { + ENVOY_LOG(trace, "zookeeper_proxy: decoding {} bytes at offset {}", data.length(), offset); + + // Reset the helper's cursor, to ensure the current message stays within the + // allowed max length, even when it's different than the declared length + // by the message. + // + // Note: we need to keep two cursors — offset and helper_'s internal one — because + // a buffer may contain multiple messages, so offset is global and helper_'s + // internal cursor is reset for each individual message. + helper_.reset(); + + // Check message length. + const int32_t len = helper_.peekInt32(data, offset); + ensureMinLength(len, INT_LENGTH + XID_LENGTH); + ensureMaxLength(len); + + // Control requests, with XIDs <= 0. + // + // These are meant to control the state of a session: + // connect, keep-alive, authenticate and set initial watches. + // + // Note: setWatches is a command historically used to set watches + // right after connecting, typically used when roaming from one + // ZooKeeper server to the next. Thus, the special xid. + // However, some client implementations might expose setWatches + // as a regular data request, so we support that as well. + const int32_t xid = helper_.peekInt32(data, offset); + switch (static_cast(xid)) { + case XidCodes::CONNECT_XID: + parseConnect(data, offset, len); + return; + case XidCodes::PING_XID: + offset += OPCODE_LENGTH; + callbacks_.onPing(); + return; + case XidCodes::AUTH_XID: + parseAuthRequest(data, offset, len); + return; + case XidCodes::SET_WATCHES_XID: + offset += OPCODE_LENGTH; + parseSetWatchesRequest(data, offset, len); + return; + default: + // WATCH_XID is generated by the server, so that and everything + // else can be ignored here. + break; + } + + // Data requests, with XIDs > 0. 
+ // + // These are meant to happen after a successful control request, except + // for two cases: auth requests can happen at any time and ping requests + // must happen every 1/3 of the negotiated session timeout, to keep + // the session alive. + const int32_t opcode = helper_.peekInt32(data, offset); + switch (static_cast(opcode)) { + case OpCodes::GETDATA: + parseGetDataRequest(data, offset, len); + break; + case OpCodes::CREATE: + case OpCodes::CREATE2: + case OpCodes::CREATECONTAINER: + case OpCodes::CREATETTL: + parseCreateRequest(data, offset, len, static_cast(opcode)); + break; + case OpCodes::SETDATA: + parseSetRequest(data, offset, len); + break; + case OpCodes::GETCHILDREN: + parseGetChildrenRequest(data, offset, len, false); + break; + case OpCodes::GETCHILDREN2: + parseGetChildrenRequest(data, offset, len, true); + break; + case OpCodes::DELETE: + parseDeleteRequest(data, offset, len); + break; + case OpCodes::EXISTS: + parseExistsRequest(data, offset, len); + break; + case OpCodes::GETACL: + parseGetAclRequest(data, offset, len); + break; + case OpCodes::SETACL: + parseSetAclRequest(data, offset, len); + break; + case OpCodes::SYNC: + callbacks_.onSyncRequest(pathOnlyRequest(data, offset, len)); + break; + case OpCodes::CHECK: + parseCheckRequest(data, offset, len); + break; + case OpCodes::MULTI: + parseMultiRequest(data, offset, len); + break; + case OpCodes::RECONFIG: + parseReconfigRequest(data, offset, len); + break; + case OpCodes::SETWATCHES: + parseSetWatchesRequest(data, offset, len); + break; + case OpCodes::CHECKWATCHES: + parseXWatchesRequest(data, offset, len, OpCodes::CHECKWATCHES); + break; + case OpCodes::REMOVEWATCHES: + parseXWatchesRequest(data, offset, len, OpCodes::REMOVEWATCHES); + break; + case OpCodes::GETEPHEMERALS: + callbacks_.onGetEphemeralsRequest(pathOnlyRequest(data, offset, len)); + break; + case OpCodes::GETALLCHILDRENNUMBER: + callbacks_.onGetAllChildrenNumberRequest(pathOnlyRequest(data, offset, len)); + break; + 
case OpCodes::CLOSE: + callbacks_.onCloseRequest(); + break; + default: + throw EnvoyException(fmt::format("Unknown opcode: {}", opcode)); + } +} + +void DecoderImpl::ensureMinLength(const int32_t len, const int32_t minlen) const { + if (len < minlen) { + throw EnvoyException("Packet is too small"); + } +} + +void DecoderImpl::ensureMaxLength(const int32_t len) const { + if (static_cast(len) > max_packet_bytes_) { + throw EnvoyException("Packet is too big"); + } +} + +void DecoderImpl::parseConnect(Buffer::Instance& data, uint64_t& offset, uint32_t len) { + ensureMinLength(len, XID_LENGTH + ZXID_LENGTH + TIMEOUT_LENGTH + SESSION_LENGTH + INT_LENGTH); + + // Skip zxid, timeout, and session id. + offset += ZXID_LENGTH + TIMEOUT_LENGTH + SESSION_LENGTH; + + // Skip password. + skipString(data, offset); + + // Read readonly flag, if it's there. + bool readonly{}; + if (data.length() >= offset + 1) { + readonly = helper_.peekBool(data, offset); + } + + callbacks_.onConnect(readonly); +} + +void DecoderImpl::parseAuthRequest(Buffer::Instance& data, uint64_t& offset, uint32_t len) { + ensureMinLength(len, XID_LENGTH + OPCODE_LENGTH + INT_LENGTH + INT_LENGTH + INT_LENGTH); + + // Skip opcode + type. + offset += OPCODE_LENGTH + INT_LENGTH; + const std::string scheme = helper_.peekString(data, offset); + // Skip credential. + skipString(data, offset); + + callbacks_.onAuthRequest(scheme); +} + +void DecoderImpl::parseGetDataRequest(Buffer::Instance& data, uint64_t& offset, uint32_t len) { + ensureMinLength(len, XID_LENGTH + OPCODE_LENGTH + INT_LENGTH + BOOL_LENGTH); + + const std::string path = helper_.peekString(data, offset); + const bool watch = helper_.peekBool(data, offset); + + callbacks_.onGetDataRequest(path, watch); +} + +void DecoderImpl::skipAcls(Buffer::Instance& data, uint64_t& offset) { + const int32_t count = helper_.peekInt32(data, offset); + + for (int i = 0; i < count; ++i) { + // Perms. + helper_.peekInt32(data, offset); + // Skip scheme. 
+ skipString(data, offset); + // Skip cred. + skipString(data, offset); + } +} + +void DecoderImpl::parseCreateRequest(Buffer::Instance& data, uint64_t& offset, uint32_t len, + OpCodes opcode) { + ensureMinLength(len, XID_LENGTH + OPCODE_LENGTH + (3 * INT_LENGTH)); + + const std::string path = helper_.peekString(data, offset); + + // Skip data. + skipString(data, offset); + skipAcls(data, offset); + + const CreateFlags flags = static_cast(helper_.peekInt32(data, offset)); + callbacks_.onCreateRequest(path, flags, opcode); +} + +void DecoderImpl::parseSetRequest(Buffer::Instance& data, uint64_t& offset, uint32_t len) { + ensureMinLength(len, XID_LENGTH + OPCODE_LENGTH + (3 * INT_LENGTH)); + + const std::string path = helper_.peekString(data, offset); + // Skip data. + skipString(data, offset); + // Ignore version. + helper_.peekInt32(data, offset); + + callbacks_.onSetRequest(path); +} + +void DecoderImpl::parseGetChildrenRequest(Buffer::Instance& data, uint64_t& offset, uint32_t len, + const bool two) { + ensureMinLength(len, XID_LENGTH + OPCODE_LENGTH + INT_LENGTH + BOOL_LENGTH); + + const std::string path = helper_.peekString(data, offset); + const bool watch = helper_.peekBool(data, offset); + + callbacks_.onGetChildrenRequest(path, watch, two); +} + +void DecoderImpl::parseDeleteRequest(Buffer::Instance& data, uint64_t& offset, uint32_t len) { + ensureMinLength(len, XID_LENGTH + OPCODE_LENGTH + (2 * INT_LENGTH)); + + const std::string path = helper_.peekString(data, offset); + const int32_t version = helper_.peekInt32(data, offset); + + callbacks_.onDeleteRequest(path, version); +} + +void DecoderImpl::parseExistsRequest(Buffer::Instance& data, uint64_t& offset, uint32_t len) { + ensureMinLength(len, XID_LENGTH + OPCODE_LENGTH + INT_LENGTH + BOOL_LENGTH); + + const std::string path = helper_.peekString(data, offset); + const bool watch = helper_.peekBool(data, offset); + + callbacks_.onExistsRequest(path, watch); +} + +void 
DecoderImpl::parseGetAclRequest(Buffer::Instance& data, uint64_t& offset, uint32_t len) { + ensureMinLength(len, XID_LENGTH + OPCODE_LENGTH + INT_LENGTH); + + const std::string path = helper_.peekString(data, offset); + + callbacks_.onGetAclRequest(path); +} + +void DecoderImpl::parseSetAclRequest(Buffer::Instance& data, uint64_t& offset, uint32_t len) { + ensureMinLength(len, XID_LENGTH + OPCODE_LENGTH + (2 * INT_LENGTH)); + + const std::string path = helper_.peekString(data, offset); + skipAcls(data, offset); + const int32_t version = helper_.peekInt32(data, offset); + + callbacks_.onSetAclRequest(path, version); +} + +std::string DecoderImpl::pathOnlyRequest(Buffer::Instance& data, uint64_t& offset, uint32_t len) { + ensureMinLength(len, XID_LENGTH + OPCODE_LENGTH + INT_LENGTH); + return helper_.peekString(data, offset); +} + +void DecoderImpl::parseCheckRequest(Buffer::Instance& data, uint64_t& offset, uint32_t len) { + ensureMinLength(len, (2 * INT_LENGTH)); + + const std::string path = helper_.peekString(data, offset); + const int32_t version = helper_.peekInt32(data, offset); + + callbacks_.onCheckRequest(path, version); +} + +void DecoderImpl::parseMultiRequest(Buffer::Instance& data, uint64_t& offset, uint32_t len) { + // Treat empty transactions as a decoding error, there should be at least 1 header. + ensureMinLength(len, XID_LENGTH + OPCODE_LENGTH + MULTI_HEADER_LENGTH); + + while (true) { + const int32_t opcode = helper_.peekInt32(data, offset); + const bool done = helper_.peekBool(data, offset); + // Ignore error field. 
+ helper_.peekInt32(data, offset); + + if (done) { + break; + } + + switch (static_cast(opcode)) { + case OpCodes::CREATE: + parseCreateRequest(data, offset, len, OpCodes::CREATE); + break; + case OpCodes::SETDATA: + parseSetRequest(data, offset, len); + break; + case OpCodes::CHECK: + parseCheckRequest(data, offset, len); + break; + default: + throw EnvoyException(fmt::format("Unknown opcode within a transaction: {}", opcode)); + } + } + + callbacks_.onMultiRequest(); +} + +void DecoderImpl::parseReconfigRequest(Buffer::Instance& data, uint64_t& offset, uint32_t len) { + ensureMinLength(len, XID_LENGTH + OPCODE_LENGTH + (3 * INT_LENGTH) + LONG_LENGTH); + + // Skip joining. + skipString(data, offset); + // Skip leaving. + skipString(data, offset); + // Skip new members. + skipString(data, offset); + // Read config id. + helper_.peekInt64(data, offset); + + callbacks_.onReconfigRequest(); +} + +void DecoderImpl::parseSetWatchesRequest(Buffer::Instance& data, uint64_t& offset, uint32_t len) { + ensureMinLength(len, XID_LENGTH + OPCODE_LENGTH + (3 * INT_LENGTH)); + + // Data watches. + skipStrings(data, offset); + // Exist watches. + skipStrings(data, offset); + // Child watches. 
+ skipStrings(data, offset); + + callbacks_.onSetWatchesRequest(); +} + +void DecoderImpl::parseXWatchesRequest(Buffer::Instance& data, uint64_t& offset, uint32_t len, + OpCodes opcode) { + ensureMinLength(len, XID_LENGTH + OPCODE_LENGTH + (2 * INT_LENGTH)); + + const std::string path = helper_.peekString(data, offset); + const int32_t type = helper_.peekInt32(data, offset); + + if (opcode == OpCodes::CHECKWATCHES) { + callbacks_.onCheckWatchesRequest(path, type); + } else { + callbacks_.onRemoveWatchesRequest(path, type); + } +} + +void DecoderImpl::skipString(Buffer::Instance& data, uint64_t& offset) { + const int32_t slen = helper_.peekInt32(data, offset); + helper_.skip(slen, offset); +} + +void DecoderImpl::skipStrings(Buffer::Instance& data, uint64_t& offset) { + const int32_t count = helper_.peekInt32(data, offset); + + for (int i = 0; i < count; ++i) { + skipString(data, offset); + } +} + +void DecoderImpl::onData(Buffer::Instance& data) { + uint64_t offset = 0; + try { + while (offset < data.length()) { + const uint64_t current = offset; + decode(data, offset); + callbacks_.onRequestBytes(offset - current); + } + } catch (const EnvoyException& e) { + ENVOY_LOG(debug, "zookeeper_proxy: decoding exception {}", e.what()); + callbacks_.onDecodeError(); + } +} + +} // namespace ZooKeeperProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/network/zookeeper_proxy/zookeeper_decoder.h b/source/extensions/filters/network/zookeeper_proxy/zookeeper_decoder.h new file mode 100644 index 0000000000000..62144ef91006f --- /dev/null +++ b/source/extensions/filters/network/zookeeper_proxy/zookeeper_decoder.h @@ -0,0 +1,150 @@ +#pragma once +#include + +#include "envoy/common/platform.h" + +#include "common/buffer/buffer_impl.h" +#include "common/common/logger.h" + +#include "extensions/filters/network/zookeeper_proxy/zookeeper_utils.h" + +namespace Envoy { +namespace Extensions { +namespace 
NetworkFilters { +namespace ZooKeeperProxy { + +enum class XidCodes { + CONNECT_XID = 0, + WATCH_XID = -1, + PING_XID = -2, + AUTH_XID = -4, + SET_WATCHES_XID = -8 +}; + +enum class OpCodes { + CONNECT = 0, + CREATE = 1, + DELETE = 2, + EXISTS = 3, + GETDATA = 4, + SETDATA = 5, + GETACL = 6, + SETACL = 7, + GETCHILDREN = 8, + SYNC = 9, + PING = 11, + GETCHILDREN2 = 12, + CHECK = 13, + MULTI = 14, + CREATE2 = 15, + RECONFIG = 16, + CHECKWATCHES = 17, + REMOVEWATCHES = 18, + CREATECONTAINER = 19, + CREATETTL = 21, + CLOSE = -11, + SETAUTH = 100, + SETWATCHES = 101, + GETEPHEMERALS = 103, + GETALLCHILDRENNUMBER = 104 +}; + +enum class WatcherType { CHILDREN = 1, DATA = 2, ANY = 3 }; + +enum class CreateFlags { + PERSISTENT, + PERSISTENT_SEQUENTIAL, + EPHEMERAL, + EPHEMERAL_SEQUENTIAL, + CONTAINER, + PERSISTENT_WITH_TTL, + PERSISTENT_SEQUENTIAL_WITH_TTL +}; + +const char* createFlagsToString(CreateFlags flags); + +/** + * General callbacks for dispatching decoded ZooKeeper messages to a sink. 
+ */ +class DecoderCallbacks { +public: + virtual ~DecoderCallbacks() {} + + virtual void onDecodeError() PURE; + virtual void onRequestBytes(uint64_t bytes) PURE; + virtual void onConnect(bool readonly) PURE; + virtual void onPing() PURE; + virtual void onAuthRequest(const std::string& scheme) PURE; + virtual void onGetDataRequest(const std::string& path, bool watch) PURE; + virtual void onCreateRequest(const std::string& path, CreateFlags flags, OpCodes opcode) PURE; + virtual void onSetRequest(const std::string& path) PURE; + virtual void onGetChildrenRequest(const std::string& path, bool watch, bool v2) PURE; + virtual void onGetEphemeralsRequest(const std::string& path) PURE; + virtual void onGetAllChildrenNumberRequest(const std::string& path) PURE; + virtual void onDeleteRequest(const std::string& path, int32_t version) PURE; + virtual void onExistsRequest(const std::string& path, bool watch) PURE; + virtual void onGetAclRequest(const std::string& path) PURE; + virtual void onSetAclRequest(const std::string& path, int32_t version) PURE; + virtual void onSyncRequest(const std::string& path) PURE; + virtual void onCheckRequest(const std::string& path, int32_t version) PURE; + virtual void onMultiRequest() PURE; + virtual void onReconfigRequest() PURE; + virtual void onSetWatchesRequest() PURE; + virtual void onCheckWatchesRequest(const std::string& path, int32_t type) PURE; + virtual void onRemoveWatchesRequest(const std::string& path, int32_t type) PURE; + virtual void onCloseRequest() PURE; +}; + +/** + * ZooKeeper message decoder. 
+ */ +class Decoder { +public: + virtual ~Decoder() {} + + virtual void onData(Buffer::Instance& data) PURE; +}; + +typedef std::unique_ptr DecoderPtr; + +class DecoderImpl : public Decoder, Logger::Loggable { +public: + explicit DecoderImpl(DecoderCallbacks& callbacks, uint32_t max_packet_bytes) + : callbacks_(callbacks), max_packet_bytes_(max_packet_bytes), helper_(max_packet_bytes) {} + + // ZooKeeperProxy::Decoder + void onData(Buffer::Instance& data) override; + +private: + void decode(Buffer::Instance& data, uint64_t& offset); + void parseConnect(Buffer::Instance& data, uint64_t& offset, uint32_t len); + void parseAuthRequest(Buffer::Instance& data, uint64_t& offset, uint32_t len); + void parseGetDataRequest(Buffer::Instance& data, uint64_t& offset, uint32_t len); + void parseCreateRequest(Buffer::Instance& data, uint64_t& offset, uint32_t len, OpCodes opcode); + void skipAcls(Buffer::Instance& data, uint64_t& offset); + void parseSetRequest(Buffer::Instance& data, uint64_t& offset, uint32_t len); + void parseGetChildrenRequest(Buffer::Instance& data, uint64_t& offset, uint32_t len, bool two); + void parseDeleteRequest(Buffer::Instance& data, uint64_t& offset, uint32_t len); + void parseExistsRequest(Buffer::Instance& data, uint64_t& offset, uint32_t len); + void parseGetAclRequest(Buffer::Instance& data, uint64_t& offset, uint32_t len); + void parseSetAclRequest(Buffer::Instance& data, uint64_t& offset, uint32_t len); + void parseCheckRequest(Buffer::Instance& data, uint64_t& offset, uint32_t len); + void parseMultiRequest(Buffer::Instance& data, uint64_t& offset, uint32_t len); + void parseReconfigRequest(Buffer::Instance& data, uint64_t& offset, uint32_t len); + void parseSetWatchesRequest(Buffer::Instance& data, uint64_t& offset, uint32_t len); + void parseXWatchesRequest(Buffer::Instance& data, uint64_t& offset, uint32_t len, OpCodes opcode); + void skipString(Buffer::Instance& data, uint64_t& offset); + void skipStrings(Buffer::Instance& data, uint64_t& 
offset); + void ensureMinLength(int32_t len, int32_t minlen) const; + void ensureMaxLength(int32_t len) const; + std::string pathOnlyRequest(Buffer::Instance& data, uint64_t& offset, uint32_t len); + + DecoderCallbacks& callbacks_; + const uint32_t max_packet_bytes_; + BufferHelper helper_; +}; + +} // namespace ZooKeeperProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/network/zookeeper_proxy/zookeeper_filter.cc b/source/extensions/filters/network/zookeeper_proxy/zookeeper_filter.cc new file mode 100644 index 0000000000000..ac78aad9c7b71 --- /dev/null +++ b/source/extensions/filters/network/zookeeper_proxy/zookeeper_filter.cc @@ -0,0 +1,239 @@ +#include "extensions/filters/network/zookeeper_proxy/zookeeper_filter.h" + +#include "common/buffer/buffer_impl.h" +#include "common/common/assert.h" +#include "common/common/enum_to_int.h" +#include "common/common/fmt.h" +#include "common/common/logger.h" + +#include "extensions/filters/network/well_known_names.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace ZooKeeperProxy { + +ZooKeeperFilterConfig::ZooKeeperFilterConfig(const std::string& stat_prefix, + const uint32_t max_packet_bytes, Stats::Scope& scope) + : scope_(scope), max_packet_bytes_(max_packet_bytes), stat_prefix_(stat_prefix), + stats_(generateStats(stat_prefix, scope)) {} + +ZooKeeperFilter::ZooKeeperFilter(ZooKeeperFilterConfigSharedPtr config) + : config_(std::move(config)) {} + +void ZooKeeperFilter::initializeReadFilterCallbacks(Network::ReadFilterCallbacks& callbacks) { + read_callbacks_ = &callbacks; +} + +Network::FilterStatus ZooKeeperFilter::onData(Buffer::Instance& data, bool) { + doDecode(data); + return Network::FilterStatus::Continue; +} + +Network::FilterStatus ZooKeeperFilter::onWrite(Buffer::Instance&, bool) { + return Network::FilterStatus::Continue; +} + +Network::FilterStatus ZooKeeperFilter::onNewConnection() { return 
Network::FilterStatus::Continue; } + +void ZooKeeperFilter::doDecode(Buffer::Instance& buffer) { + clearDynamicMetadata(); + + if (!decoder_) { + decoder_ = createDecoder(*this); + } + + decoder_->onData(buffer); +} + +DecoderPtr ZooKeeperFilter::createDecoder(DecoderCallbacks& callbacks) { + return std::make_unique(callbacks, config_->maxPacketBytes()); +} + +void ZooKeeperFilter::setDynamicMetadata(const std::string& key, const std::string& value) { + setDynamicMetadata({{key, value}}); +} + +void ZooKeeperFilter::clearDynamicMetadata() { + envoy::api::v2::core::Metadata& dynamic_metadata = + read_callbacks_->connection().streamInfo().dynamicMetadata(); + auto& metadata = + (*dynamic_metadata.mutable_filter_metadata())[NetworkFilterNames::get().ZooKeeperProxy]; + metadata.mutable_fields()->clear(); +} + +void ZooKeeperFilter::setDynamicMetadata( + const std::vector>& data) { + envoy::api::v2::core::Metadata& dynamic_metadata = + read_callbacks_->connection().streamInfo().dynamicMetadata(); + ProtobufWkt::Struct metadata( + (*dynamic_metadata.mutable_filter_metadata())[NetworkFilterNames::get().ZooKeeperProxy]); + auto& fields = *metadata.mutable_fields(); + + for (const auto& pair : data) { + auto val = ProtobufWkt::Value(); + val.set_string_value(pair.second); + fields.insert({pair.first, val}); + } + + read_callbacks_->connection().streamInfo().setDynamicMetadata( + NetworkFilterNames::get().ZooKeeperProxy, metadata); +} + +void ZooKeeperFilter::onConnect(const bool readonly) { + if (readonly) { + config_->stats_.connect_readonly_rq_.inc(); + setDynamicMetadata("opname", "connect_readonly"); + } else { + config_->stats_.connect_rq_.inc(); + setDynamicMetadata("opname", "connect"); + } +} + +void ZooKeeperFilter::onDecodeError() { + config_->stats_.decoder_error_.inc(); + setDynamicMetadata("opname", "error"); +} + +void ZooKeeperFilter::onRequestBytes(const uint64_t bytes) { + config_->stats_.request_bytes_.add(bytes); + setDynamicMetadata("bytes", 
std::to_string(bytes)); +} + +void ZooKeeperFilter::onPing() { + config_->stats_.ping_rq_.inc(); + setDynamicMetadata("opname", "ping"); +} + +void ZooKeeperFilter::onAuthRequest(const std::string& scheme) { + config_->scope_.counter(fmt::format("{}.auth.{}_rq", config_->stat_prefix_, scheme)).inc(); + setDynamicMetadata("opname", "auth"); +} + +void ZooKeeperFilter::onGetDataRequest(const std::string& path, const bool watch) { + config_->stats_.getdata_rq_.inc(); + setDynamicMetadata({{"opname", "getdata"}, {"path", path}, {"watch", watch ? "true" : "false"}}); +} + +void ZooKeeperFilter::onCreateRequest(const std::string& path, const CreateFlags flags, + const OpCodes opcode) { + std::string opname; + + switch (opcode) { + case OpCodes::CREATE: + opname = "create"; + config_->stats_.create_rq_.inc(); + break; + case OpCodes::CREATE2: + opname = "create2"; + config_->stats_.create2_rq_.inc(); + break; + case OpCodes::CREATECONTAINER: + opname = "createcontainer"; + config_->stats_.createcontainer_rq_.inc(); + break; + case OpCodes::CREATETTL: + opname = "createttl"; + config_->stats_.createttl_rq_.inc(); + break; + default: + throw EnvoyException(fmt::format("Unknown opcode: {}", enumToSignedInt(opcode))); + break; + } + + setDynamicMetadata( + {{"opname", opname}, {"path", path}, {"create_type", createFlagsToString(flags)}}); +} + +void ZooKeeperFilter::onSetRequest(const std::string& path) { + config_->stats_.setdata_rq_.inc(); + setDynamicMetadata({{"opname", "setdata"}, {"path", path}}); +} + +void ZooKeeperFilter::onGetChildrenRequest(const std::string& path, const bool watch, + const bool v2) { + std::string opname = "getchildren"; + + if (v2) { + config_->stats_.getchildren2_rq_.inc(); + opname = "getchildren2"; + } else { + config_->stats_.getchildren_rq_.inc(); + } + + setDynamicMetadata({{"opname", opname}, {"path", path}, {"watch", watch ? 
"true" : "false"}}); +} + +void ZooKeeperFilter::onDeleteRequest(const std::string& path, const int32_t version) { + config_->stats_.remove_rq_.inc(); + setDynamicMetadata({{"opname", "remove"}, {"path", path}, {"version", std::to_string(version)}}); +} + +void ZooKeeperFilter::onExistsRequest(const std::string& path, const bool watch) { + config_->stats_.exists_rq_.inc(); + setDynamicMetadata({{"opname", "exists"}, {"path", path}, {"watch", watch ? "true" : "false"}}); +} + +void ZooKeeperFilter::onGetAclRequest(const std::string& path) { + config_->stats_.getacl_rq_.inc(); + setDynamicMetadata({{"opname", "getacl"}, {"path", path}}); +} + +void ZooKeeperFilter::onSetAclRequest(const std::string& path, const int32_t version) { + config_->stats_.setacl_rq_.inc(); + setDynamicMetadata({{"opname", "setacl"}, {"path", path}, {"version", std::to_string(version)}}); +} + +void ZooKeeperFilter::onSyncRequest(const std::string& path) { + config_->stats_.sync_rq_.inc(); + setDynamicMetadata({{"opname", "sync"}, {"path", path}}); +} + +void ZooKeeperFilter::onCheckRequest(const std::string&, const int32_t) { + config_->stats_.check_rq_.inc(); +} + +void ZooKeeperFilter::onCheckWatchesRequest(const std::string& path, const int32_t) { + config_->stats_.checkwatches_rq_.inc(); + setDynamicMetadata({{"opname", "checkwatches"}, {"path", path}}); +} + +void ZooKeeperFilter::onRemoveWatchesRequest(const std::string& path, const int32_t) { + config_->stats_.removewatches_rq_.inc(); + setDynamicMetadata({{"opname", "removewatches"}, {"path", path}}); +} + +void ZooKeeperFilter::onMultiRequest() { + config_->stats_.multi_rq_.inc(); + setDynamicMetadata("opname", "multi"); +} + +void ZooKeeperFilter::onReconfigRequest() { + config_->stats_.reconfig_rq_.inc(); + setDynamicMetadata("opname", "reconfig"); +} + +void ZooKeeperFilter::onSetWatchesRequest() { + config_->stats_.setwatches_rq_.inc(); + setDynamicMetadata("opname", "setwatches"); +} + +void 
ZooKeeperFilter::onGetEphemeralsRequest(const std::string& path) { + config_->stats_.getephemerals_rq_.inc(); + setDynamicMetadata({{"opname", "getephemerals"}, {"path", path}}); +} + +void ZooKeeperFilter::onGetAllChildrenNumberRequest(const std::string& path) { + config_->stats_.getallchildrennumber_rq_.inc(); + setDynamicMetadata({{"opname", "getallchildrennumber"}, {"path", path}}); +} + +void ZooKeeperFilter::onCloseRequest() { + config_->stats_.close_rq_.inc(); + setDynamicMetadata("opname", "close"); +} + +} // namespace ZooKeeperProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/network/zookeeper_proxy/zookeeper_filter.h b/source/extensions/filters/network/zookeeper_proxy/zookeeper_filter.h new file mode 100644 index 0000000000000..20cdfec0a8f40 --- /dev/null +++ b/source/extensions/filters/network/zookeeper_proxy/zookeeper_filter.h @@ -0,0 +1,141 @@ +#pragma once + +#include "envoy/access_log/access_log.h" +#include "envoy/network/connection.h" +#include "envoy/network/filter.h" +#include "envoy/stats/scope.h" +#include "envoy/stats/stats.h" +#include "envoy/stats/stats_macros.h" + +#include "common/common/logger.h" + +#include "extensions/filters/network/zookeeper_proxy/zookeeper_decoder.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace ZooKeeperProxy { + +/** + * All ZooKeeper proxy stats. 
@see stats_macros.h + */ +// clang-format off +#define ALL_ZOOKEEPER_PROXY_STATS(COUNTER) \ + COUNTER(decoder_error) \ + COUNTER(request_bytes) \ + COUNTER(connect_rq) \ + COUNTER(connect_readonly_rq) \ + COUNTER(getdata_rq) \ + COUNTER(create_rq) \ + COUNTER(create2_rq) \ + COUNTER(createcontainer_rq) \ + COUNTER(createttl_rq) \ + COUNTER(setdata_rq) \ + COUNTER(getchildren_rq) \ + COUNTER(getchildren2_rq) \ + COUNTER(getephemerals_rq) \ + COUNTER(getallchildrennumber_rq) \ + COUNTER(remove_rq) \ + COUNTER(exists_rq) \ + COUNTER(getacl_rq) \ + COUNTER(setacl_rq) \ + COUNTER(sync_rq) \ + COUNTER(ping_rq) \ + COUNTER(multi_rq) \ + COUNTER(reconfig_rq) \ + COUNTER(close_rq) \ + COUNTER(setauth_rq) \ + COUNTER(setwatches_rq) \ + COUNTER(checkwatches_rq) \ + COUNTER(removewatches_rq) \ + COUNTER(check_rq) \ +// clang-format on + +/** + * Struct definition for all ZooKeeper proxy stats. @see stats_macros.h + */ +struct ZooKeeperProxyStats { + ALL_ZOOKEEPER_PROXY_STATS(GENERATE_COUNTER_STRUCT) +}; + +/** + * Configuration for the ZooKeeper proxy filter. + */ +class ZooKeeperFilterConfig { +public: + ZooKeeperFilterConfig(const std::string &stat_prefix, uint32_t max_packet_bytes, Stats::Scope& scope); + + const ZooKeeperProxyStats& stats() { return stats_; } + uint32_t maxPacketBytes() const { return max_packet_bytes_; } + + Stats::Scope& scope_; + const uint32_t max_packet_bytes_; + const std::string stat_prefix_; + ZooKeeperProxyStats stats_; + +private: + ZooKeeperProxyStats generateStats(const std::string& prefix, + Stats::Scope& scope) { + return ZooKeeperProxyStats{ + ALL_ZOOKEEPER_PROXY_STATS(POOL_COUNTER_PREFIX(scope, prefix))}; + } +}; + +using ZooKeeperFilterConfigSharedPtr = std::shared_ptr; + +/** + * Implementation of ZooKeeper proxy filter. 
+ */ +class ZooKeeperFilter : public Network::Filter, DecoderCallbacks, Logger::Loggable { +public: + explicit ZooKeeperFilter(ZooKeeperFilterConfigSharedPtr config); + + // Network::ReadFilter + Network::FilterStatus onData(Buffer::Instance& data, bool end_stream) override; + Network::FilterStatus onNewConnection() override; + void initializeReadFilterCallbacks(Network::ReadFilterCallbacks& callbacks) override; + + // Network::WriteFilter + Network::FilterStatus onWrite(Buffer::Instance& data, bool end_stream) override; + + // ZooKeeperProxy::DecoderCallback + void onDecodeError() override; + void onRequestBytes(uint64_t bytes) override; + void onConnect(bool readonly) override; + void onPing() override; + void onAuthRequest(const std::string& scheme) override; + void onGetDataRequest(const std::string& path, bool watch) override; + void onCreateRequest(const std::string& path, CreateFlags flags, OpCodes opcode) override; + void onSetRequest(const std::string& path) override; + void onGetChildrenRequest(const std::string& path, bool watch, bool v2) override; + void onDeleteRequest(const std::string& path, int32_t version) override; + void onExistsRequest(const std::string& path, bool watch) override; + void onGetAclRequest(const std::string& path) override; + void onSetAclRequest(const std::string& path, int32_t version) override; + void onSyncRequest(const std::string& path) override; + void onCheckRequest(const std::string& path, int32_t version) override; + void onMultiRequest() override; + void onReconfigRequest() override; + void onSetWatchesRequest() override; + void onCheckWatchesRequest(const std::string& path, int32_t type) override; + void onRemoveWatchesRequest(const std::string& path, int32_t type) override; + void onGetEphemeralsRequest(const std::string& path) override; + void onGetAllChildrenNumberRequest(const std::string& path) override; + void onCloseRequest() override; + + void doDecode(Buffer::Instance& buffer); + DecoderPtr 
createDecoder(DecoderCallbacks& callbacks); + void setDynamicMetadata(const std::string& key, const std::string& value); + void setDynamicMetadata(const std::vector>& data); + void clearDynamicMetadata(); + +private: + Network::ReadFilterCallbacks* read_callbacks_{}; + ZooKeeperFilterConfigSharedPtr config_; + std::unique_ptr decoder_; +}; + +} // namespace ZooKeeperProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/network/zookeeper_proxy/zookeeper_utils.cc b/source/extensions/filters/network/zookeeper_proxy/zookeeper_utils.cc new file mode 100644 index 0000000000000..1a4ad1c7af4d2 --- /dev/null +++ b/source/extensions/filters/network/zookeeper_proxy/zookeeper_utils.cc @@ -0,0 +1,71 @@ +#include "extensions/filters/network/zookeeper_proxy/zookeeper_utils.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace ZooKeeperProxy { + +int32_t BufferHelper::peekInt32(Buffer::Instance& buffer, uint64_t& offset) { + ensureMaxLen(sizeof(int32_t)); + + int32_t val = buffer.peekBEInt(offset); + offset += sizeof(int32_t); + return val; +} + +int64_t BufferHelper::peekInt64(Buffer::Instance& buffer, uint64_t& offset) { + ensureMaxLen(sizeof(int64_t)); + + int64_t val = buffer.peekBEInt(offset); + offset += sizeof(int64_t); + return val; +} + +bool BufferHelper::peekBool(Buffer::Instance& buffer, uint64_t& offset) { + ensureMaxLen(1); + + const char byte = buffer.peekInt(offset); + const bool val = static_cast(byte); + offset += 1; + return val; +} + +std::string BufferHelper::peekString(Buffer::Instance& buffer, uint64_t& offset) { + std::string val; + uint32_t len = peekInt32(buffer, offset); + + if (len == 0) { + return val; + } + + if (buffer.length() < (offset + len)) { + throw EnvoyException("peekString: buffer is smaller than string length"); + } + + ensureMaxLen(len); + + std::unique_ptr data(new char[len]); + buffer.copyOut(offset, len, data.get()); + 
val.assign(data.get(), len); + offset += len; + + return val; +} + +void BufferHelper::skip(const uint32_t len, uint64_t& offset) { + offset += len; + current_ += len; +} + +void BufferHelper::ensureMaxLen(const uint32_t size) { + current_ += size; + + if (current_ > max_len_) { + throw EnvoyException("read beyond max length"); + } +} + +} // namespace ZooKeeperProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/network/zookeeper_proxy/zookeeper_utils.h b/source/extensions/filters/network/zookeeper_proxy/zookeeper_utils.h new file mode 100644 index 0000000000000..559ef0f63093d --- /dev/null +++ b/source/extensions/filters/network/zookeeper_proxy/zookeeper_utils.h @@ -0,0 +1,45 @@ +#pragma once +#include + +#include "envoy/common/platform.h" + +#include "common/buffer/buffer_impl.h" +#include "common/common/byte_order.h" +#include "common/common/logger.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace ZooKeeperProxy { + +/** + * Helper for extracting ZooKeeper data from a buffer. + * + * If at any point a peek is tried beyond max_len, an EnvoyException + * will be thrown. This is important to protect Envoy against malformed + * requests (e.g.: when the declared and actual length don't match). + * + * Note: ZooKeeper's protocol uses network byte ordering (big-endian). 
+ */ +class BufferHelper : public Logger::Loggable { +public: + BufferHelper(uint32_t max_len) : max_len_(max_len) {} + + int32_t peekInt32(Buffer::Instance& buffer, uint64_t& offset); + int64_t peekInt64(Buffer::Instance& buffer, uint64_t& offset); + std::string peekString(Buffer::Instance& buffer, uint64_t& offset); + bool peekBool(Buffer::Instance& buffer, uint64_t& offset); + void skip(uint32_t len, uint64_t& offset); + void reset() { current_ = 0; } + +private: + void ensureMaxLen(uint32_t size); + + uint32_t max_len_; + uint32_t current_{}; +}; + +} // namespace ZooKeeperProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/network/zookeeper_proxy/BUILD b/test/extensions/filters/network/zookeeper_proxy/BUILD new file mode 100644 index 0000000000000..81af4151cf112 --- /dev/null +++ b/test/extensions/filters/network/zookeeper_proxy/BUILD @@ -0,0 +1,27 @@ +licenses(["notice"]) # Apache 2 + +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_test_library", + "envoy_package", +) +load( + "//test/extensions:extensions_build_system.bzl", + "envoy_extension_cc_mock", + "envoy_extension_cc_test", + "envoy_extension_cc_test_library", +) + +envoy_package() + +envoy_extension_cc_test( + name = "zookeeper_filter_test", + srcs = [ + "zookeeper_filter_test.cc", + ], + extension_name = "envoy.filters.network.zookeeper_proxy", + deps = [ + "//source/extensions/filters/network/zookeeper_proxy:config", + "//test/mocks/network:network_mocks", + ], +) diff --git a/test/extensions/filters/network/zookeeper_proxy/zookeeper_filter_test.cc b/test/extensions/filters/network/zookeeper_proxy/zookeeper_filter_test.cc new file mode 100644 index 0000000000000..182ea1bba5700 --- /dev/null +++ b/test/extensions/filters/network/zookeeper_proxy/zookeeper_filter_test.cc @@ -0,0 +1,874 @@ +#include "common/buffer/buffer_impl.h" + +#include "extensions/filters/network/zookeeper_proxy/zookeeper_decoder.h" +#include 
"extensions/filters/network/zookeeper_proxy/zookeeper_filter.h" + +#include "test/mocks/network/mocks.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +using testing::NiceMock; + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace ZooKeeperProxy { + +bool protoMapEq(const ProtobufWkt::Struct& obj, const std::map& rhs) { + EXPECT_TRUE(rhs.size() > 0); + for (auto const& entry : rhs) { + EXPECT_EQ(obj.fields().at(entry.first).string_value(), entry.second); + } + return true; +} + +MATCHER_P(MapEq, rhs, "") { return protoMapEq(arg, rhs); } + +class ZooKeeperFilterTest : public testing::Test { +public: + ZooKeeperFilterTest() { ENVOY_LOG_MISC(info, "test"); } + + void initialize() { + config_ = std::make_shared(stat_prefix_, 1048576, scope_); + filter_ = std::make_unique(config_); + filter_->initializeReadFilterCallbacks(filter_callbacks_); + } + + Buffer::OwnedImpl encodeConnect(const bool readonly = false, const uint64_t zxid = 100, + const uint32_t session_timeout = 10, + const uint32_t session_id = 200, + const std::string& passwd = "") const { + Buffer::OwnedImpl buffer; + const uint32_t message_size = readonly ? 28 + passwd.length() + 1 : 28 + passwd.length(); + + buffer.writeBEInt(message_size); + buffer.writeBEInt(0); // Protocol version. + buffer.writeBEInt(zxid); + buffer.writeBEInt(session_timeout); + buffer.writeBEInt(session_id); + addString(buffer, passwd); + + if (readonly) { + const char readonly_flag = 0b1; + buffer.add(std::string(1, readonly_flag)); + } + + return buffer; + } + + Buffer::OwnedImpl encodeBadMessage() const { + Buffer::OwnedImpl buffer; + + // Bad length. + buffer.writeBEInt(1); + // Trailing int. 
+ buffer.writeBEInt(3); + + return buffer; + } + + Buffer::OwnedImpl encodeTooBigMessage() const { + Buffer::OwnedImpl buffer; + + buffer.writeBEInt(1048577); + + return buffer; + } + + Buffer::OwnedImpl encodeBiggerThanLengthMessage() const { + Buffer::OwnedImpl buffer; + + // Craft a delete request with a path that's longer than + // the declared message length. + buffer.writeBEInt(50); + buffer.writeBEInt(1000); + // Opcode. + buffer.writeBEInt(enumToSignedInt(OpCodes::DELETE)); + // Path. + addString(buffer, std::string(2 * 1024 * 1024, '*')); + // Version. + buffer.writeBEInt(-1); + + return buffer; + } + + Buffer::OwnedImpl encodePing() const { + Buffer::OwnedImpl buffer; + + buffer.writeBEInt(8); + buffer.writeBEInt(enumToSignedInt(XidCodes::PING_XID)); + buffer.writeBEInt(enumToInt(OpCodes::PING)); + + return buffer; + } + + Buffer::OwnedImpl encodeUnknownOpcode() const { + Buffer::OwnedImpl buffer; + + buffer.writeBEInt(8); + buffer.writeBEInt(1000); + buffer.writeBEInt(200); + + return buffer; + } + + Buffer::OwnedImpl encodeCloseRequest() const { + Buffer::OwnedImpl buffer; + + buffer.writeBEInt(8); + buffer.writeBEInt(1000); + buffer.writeBEInt(enumToSignedInt(OpCodes::CLOSE)); + + return buffer; + } + + Buffer::OwnedImpl encodeAuth(const std::string& scheme) const { + const std::string credential = "p@sswd"; + Buffer::OwnedImpl buffer; + + buffer.writeBEInt(28 + scheme.length() + credential.length()); + buffer.writeBEInt(enumToSignedInt(XidCodes::AUTH_XID)); + buffer.writeBEInt(enumToSignedInt(OpCodes::SETAUTH)); + // Type. + buffer.writeBEInt(0); + addString(buffer, scheme); + addString(buffer, credential); + + return buffer; + } + + Buffer::OwnedImpl + encodePathWatch(const std::string& path, const bool watch, + const int32_t opcode = enumToSignedInt(OpCodes::GETDATA)) const { + Buffer::OwnedImpl buffer; + + buffer.writeBEInt(13 + path.length()); + buffer.writeBEInt(1000); + // Opcode. + buffer.writeBEInt(opcode); + // Path. 
+ addString(buffer, path); + // Watch. + const char watch_flag = watch ? 0b1 : 0b0; + buffer.add(std::string(1, watch_flag)); + + return buffer; + } + + Buffer::OwnedImpl encodePathVersion(const std::string& path, const int32_t version, + const int32_t opcode = enumToSignedInt(OpCodes::GETDATA), + const bool txn = false) const { + Buffer::OwnedImpl buffer; + + if (!txn) { + buffer.writeBEInt(16 + path.length()); + buffer.writeBEInt(1000); + buffer.writeBEInt(opcode); + } + + // Path. + addString(buffer, path); + // Version + buffer.writeBEInt(version); + + return buffer; + } + + Buffer::OwnedImpl encodePath(const std::string& path, const int32_t opcode) const { + Buffer::OwnedImpl buffer; + + buffer.writeBEInt(8 + path.length()); + buffer.writeBEInt(1000); + // Opcode. + buffer.writeBEInt(opcode); + // Path. + addString(buffer, path); + + return buffer; + } + + Buffer::OwnedImpl encodePathLongerThanBuffer(const std::string& path, + const int32_t opcode) const { + Buffer::OwnedImpl buffer; + + buffer.writeBEInt(8 + path.length()); + buffer.writeBEInt(1000); + buffer.writeBEInt(opcode); + buffer.writeBEInt(path.length() * 2); + buffer.add(path); + + return buffer; + } + + Buffer::OwnedImpl + encodeCreateRequest(const std::string& path, const std::string& data, const CreateFlags flags, + const bool txn = false, + const int32_t opcode = enumToSignedInt(OpCodes::CREATE)) const { + Buffer::OwnedImpl buffer; + + if (!txn) { + buffer.writeBEInt(24 + path.length() + data.length()); + buffer.writeBEInt(1000); + buffer.writeBEInt(opcode); + } + + // Path. + addString(buffer, path); + // Data. + addString(buffer, data); + // Acls. + buffer.writeBEInt(0); + // Flags. 
+ buffer.writeBEInt(static_cast(flags)); + + return buffer; + } + + Buffer::OwnedImpl encodeSetRequest(const std::string& path, const std::string& data, + const int32_t version, const bool txn = false) const { + Buffer::OwnedImpl buffer; + + if (!txn) { + buffer.writeBEInt(20 + path.length() + data.length()); + buffer.writeBEInt(1000); + buffer.writeBEInt(enumToSignedInt(OpCodes::SETDATA)); + } + + // Path. + addString(buffer, path); + // Data. + addString(buffer, data); + // Version. + buffer.writeBEInt(version); + + return buffer; + } + + Buffer::OwnedImpl encodeDeleteRequest(const std::string& path, const int32_t version) const { + Buffer::OwnedImpl buffer; + + buffer.writeBEInt(16 + path.length()); + buffer.writeBEInt(1000); + // Opcode. + buffer.writeBEInt(enumToSignedInt(OpCodes::DELETE)); + // Path. + addString(buffer, path); + // Version. + buffer.writeBEInt(version); + + return buffer; + } + + Buffer::OwnedImpl encodeSetAclRequest(const std::string& path, const std::string& scheme, + const std::string& credential, + const int32_t version) const { + Buffer::OwnedImpl buffer; + + buffer.writeBEInt(32 + path.length() + scheme.length() + credential.length()); + buffer.writeBEInt(1000); + // Opcode. + buffer.writeBEInt(enumToSignedInt(OpCodes::SETACL)); + // Path. + addString(buffer, path); + + // Acls. + buffer.writeBEInt(1); + // Type. + buffer.writeBEInt(0); + // Scheme. + addString(buffer, scheme); + // Credential. + addString(buffer, credential); + + // Version. 
+ buffer.writeBEInt(version); + + return buffer; + } + + Buffer::OwnedImpl encodeReconfigRequest(const std::string& joining, const std::string& leaving, + const std::string& new_members, int64_t config_id) const { + Buffer::OwnedImpl buffer; + + buffer.writeBEInt(28 + joining.length() + leaving.length() + new_members.length()); + buffer.writeBEInt(1000); + buffer.writeBEInt(enumToSignedInt(OpCodes::RECONFIG)); + addString(buffer, joining); + addString(buffer, leaving); + addString(buffer, new_members); + buffer.writeBEInt(config_id); + + return buffer; + } + + Buffer::OwnedImpl encodeSetWatchesRequest(const std::vector& dataw, + const std::vector& existw, + const std::vector& childw, + int32_t xid = 1000) const { + Buffer::OwnedImpl buffer; + Buffer::OwnedImpl watches_buffer; + + addStrings(watches_buffer, dataw); + addStrings(watches_buffer, existw); + addStrings(watches_buffer, childw); + + buffer.writeBEInt(8 + watches_buffer.length()); + buffer.writeBEInt(xid); + buffer.writeBEInt(enumToSignedInt(OpCodes::SETWATCHES)); + buffer.add(watches_buffer); + + return buffer; + } + + Buffer::OwnedImpl + encodeMultiRequest(const std::vector>& ops) const { + Buffer::OwnedImpl buffer; + Buffer::OwnedImpl requests; + + for (const auto& op_pair : ops) { + // Header. + requests.writeBEInt(op_pair.first); + requests.add(std::string(1, 0b0)); + requests.writeBEInt(-1); + + // Payload. + requests.add(op_pair.second); + } + + // Done header. + requests.writeBEInt(-1); + requests.add(std::string(1, 0b1)); + requests.writeBEInt(-1); + + // Multi prefix. + buffer.writeBEInt(8 + requests.length()); + buffer.writeBEInt(1000); + buffer.writeBEInt(enumToSignedInt(OpCodes::MULTI)); + + // Requests. 
+ buffer.add(requests); + + return buffer; + } + + void addString(Buffer::OwnedImpl& buffer, const std::string& str) const { + buffer.writeBEInt(str.length()); + buffer.add(str); + } + + void addStrings(Buffer::OwnedImpl& buffer, const std::vector& watches) const { + buffer.writeBEInt(watches.size()); + + for (const auto& watch : watches) { + addString(buffer, watch); + } + } + + void expectSetDynamicMetadata(const std::map& expected) { + EXPECT_CALL(filter_callbacks_.connection_, streamInfo()) + .WillRepeatedly(ReturnRef(stream_info_)); + EXPECT_CALL(stream_info_, + setDynamicMetadata("envoy.filters.network.zookeeper_proxy", MapEq(expected))); + } + + void expectSetDynamicMetadata(const std::map& first, + const std::map& second) { + EXPECT_CALL(filter_callbacks_.connection_, streamInfo()) + .WillRepeatedly(ReturnRef(stream_info_)); + EXPECT_CALL(stream_info_, setDynamicMetadata(_, _)) + .WillOnce(Invoke([first](const std::string& key, const ProtobufWkt::Struct& obj) -> void { + EXPECT_STREQ(key.c_str(), "envoy.filters.network.zookeeper_proxy"); + protoMapEq(obj, first); + })) + .WillOnce(Invoke([second](const std::string& key, const ProtobufWkt::Struct& obj) -> void { + EXPECT_STREQ(key.c_str(), "envoy.filters.network.zookeeper_proxy"); + protoMapEq(obj, second); + })); + } + + void testCreate(CreateFlags flags, const OpCodes opcode = OpCodes::CREATE) { + initialize(); + Buffer::OwnedImpl data = + encodeCreateRequest("/foo", "bar", flags, false, enumToSignedInt(opcode)); + std::string opname = "create"; + + switch (opcode) { + case OpCodes::CREATECONTAINER: + opname = "createcontainer"; + break; + case OpCodes::CREATETTL: + opname = "createttl"; + break; + default: + break; + } + + expectSetDynamicMetadata( + {{"opname", opname}, {"path", "/foo"}, {"create_type", createFlagsToString(flags)}}, + {{"bytes", "35"}}); + + EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(data, false)); + + switch (opcode) { + case OpCodes::CREATE: + EXPECT_EQ(1UL, 
config_->stats().create_rq_.value()); + break; + case OpCodes::CREATECONTAINER: + EXPECT_EQ(1UL, config_->stats().createcontainer_rq_.value()); + break; + case OpCodes::CREATETTL: + EXPECT_EQ(1UL, config_->stats().createttl_rq_.value()); + break; + default: + break; + } + + EXPECT_EQ(35UL, config_->stats().request_bytes_.value()); + EXPECT_EQ(0UL, config_->stats().decoder_error_.value()); + } + + ZooKeeperFilterConfigSharedPtr config_; + std::unique_ptr filter_; + Stats::IsolatedStoreImpl scope_; + std::string stat_prefix_{"test.zookeeper"}; + NiceMock filter_callbacks_; + NiceMock stream_info_; +}; + +TEST_F(ZooKeeperFilterTest, Connect) { + initialize(); + + Buffer::OwnedImpl data = encodeConnect(); + + expectSetDynamicMetadata({{"opname", "connect"}}, {{"bytes", "32"}}); + + EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(data, false)); + EXPECT_EQ(1UL, config_->stats().connect_rq_.value()); + EXPECT_EQ(32UL, config_->stats().request_bytes_.value()); + EXPECT_EQ(0UL, config_->stats().decoder_error_.value()); +} + +TEST_F(ZooKeeperFilterTest, ConnectReadonly) { + initialize(); + + Buffer::OwnedImpl data = encodeConnect(true); + + expectSetDynamicMetadata({{"opname", "connect_readonly"}}, {{"bytes", "33"}}); + + EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(data, false)); + EXPECT_EQ(0UL, config_->stats().connect_rq_.value()); + EXPECT_EQ(1UL, config_->stats().connect_readonly_rq_.value()); + EXPECT_EQ(33UL, config_->stats().request_bytes_.value()); + EXPECT_EQ(0UL, config_->stats().decoder_error_.value()); +} + +TEST_F(ZooKeeperFilterTest, Fallback) { + initialize(); + + Buffer::OwnedImpl data = encodeBadMessage(); + + EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(data, false)); + EXPECT_EQ(0UL, config_->stats().connect_rq_.value()); + EXPECT_EQ(0UL, config_->stats().connect_readonly_rq_.value()); + EXPECT_EQ(1UL, config_->stats().decoder_error_.value()); +} + +TEST_F(ZooKeeperFilterTest, PacketTooBig) { + 
initialize(); + + Buffer::OwnedImpl data = encodeTooBigMessage(); + + EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(data, false)); + EXPECT_EQ(1UL, config_->stats().decoder_error_.value()); +} + +TEST_F(ZooKeeperFilterTest, PacketBiggerThanLength) { + initialize(); + + Buffer::OwnedImpl data = encodeBiggerThanLengthMessage(); + + EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(data, false)); + EXPECT_EQ(1UL, config_->stats().decoder_error_.value()); +} + +TEST_F(ZooKeeperFilterTest, UnknownOpcode) { + initialize(); + + Buffer::OwnedImpl data = encodeUnknownOpcode(); + + EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(data, false)); + EXPECT_EQ(1UL, config_->stats().decoder_error_.value()); +} + +TEST_F(ZooKeeperFilterTest, BufferSmallerThanStringLength) { + initialize(); + + Buffer::OwnedImpl data = encodePathLongerThanBuffer("/foo", enumToSignedInt(OpCodes::SYNC)); + + EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(data, false)); + EXPECT_EQ(1UL, config_->stats().decoder_error_.value()); +} + +TEST_F(ZooKeeperFilterTest, PingRequest) { + initialize(); + + Buffer::OwnedImpl data = encodePing(); + + expectSetDynamicMetadata({{"opname", "ping"}}, {{"bytes", "12"}}); + + EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(data, false)); + EXPECT_EQ(1UL, config_->stats().ping_rq_.value()); + EXPECT_EQ(12UL, config_->stats().request_bytes_.value()); + EXPECT_EQ(0UL, config_->stats().decoder_error_.value()); +} + +TEST_F(ZooKeeperFilterTest, AuthRequest) { + initialize(); + + Buffer::OwnedImpl data = encodeAuth("digest"); + + expectSetDynamicMetadata({{"opname", "auth"}}, {{"bytes", "36"}}); + + EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(data, false)); + EXPECT_EQ(scope_.counter("test.zookeeper.auth.digest_rq").value(), 1); + EXPECT_EQ(36UL, config_->stats().request_bytes_.value()); + EXPECT_EQ(0UL, config_->stats().decoder_error_.value()); +} + 
+TEST_F(ZooKeeperFilterTest, GetDataRequest) { + initialize(); + + Buffer::OwnedImpl data = encodePathWatch("/foo", true); + + expectSetDynamicMetadata({{"opname", "getdata"}, {"path", "/foo"}, {"watch", "true"}}, + {{"bytes", "21"}}); + + EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(data, false)); + EXPECT_EQ(21UL, config_->stats().request_bytes_.value()); + EXPECT_EQ(1UL, config_->stats().getdata_rq_.value()); + EXPECT_EQ(0UL, config_->stats().decoder_error_.value()); +} + +TEST_F(ZooKeeperFilterTest, GetDataRequestEmptyPath) { + initialize(); + + // It's valid to see an empty string as the path, which gets treated as / + // by the server. + Buffer::OwnedImpl data = encodePathWatch("", true); + + expectSetDynamicMetadata({{"opname", "getdata"}, {"path", ""}, {"watch", "true"}}, + {{"bytes", "17"}}); + + EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(data, false)); + EXPECT_EQ(17UL, config_->stats().request_bytes_.value()); + EXPECT_EQ(1UL, config_->stats().getdata_rq_.value()); + EXPECT_EQ(0UL, config_->stats().decoder_error_.value()); +} + +TEST_F(ZooKeeperFilterTest, CreateRequestPersistent) { testCreate(CreateFlags::PERSISTENT); } + +TEST_F(ZooKeeperFilterTest, CreateRequestPersistentSequential) { + testCreate(CreateFlags::PERSISTENT_SEQUENTIAL); +} + +TEST_F(ZooKeeperFilterTest, CreateRequestEphemeral) { testCreate(CreateFlags::EPHEMERAL); } + +TEST_F(ZooKeeperFilterTest, CreateRequestEphemeralSequential) { + testCreate(CreateFlags::EPHEMERAL_SEQUENTIAL); +} + +TEST_F(ZooKeeperFilterTest, CreateRequestContainer) { + testCreate(CreateFlags::CONTAINER, OpCodes::CREATECONTAINER); +} + +TEST_F(ZooKeeperFilterTest, CreateRequestTTL) { + testCreate(CreateFlags::PERSISTENT_WITH_TTL, OpCodes::CREATETTL); +} + +TEST_F(ZooKeeperFilterTest, CreateRequestTTLSequential) { + testCreate(CreateFlags::PERSISTENT_SEQUENTIAL_WITH_TTL); +} + +TEST_F(ZooKeeperFilterTest, CreateRequest2) { + initialize(); + + Buffer::OwnedImpl data = 
encodeCreateRequest("/foo", "bar", CreateFlags::PERSISTENT, false, + enumToSignedInt(OpCodes::CREATE2)); + + expectSetDynamicMetadata({{"opname", "create2"}, {"path", "/foo"}, {"create_type", "persistent"}}, + {{"bytes", "35"}}); + + EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(data, false)); + EXPECT_EQ(1UL, config_->stats().create2_rq_.value()); + EXPECT_EQ(35UL, config_->stats().request_bytes_.value()); + EXPECT_EQ(0UL, config_->stats().decoder_error_.value()); +} + +TEST_F(ZooKeeperFilterTest, SetRequest) { + initialize(); + + Buffer::OwnedImpl data = encodeSetRequest("/foo", "bar", -1); + + expectSetDynamicMetadata({{"opname", "setdata"}, {"path", "/foo"}}, {{"bytes", "31"}}); + + EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(data, false)); + EXPECT_EQ(1UL, config_->stats().setdata_rq_.value()); + EXPECT_EQ(31UL, config_->stats().request_bytes_.value()); + EXPECT_EQ(0UL, config_->stats().decoder_error_.value()); +} + +TEST_F(ZooKeeperFilterTest, GetChildrenRequest) { + initialize(); + + Buffer::OwnedImpl data = encodePathWatch("/foo", false, enumToSignedInt(OpCodes::GETCHILDREN)); + + expectSetDynamicMetadata({{"opname", "getchildren"}, {"path", "/foo"}, {"watch", "false"}}, + {{"bytes", "21"}}); + + EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(data, false)); + EXPECT_EQ(1UL, config_->stats().getchildren_rq_.value()); + EXPECT_EQ(21UL, config_->stats().request_bytes_.value()); + EXPECT_EQ(0UL, config_->stats().decoder_error_.value()); +} + +TEST_F(ZooKeeperFilterTest, GetChildrenRequest2) { + initialize(); + + Buffer::OwnedImpl data = encodePathWatch("/foo", false, enumToSignedInt(OpCodes::GETCHILDREN2)); + + expectSetDynamicMetadata({{"opname", "getchildren2"}, {"path", "/foo"}, {"watch", "false"}}, + {{"bytes", "21"}}); + + EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(data, false)); + EXPECT_EQ(1UL, config_->stats().getchildren2_rq_.value()); + EXPECT_EQ(21UL, 
config_->stats().request_bytes_.value()); + EXPECT_EQ(0UL, config_->stats().decoder_error_.value()); +} + +TEST_F(ZooKeeperFilterTest, DeleteRequest) { + initialize(); + + Buffer::OwnedImpl data = encodeDeleteRequest("/foo", -1); + + expectSetDynamicMetadata({{"opname", "remove"}, {"path", "/foo"}, {"version", "-1"}}, + {{"bytes", "24"}}); + + EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(data, false)); + EXPECT_EQ(1UL, config_->stats().remove_rq_.value()); + EXPECT_EQ(24UL, config_->stats().request_bytes_.value()); + EXPECT_EQ(0UL, config_->stats().decoder_error_.value()); +} + +TEST_F(ZooKeeperFilterTest, ExistsRequest) { + initialize(); + + Buffer::OwnedImpl data = encodePathWatch("/foo", false, enumToSignedInt(OpCodes::EXISTS)); + + expectSetDynamicMetadata({{"opname", "exists"}, {"path", "/foo"}, {"watch", "false"}}, + {{"bytes", "21"}}); + + EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(data, false)); + EXPECT_EQ(1UL, config_->stats().exists_rq_.value()); + EXPECT_EQ(21UL, config_->stats().request_bytes_.value()); + EXPECT_EQ(0UL, config_->stats().decoder_error_.value()); +} + +TEST_F(ZooKeeperFilterTest, GetAclRequest) { + initialize(); + + Buffer::OwnedImpl data = encodePath("/foo", enumToSignedInt(OpCodes::GETACL)); + + expectSetDynamicMetadata({{"opname", "getacl"}, {"path", "/foo"}}, {{"bytes", "20"}}); + + EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(data, false)); + EXPECT_EQ(1UL, config_->stats().getacl_rq_.value()); + EXPECT_EQ(20UL, config_->stats().request_bytes_.value()); + EXPECT_EQ(0UL, config_->stats().decoder_error_.value()); +} + +TEST_F(ZooKeeperFilterTest, SetAclRequest) { + initialize(); + + Buffer::OwnedImpl data = encodeSetAclRequest("/foo", "digest", "passwd", -1); + + expectSetDynamicMetadata({{"opname", "setacl"}, {"path", "/foo"}, {"version", "-1"}}, + {{"bytes", "52"}}); + + EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(data, false)); + EXPECT_EQ(1UL, 
config_->stats().setacl_rq_.value()); + EXPECT_EQ(52UL, config_->stats().request_bytes_.value()); + EXPECT_EQ(0UL, config_->stats().decoder_error_.value()); +} + +TEST_F(ZooKeeperFilterTest, SyncRequest) { + initialize(); + + Buffer::OwnedImpl data = encodePath("/foo", enumToSignedInt(OpCodes::SYNC)); + + expectSetDynamicMetadata({{"opname", "sync"}, {"path", "/foo"}}, {{"bytes", "20"}}); + + EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(data, false)); + EXPECT_EQ(1UL, config_->stats().sync_rq_.value()); + EXPECT_EQ(20UL, config_->stats().request_bytes_.value()); + EXPECT_EQ(0UL, config_->stats().decoder_error_.value()); +} + +TEST_F(ZooKeeperFilterTest, GetEphemeralsRequest) { + initialize(); + + Buffer::OwnedImpl data = encodePath("/foo", enumToSignedInt(OpCodes::GETEPHEMERALS)); + + expectSetDynamicMetadata({{"opname", "getephemerals"}, {"path", "/foo"}}, {{"bytes", "20"}}); + + EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(data, false)); + EXPECT_EQ(1UL, config_->stats().getephemerals_rq_.value()); + EXPECT_EQ(20UL, config_->stats().request_bytes_.value()); + EXPECT_EQ(0UL, config_->stats().decoder_error_.value()); +} + +TEST_F(ZooKeeperFilterTest, GetAllChildrenNumberRequest) { + initialize(); + + Buffer::OwnedImpl data = encodePath("/foo", enumToSignedInt(OpCodes::GETALLCHILDRENNUMBER)); + + expectSetDynamicMetadata({{"opname", "getallchildrennumber"}, {"path", "/foo"}}, + {{"bytes", "20"}}); + + EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(data, false)); + EXPECT_EQ(1UL, config_->stats().getallchildrennumber_rq_.value()); + EXPECT_EQ(20UL, config_->stats().request_bytes_.value()); + EXPECT_EQ(0UL, config_->stats().decoder_error_.value()); +} + +TEST_F(ZooKeeperFilterTest, CheckRequest) { + initialize(); + + Buffer::OwnedImpl data = encodePathVersion("/foo", 100, enumToSignedInt(OpCodes::CHECK)); + + expectSetDynamicMetadata({{"bytes", "24"}}); + + EXPECT_EQ(Envoy::Network::FilterStatus::Continue, 
filter_->onData(data, false)); + EXPECT_EQ(1UL, config_->stats().check_rq_.value()); + EXPECT_EQ(24UL, config_->stats().request_bytes_.value()); + EXPECT_EQ(0UL, config_->stats().decoder_error_.value()); +} + +TEST_F(ZooKeeperFilterTest, MultiRequest) { + initialize(); + + Buffer::OwnedImpl create1 = encodeCreateRequest("/foo", "1", CreateFlags::PERSISTENT, true); + Buffer::OwnedImpl create2 = encodeCreateRequest("/bar", "1", CreateFlags::PERSISTENT, true); + Buffer::OwnedImpl check1 = encodePathVersion("/foo", 100, enumToSignedInt(OpCodes::CHECK), true); + Buffer::OwnedImpl set1 = encodeSetRequest("/bar", "2", -1, true); + + std::vector> ops; + ops.push_back(std::make_pair(enumToSignedInt(OpCodes::CREATE), std::move(create1))); + ops.push_back(std::make_pair(enumToSignedInt(OpCodes::CREATE), std::move(create2))); + ops.push_back(std::make_pair(enumToSignedInt(OpCodes::CHECK), std::move(check1))); + ops.push_back(std::make_pair(enumToSignedInt(OpCodes::SETDATA), std::move(set1))); + + Buffer::OwnedImpl data = encodeMultiRequest(ops); + + EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(data, false)); + EXPECT_EQ(1UL, config_->stats().multi_rq_.value()); + EXPECT_EQ(128UL, config_->stats().request_bytes_.value()); + EXPECT_EQ(2UL, config_->stats().create_rq_.value()); + EXPECT_EQ(1UL, config_->stats().setdata_rq_.value()); + EXPECT_EQ(1UL, config_->stats().check_rq_.value()); + EXPECT_EQ(0UL, config_->stats().decoder_error_.value()); +} + +TEST_F(ZooKeeperFilterTest, ReconfigRequest) { + initialize(); + + Buffer::OwnedImpl data = encodeReconfigRequest("s1", "s2", "s3", 1000); + + expectSetDynamicMetadata({{"opname", "reconfig"}}, {{"bytes", "38"}}); + + EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(data, false)); + EXPECT_EQ(1UL, config_->stats().reconfig_rq_.value()); + EXPECT_EQ(38UL, config_->stats().request_bytes_.value()); + EXPECT_EQ(0UL, config_->stats().decoder_error_.value()); +} + +TEST_F(ZooKeeperFilterTest, 
SetWatchesRequestControlXid) { + initialize(); + + const std::vector dataw = {"/foo", "/bar"}; + const std::vector existw = {"/foo1", "/bar1"}; + const std::vector childw = {"/foo2", "/bar2"}; + + Buffer::OwnedImpl data = + encodeSetWatchesRequest(dataw, existw, childw, enumToSignedInt(XidCodes::SET_WATCHES_XID)); + + expectSetDynamicMetadata({{"opname", "setwatches"}}, {{"bytes", "76"}}); + + EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(data, false)); + EXPECT_EQ(1UL, config_->stats().setwatches_rq_.value()); + EXPECT_EQ(76UL, config_->stats().request_bytes_.value()); + EXPECT_EQ(0UL, config_->stats().decoder_error_.value()); +} + +TEST_F(ZooKeeperFilterTest, SetWatchesRequest) { + initialize(); + + const std::vector dataw = {"/foo", "/bar"}; + const std::vector existw = {"/foo1", "/bar1"}; + const std::vector childw = {"/foo2", "/bar2"}; + + Buffer::OwnedImpl data = encodeSetWatchesRequest(dataw, existw, childw); + + expectSetDynamicMetadata({{"opname", "setwatches"}}, {{"bytes", "76"}}); + + EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(data, false)); + EXPECT_EQ(1UL, config_->stats().setwatches_rq_.value()); + EXPECT_EQ(76UL, config_->stats().request_bytes_.value()); + EXPECT_EQ(0UL, config_->stats().decoder_error_.value()); +} + +TEST_F(ZooKeeperFilterTest, CheckWatchesRequest) { + initialize(); + + Buffer::OwnedImpl data = encodePathVersion("/foo", enumToSignedInt(WatcherType::CHILDREN), + enumToSignedInt(OpCodes::CHECKWATCHES)); + + expectSetDynamicMetadata({{"opname", "checkwatches"}, {"path", "/foo"}}, {{"bytes", "24"}}); + + EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(data, false)); + EXPECT_EQ(1UL, config_->stats().checkwatches_rq_.value()); + EXPECT_EQ(24UL, config_->stats().request_bytes_.value()); + EXPECT_EQ(0UL, config_->stats().decoder_error_.value()); +} + +TEST_F(ZooKeeperFilterTest, RemoveWatchesRequest) { + initialize(); + + Buffer::OwnedImpl data = encodePathVersion("/foo", 
enumToSignedInt(WatcherType::DATA), + enumToSignedInt(OpCodes::REMOVEWATCHES)); + + expectSetDynamicMetadata({{"opname", "removewatches"}, {"path", "/foo"}}, {{"bytes", "24"}}); + + EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(data, false)); + EXPECT_EQ(1UL, config_->stats().removewatches_rq_.value()); + EXPECT_EQ(24UL, config_->stats().request_bytes_.value()); + EXPECT_EQ(0UL, config_->stats().decoder_error_.value()); +} + +TEST_F(ZooKeeperFilterTest, CloseRequest) { + initialize(); + + Buffer::OwnedImpl data = encodeCloseRequest(); + + expectSetDynamicMetadata({{"opname", "close"}}, {{"bytes", "12"}}); + + EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onData(data, false)); + EXPECT_EQ(1UL, config_->stats().close_rq_.value()); + EXPECT_EQ(12UL, config_->stats().request_bytes_.value()); + EXPECT_EQ(0UL, config_->stats().decoder_error_.value()); +} + +} // namespace ZooKeeperProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/tools/spelling_dictionary.txt b/tools/spelling_dictionary.txt index 92bdf69c29f24..b94e12951480b 100644 --- a/tools/spelling_dictionary.txt +++ b/tools/spelling_dictionary.txt @@ -275,6 +275,7 @@ abcd absl accessor accessors +acls addr agg alloc @@ -762,9 +763,11 @@ xDS xeon xform xhtml +xid xxhash xxs xyz zag zig zlib +zxid From 03b28bd21f114d927c00ba59ea79ae902ad329fb Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Mon, 25 Mar 2019 15:00:41 -0400 Subject: [PATCH 010/165] tools: updating deprecation scripts (#6289) Updating per new file locations. Updates (unused) reloadable flags to default true. 
Risk Level: n/a (tooling) Testing: manual Docs Changes: n/a Release Notes: n/a Signed-off-by: Alyssa Wilk --- .../deprecate_features/deprecate_features.py | 119 +++++++++++++----- 1 file changed, 86 insertions(+), 33 deletions(-) diff --git a/tools/deprecate_features/deprecate_features.py b/tools/deprecate_features/deprecate_features.py index fd16c805d239f..3c88b1f2215ee 100644 --- a/tools/deprecate_features/deprecate_features.py +++ b/tools/deprecate_features/deprecate_features.py @@ -4,45 +4,98 @@ import subprocess import fileinput -grep_output = subprocess.check_output('grep -r "deprecated = true" api/*', shell=True) - -filenames_and_fields = set() - -# Compile the set of deprecated fields and the files they're in, deduping via set. -deprecated_regex = re.compile(r'.*\/([^\/]*.proto):[^=]* ([^= ]+) =.*') -for line in grep_output.splitlines(): - match = deprecated_regex.match(line) - if match: - filenames_and_fields.add(tuple([match.group(1), match.group(2)])) - else: - print 'no match in ' + line + ' please address manually!' - -# Now discard any deprecated features already listed in runtime_features -exiting_deprecated_regex = re.compile(r'.*"envoy.deprecated_features.(.*):(.*)",.*') -with open('source/common/runtime/runtime_features.h', 'r') as features: - for line in features.readlines(): - match = exiting_deprecated_regex.match(line) + +# Sorts out the list of deprecated proto fields which should be disallowed and returns a tuple of +# email and code changes. +def deprecate_proto(): + grep_output = subprocess.check_output('grep -r "deprecated = true" api/*', shell=True) + + filenames_and_fields = set() + + # Compile the set of deprecated fields and the files they're in, deduping via set. 
+ deprecated_regex = re.compile(r'.*\/([^\/]*.proto):[^=]* ([^= ]+) =.*') + for line in grep_output.splitlines(): + match = deprecated_regex.match(line) if match: - filenames_and_fields.discard(tuple([match.group(1), match.group(2)])) + filenames_and_fields.add(tuple([match.group(1), match.group(2)])) + else: + print 'no match in ' + line + ' please address manually!' + + # Now discard any deprecated features already listed in runtime_features + exiting_deprecated_regex = re.compile(r'.*"envoy.deprecated_features.(.*):(.*)",.*') + with open('source/common/runtime/runtime_features.cc', 'r') as features: + for line in features.readlines(): + match = exiting_deprecated_regex.match(line) + if match: + filenames_and_fields.discard(tuple([match.group(1), match.group(2)])) + + # Finally sort out the code to add to runtime_features.cc and a canned email for envoy-announce. + code_snippets = [] + email_snippets = [] + for (filename, field) in filenames_and_fields: + code_snippets.append(' "envoy.deprecated_features.' + filename + ':' + field + '",\n') + email_snippets.append(field + ' from ' + filename + '\n') + code = ''.join(code_snippets) + email = '' + if email_snippets: + email = ('\nThe following deprecated configuration fields will be disallowed by default:\n' + + ''.join(email_snippets)) -# Finally sort out the code to add to runtime_features.h and a canned email for envoy-announce. -code = '' -email = 'The latest Envoy release will deprecate the following configuration fields:\n' -for (filename, field) in filenames_and_fields: - code += (' "envoy.deprecated_features.' + filename + ':' + field + '",\n') - email += (field + ' from ' + filename + '\n') + return email, code + + +# Sorts out the list of features which should be default enabled and returns a tuple of +# email and code changes. +def flip_runtime_features(): + grep_output = subprocess.check_output( + 'grep -r "envoy.reloadable_features\." 
source/*', shell=True) + + features_to_flip = set() + + # Compile the set of features to flip, deduping via set. + deprecated_regex = re.compile(r'.*"(envoy.reloadable_features\.[^"]+)".*') + for line in grep_output.splitlines(): + match = deprecated_regex.match(line) + if match: + features_to_flip.add(match.group(1)) + else: + print 'no match in ' + line + ' please address manually!' -print '\n\nSuggested runtime changes: ' -print code + # Exempt the two test flags. + features_to_flip.remove('envoy.reloadable_features.my_feature_name') + features_to_flip.remove('envoy.reloadable_features.test_feature_true') -if not raw_input('Apply runtime changes? [yN] ').strip().lower() in ('y', 'yes'): + code_snippets = [] + email_snippets = [] + for (feature) in features_to_flip: + code_snippets.append(' "' + feature + '",\n') + email_snippets.append(feature + '\n') + code = ''.join(code_snippets) + email = '' + if email_snippets: + email = 'the following features will be defaulted to true:\n' + ''.join(email_snippets) + + return email, code + + +# Gather code and suggested email changes. +runtime_email, runtime_features_code = flip_runtime_features() +deprecate_email, deprecate_code = deprecate_proto() + +email = ('The Envoy maintainer team is cutting the next Envoy release. In the new release ' + + runtime_email + deprecate_email) + +print '\n\nSuggested envoy-announce email: \n' +print email + +if not raw_input('Apply relevant runtime changes? 
[yN] ').strip().lower() in ('y', 'yes'): exit(1) -for line in fileinput.FileInput('source/common/runtime/runtime_features.h', inplace=1): +for line in fileinput.FileInput('source/common/runtime/runtime_features.cc', inplace=1): + if 'envoy.reloadable_features.test_feature_true' in line: + line = line.replace(line, line + runtime_features_code) if 'envoy.deprecated_features.deprecated.proto:is_deprecated_fatal' in line: - line = line.replace(line, line + code) + line = line.replace(line, line + deprecate_code) print line, -print '\nChanges applied. Please create an upstream PR and send the following to envoy-announce:\n' - -print email +print '\nChanges applied. Please send the email above to envoy-announce.\n' From 1899110a9b9f682bcdd356b5d8c14e1c7f68d04b Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Mon, 25 Mar 2019 16:04:49 -0400 Subject: [PATCH 011/165] owners: promoting Lizan to senior maintainer! (#6374) Risk Level: n/a Testing: n/a Docs Changes: yes Release Notes: no Signed-off-by: Alyssa Wilk --- OWNERS.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/OWNERS.md b/OWNERS.md index 4bfa458f37bd7..f62e2f3036830 100644 --- a/OWNERS.md +++ b/OWNERS.md @@ -17,6 +17,9 @@ routing PRs, questions, etc. to the right place. * Stephan Zuercher ([zuercher](https://github.com/zuercher)) (zuercher@gmail.com) * Load balancing, upstream clusters and cluster manager, logging, complex HTTP routing (metadata, etc.), and macOS build. +* Lizan Zhou ([lizan](https://github.com/lizan)) (lizan@tetrate.io) + * gRPC, gRPC/JSON transcoding, and core networking (transport socket abstractions), Bazel, build + issues, and CI in general. # Maintainers @@ -24,8 +27,6 @@ routing PRs, questions, etc. to the right place. * Outlier detection, HTTP routing, xDS, configuration/operational questions. * Dan Noé ([dnoe](https://github.com/dnoe)) (dpn@google.com) * Base server (watchdog, workers, startup, stack trace handling, etc.). 
-* Lizan Zhou ([lizan](https://github.com/lizan)) (lizan@tetrate.io) - * gRPC, gRPC/JSON transcoding, and core networking (transport socket abstractions). * Dhi Aurrahman ([dio](https://github.com/dio)) (dio@tetrate.io) * Lua, access logging, and general miscellany. * Joshua Marantz ([jmarantz](https://github.com/jmarantz)) (jmarantz@google.com) From 78ad883b70764c27f8b391ee3a5056a64b403426 Mon Sep 17 00:00:00 2001 From: Fred Douglas <43351173+fredlas@users.noreply.github.com> Date: Mon, 25 Mar 2019 18:41:50 -0400 Subject: [PATCH 012/165] config: Remove entries from initial resource versions map (#6320) Remove entry from the "initial resource versions" map when the server informs us that the corresponding resource has gone away. Risk Level: low #4991 Signed-off-by: Fred Douglas --- .../common/config/delta_subscription_impl.h | 86 +++++++++++++++---- test/common/config/BUILD | 17 ++++ .../config/delta_subscription_impl_test.cc | 80 +++++++++++++++++ 3 files changed, 167 insertions(+), 16 deletions(-) create mode 100644 test/common/config/delta_subscription_impl_test.cc diff --git a/source/common/config/delta_subscription_impl.h b/source/common/config/delta_subscription_impl.h index 5afdc83a2dd3d..deeceec73d7c8 100644 --- a/source/common/config/delta_subscription_impl.h +++ b/source/common/config/delta_subscription_impl.h @@ -24,8 +24,6 @@ struct ResourceNameDiff { std::vector removed_; }; -const char EmptyVersion[] = ""; - /** * Manages the logic of a (non-aggregated) delta xDS subscription. * TODO(fredlas) add aggregation support. @@ -53,8 +51,7 @@ class DeltaSubscriptionImpl } // Enqueues and attempts to send a discovery request, (un)subscribing to resources missing from / - // added to the passed 'resources' argument, relative to resources_. Updates resources_ to - // 'resources'. + // added to the passed 'resources' argument, relative to resource_versions_. 
void buildAndQueueDiscoveryRequest(const std::vector& resources) { ResourceNameDiff diff; std::set_difference(resources.begin(), resources.end(), resource_names_.begin(), @@ -63,12 +60,10 @@ class DeltaSubscriptionImpl resources.end(), std::inserter(diff.removed_, diff.removed_.begin())); for (const auto& added : diff.added_) { - resources_[added] = EmptyVersion; - resource_names_.insert(added); + setResourceWaitingForServer(added); } for (const auto& removed : diff.removed_) { - resources_.erase(removed); - resource_names_.erase(removed); + lostInterestInResource(removed); } queueDiscoveryRequest(diff); } @@ -118,13 +113,28 @@ class DeltaSubscriptionImpl } } + envoy::api::v2::DeltaDiscoveryRequest internalRequestStateForTest() const { return request_; } + // Config::SubscriptionCallbacks void onConfigUpdate(const Protobuf::RepeatedPtrField& added_resources, const Protobuf::RepeatedPtrField& removed_resources, const std::string& version_info) { callbacks_->onConfigUpdate(added_resources, removed_resources, version_info); for (const auto& resource : added_resources) { - resources_[resource.name()] = resource.version(); + setResourceVersion(resource.name(), resource.version()); + } + // If a resource is gone, there is no longer a meaningful version for it that makes sense to + // provide to the server upon stream reconnect: either it will continue to not exist, in which + // case saying nothing is fine, or the server will bring back something new, which we should + // receive regardless (which is the logic that not specifying a version will get you). + // + // So, leave the version map entry present but blank. It will be left out of + // initial_resource_versions messages, but will remind us to explicitly tell the server "I'm + // cancelling my subscription" when we lose interest. 
+ for (const auto& resource_name : removed_resources) { + if (resource_names_.find(resource_name) != resource_names_.end()) { + setResourceWaitingForServer(resource_name); + } } stats_.update_success_.inc(); stats_.update_attempt_.inc(); @@ -161,8 +171,13 @@ class DeltaSubscriptionImpl clearRequestQueue(); request_.Clear(); - for (auto const& resource : resources_) { - (*request_.mutable_initial_resource_versions())[resource.first] = resource.second; + for (auto const& resource : resource_versions_) { + // Populate initial_resource_versions with the resource versions we currently have. Resources + // we are interested in, but are still waiting to get any version of from the server, do not + // belong in initial_resource_versions. + if (!resource.second.waitingForServer()) { + (*request_.mutable_initial_resource_versions())[resource.first] = resource.second.version(); + } } request_.set_type_url(type_url_); request_.mutable_node()->MergeFrom(local_info_.node()); @@ -210,11 +225,51 @@ class DeltaSubscriptionImpl init_fetch_timeout_timer_.reset(); } } - // A map from resource name to per-resource version. - std::unordered_map resources_; - // The keys of resources_. Only tracked separately because std::map does not provide an iterator - // into just its keys, e.g. for use in std::set_difference. + + class ResourceVersion { + public: + explicit ResourceVersion(absl::string_view version) : version_(version) {} + // Builds a ResourceVersion in the waitingForServer state. + ResourceVersion() {} + + // If true, we currently have no version of this resource - we are waiting for the server to + // provide us with one. + bool waitingForServer() const { return version_ == absl::nullopt; } + // Must not be called if waitingForServer() == true. + std::string version() const { + ASSERT(version_.has_value()); + return version_.value_or(""); + } + + private: + absl::optional version_; + }; + + // Use these helpers to avoid forgetting to update both at once. 
+ void setResourceVersion(const std::string& resource_name, const std::string& resource_version) { + resource_versions_[resource_name] = ResourceVersion(resource_version); + resource_names_.insert(resource_name); + } + + void setResourceWaitingForServer(const std::string& resource_name) { + resource_versions_[resource_name] = ResourceVersion(); + resource_names_.insert(resource_name); + } + + void lostInterestInResource(const std::string& resource_name) { + resource_versions_.erase(resource_name); + resource_names_.erase(resource_name); + } + + // A map from resource name to per-resource version. The keys of this map are exactly the resource + // names we are currently interested in. Those in the waitingForServer state currently don't have + // any version for that resource: we need to inform the server if we lose interest in them, but we + // also need to *not* include them in the initial_resource_versions map upon a reconnect. + std::unordered_map resource_versions_; + // The keys of resource_versions_. Only tracked separately because std::map does not provide an + // iterator into just its keys, e.g. for use in std::set_difference. std::unordered_set resource_names_; + const std::string type_url_; SubscriptionCallbacks* callbacks_{}; // In-flight or previously sent request. 
@@ -224,7 +279,6 @@ class DeltaSubscriptionImpl absl::optional pending_; const LocalInfo::LocalInfo& local_info_; - SubscriptionStats stats_; Event::Dispatcher& dispatcher_; std::chrono::milliseconds init_fetch_timeout_; diff --git a/test/common/config/BUILD b/test/common/config/BUILD index f3f853f6856e1..7fb02b385b3ae 100644 --- a/test/common/config/BUILD +++ b/test/common/config/BUILD @@ -10,6 +10,23 @@ load( envoy_package() +envoy_cc_test( + name = "delta_subscription_impl_test", + srcs = ["delta_subscription_impl_test.cc"], + deps = [ + ":delta_subscription_test_harness", + "//source/common/config:delta_subscription_lib", + "//source/common/stats:isolated_store_lib", + "//test/mocks:common_lib", + "//test/mocks/config:config_mocks", + "//test/mocks/event:event_mocks", + "//test/mocks/grpc:grpc_mocks", + "//test/mocks/local_info:local_info_mocks", + "//test/mocks/runtime:runtime_mocks", + "//test/test_common:logging_lib", + ], +) + envoy_cc_test( name = "filesystem_subscription_impl_test", srcs = ["filesystem_subscription_impl_test.cc"], diff --git a/test/common/config/delta_subscription_impl_test.cc b/test/common/config/delta_subscription_impl_test.cc new file mode 100644 index 0000000000000..cbe980a526505 --- /dev/null +++ b/test/common/config/delta_subscription_impl_test.cc @@ -0,0 +1,80 @@ +#include "test/common/config/delta_subscription_test_harness.h" + +using testing::AnyNumber; +using testing::UnorderedElementsAre; + +namespace Envoy { +namespace Config { +namespace { + +class DeltaSubscriptionImplTest : public DeltaSubscriptionTestHarness, public testing::Test {}; + +TEST_F(DeltaSubscriptionImplTest, ResourceGoneLeadsToBlankInitialVersion) { + // Envoy is interested in three resources: name1, name2, and name3. + startSubscription({"name1", "name2", "name3"}); + + // Ignore these for now, although at the very end there is one we will care about. 
+ EXPECT_CALL(async_stream_, sendMessage(_, _)).Times(AnyNumber()); + + // Semi-hack: we don't want the requests to actually get sent, since it would clear out the + // request_ that we want to inspect. pause() does the trick! + subscription_->pause(); + + // The xDS server's first update includes items for name1 and 2, but not 3. + Protobuf::RepeatedPtrField add1_2; + auto* resource = add1_2.Add(); + resource->set_name("name1"); + resource->set_version("version1A"); + resource = add1_2.Add(); + resource->set_name("name2"); + resource->set_version("version2A"); + subscription_->onConfigUpdate(add1_2, {}, "debugversion1"); + subscription_->handleStreamEstablished(); + envoy::api::v2::DeltaDiscoveryRequest cur_request = subscription_->internalRequestStateForTest(); + EXPECT_EQ("version1A", cur_request.initial_resource_versions().at("name1")); + EXPECT_EQ("version2A", cur_request.initial_resource_versions().at("name2")); + EXPECT_EQ(cur_request.initial_resource_versions().end(), + cur_request.initial_resource_versions().find("name3")); + + // The next update updates 1, removes 2, and adds 3. The map should then have 1 and 3. + Protobuf::RepeatedPtrField add1_3; + resource = add1_3.Add(); + resource->set_name("name1"); + resource->set_version("version1B"); + resource = add1_3.Add(); + resource->set_name("name3"); + resource->set_version("version3A"); + Protobuf::RepeatedPtrField remove2; + *remove2.Add() = "name2"; + subscription_->onConfigUpdate(add1_3, remove2, "debugversion2"); + subscription_->handleStreamEstablished(); + cur_request = subscription_->internalRequestStateForTest(); + EXPECT_EQ("version1B", cur_request.initial_resource_versions().at("name1")); + EXPECT_EQ(cur_request.initial_resource_versions().end(), + cur_request.initial_resource_versions().find("name2")); + EXPECT_EQ("version3A", cur_request.initial_resource_versions().at("name3")); + + // The next update removes 1 and 3. The map we send the server should be empty... 
+ Protobuf::RepeatedPtrField remove1_3; + *remove1_3.Add() = "name1"; + *remove1_3.Add() = "name3"; + subscription_->onConfigUpdate({}, remove1_3, "debugversion3"); + subscription_->handleStreamEstablished(); + cur_request = subscription_->internalRequestStateForTest(); + EXPECT_TRUE(cur_request.initial_resource_versions().empty()); + + // ...but our own map should remember our interest. In particular, losing interest in all 3 should + // cause their names to appear in the resource_names_unsubscribe field of a DeltaDiscoveryRequest. + subscription_->resume(); // now we do want the request to actually get sendMessage()'d. + EXPECT_CALL(async_stream_, sendMessage(_, _)).WillOnce([](const Protobuf::Message& msg, bool) { + auto sent_request = static_cast(&msg); + EXPECT_THAT(sent_request->resource_names_subscribe(), UnorderedElementsAre("name4")); + EXPECT_THAT(sent_request->resource_names_unsubscribe(), + UnorderedElementsAre("name1", "name2", "name3")); + }); + subscription_->subscribe({"name4"}); // (implies "we no longer care about name1,2,3") +} + +} // namespace +} // namespace Config +} // namespace Envoy From 046e98904f6df60f0c548ffe77ffb5f5f980179d Mon Sep 17 00:00:00 2001 From: Maxime Bedard Date: Tue, 26 Mar 2019 13:12:03 -0400 Subject: [PATCH 013/165] redis: prefixed routing (#5658) Signed-off-by: Maxime Bedard --- DEPRECATED.md | 3 + .../network/redis_proxy/v2/redis_proxy.proto | 63 +++++- docs/root/intro/arch_overview/redis.rst | 5 +- docs/root/intro/version_history.rst | 1 + source/common/common/utility.h | 34 ++- .../filters/network/redis_proxy/BUILD | 29 ++- .../redis_proxy/command_splitter_impl.cc | 41 ++-- .../redis_proxy/command_splitter_impl.h | 30 ++- .../filters/network/redis_proxy/config.cc | 43 +++- .../filters/network/redis_proxy/conn_pool.h | 2 +- .../network/redis_proxy/conn_pool_impl.h | 1 - .../network/redis_proxy/proxy_filter.cc | 2 +- .../network/redis_proxy/proxy_filter.h | 1 - .../filters/network/redis_proxy/router.h | 42 ++++ 
.../network/redis_proxy/router_impl.cc | 68 ++++++ .../filters/network/redis_proxy/router_impl.h | 55 +++++ test/common/common/utility_test.cc | 37 ++++ .../filters/network/redis_proxy/BUILD | 13 ++ .../redis_proxy/command_lookup_speed_test.cc | 8 +- .../redis_proxy/command_splitter_impl_test.cc | 24 +-- .../network/redis_proxy/config_test.cc | 15 ++ .../redis_proxy/conn_pool_impl_test.cc | 5 +- .../filters/network/redis_proxy/mocks.cc | 3 + .../filters/network/redis_proxy/mocks.h | 12 ++ .../network/redis_proxy/proxy_filter_test.cc | 2 +- .../network/redis_proxy/router_impl_test.cc | 199 ++++++++++++++++++ 26 files changed, 660 insertions(+), 78 deletions(-) create mode 100644 source/extensions/filters/network/redis_proxy/router.h create mode 100644 source/extensions/filters/network/redis_proxy/router_impl.cc create mode 100644 source/extensions/filters/network/redis_proxy/router_impl.h create mode 100644 test/extensions/filters/network/redis_proxy/router_impl_test.cc diff --git a/DEPRECATED.md b/DEPRECATED.md index f64cd7a3415b6..0e331d82556af 100644 --- a/DEPRECATED.md +++ b/DEPRECATED.md @@ -11,6 +11,9 @@ A logged warning is expected for each deprecated item that is in deprecation win * Use of `enabled` in `CorsPolicy`, found in [route.proto](https://github.com/envoyproxy/envoy/blob/master/api/envoy/api/v2/route/route.proto). Set the `filter_enabled` field instead. +* Use of google.protobuf.Struct for extension opaque configs is deprecated. Use google.protobuf.Any instead or pack +google.protobuf.Struct in google.protobuf.Any. +* Use of `cluster`, found in [redis-proxy.proto](https://github.com/envoyproxy/envoy/blob/master/api/envoy/config/filter/network/redis_proxy/v2/redis_proxy.proto) is deprecated. Set a `PrefixRoutes.catch_all_cluster` instead. 
## Version 1.9.0 (Dec 20, 2018) diff --git a/api/envoy/config/filter/network/redis_proxy/v2/redis_proxy.proto b/api/envoy/config/filter/network/redis_proxy/v2/redis_proxy.proto index cd8c18b128755..696bf26b8b5c9 100644 --- a/api/envoy/config/filter/network/redis_proxy/v2/redis_proxy.proto +++ b/api/envoy/config/filter/network/redis_proxy/v2/redis_proxy.proto @@ -22,7 +22,13 @@ message RedisProxy { // Name of cluster from cluster manager. See the :ref:`configuration section // ` of the architecture overview for recommendations on // configuring the backing cluster. - string cluster = 2 [(validate.rules).string.min_bytes = 1]; + // + // .. attention:: + // + // This field is deprecated. Use a :ref:`catch-all + // cluster` + // instead. + string cluster = 2 [deprecated = true]; // Redis connection pool settings. message ConnPoolSettings { @@ -48,10 +54,63 @@ message RedisProxy { bool enable_hashtagging = 2; } - // Network settings for the connection pool to the upstream cluster. + // Network settings for the connection pool to the upstream clusters. ConnPoolSettings settings = 3 [(validate.rules).message.required = true]; // Indicates that latency stat should be computed in microseconds. By default it is computed in // milliseconds. bool latency_in_micros = 4; + + message PrefixRoutes { + message Route { + // String prefix that must match the beginning of the keys. Envoy will always favor the + // longest match. + string prefix = 1 [(validate.rules).string.min_bytes = 1]; + + // Indicates if the prefix needs to be removed from the key when forwarded. + bool remove_prefix = 2; + + // Upstream cluster to forward the command to. + string cluster = 3 [(validate.rules).string.min_bytes = 1]; + } + + // List of prefix routes. + repeated Route routes = 1 [(gogoproto.nullable) = false]; + + // Indicates that prefix matching should be case insensitive. + bool case_insensitive = 2; + + // Optional catch-all route to forward commands that doesn't match any of the routes. 
The + // catch-all route becomes required when no routes are specified. + string catch_all_cluster = 3; + } + + // List of **unique** prefixes used to separate keys from different workloads to different + // clusters. Envoy will always favor the longest match first in case of overlap. A catch-all + // cluster can be used to forward commands when there is no match. Time complexity of the + // lookups are in O(min(longest key prefix, key length)). + // + // Example: + // + // .. code-block:: yaml + // + // prefix_routes: + // routes: + // - prefix: "ab" + // cluster: "cluster_a" + // - prefix: "abc" + // cluster: "cluster_b" + // + // When using the above routes, the following prefixes would be sent to: + // + // * 'get abc:users' would retrive the key 'abc:users' from cluster_b. + // * 'get ab:users' would retrive the key 'ab:users' from cluster_a. + // * 'get z:users' would return a NoUpstreamHost error. A :ref:`catch-all + // cluster` + // would have retrieved the key from that cluster instead. + // + // See the :ref:`configuration section + // ` of the architecture overview for recommendations on + // configuring the backing clusters. + PrefixRoutes prefix_routes = 5 [(gogoproto.nullable) = false]; } diff --git a/docs/root/intro/arch_overview/redis.rst b/docs/root/intro/arch_overview/redis.rst index 044ea66553726..b3aa16565ad78 100644 --- a/docs/root/intro/arch_overview/redis.rst +++ b/docs/root/intro/arch_overview/redis.rst @@ -8,7 +8,9 @@ In this mode, the goals of Envoy are to maintain availability and partition tole over consistency. This is the key point when comparing Envoy to `Redis Cluster `_. Envoy is designed as a best-effort cache, meaning that it will not try to reconcile inconsistent data or keep a globally consistent -view of cluster membership. +view of cluster membership. It also supports routing commands from different workload to +different to different upstream clusters based on their access patterns, eviction, or isolation +requirements. 
The Redis project offers a thorough reference on partitioning as it relates to Redis. See "`Partitioning: how to split data among multiple Redis instances @@ -22,6 +24,7 @@ The Redis project offers a thorough reference on partitioning as it relates to R * Detailed command statistics. * Active and passive healthchecking. * Hash tagging. +* Prefix routing. **Planned future enhancements**: diff --git a/docs/root/intro/version_history.rst b/docs/root/intro/version_history.rst index b307ddc5d1bdb..18ce98def67c6 100644 --- a/docs/root/intro/version_history.rst +++ b/docs/root/intro/version_history.rst @@ -51,6 +51,7 @@ Version history * ratelimit: removed deprecated rate limit configuration from bootstrap. * redis: added :ref:`hashtagging ` to guarantee a given key's upstream. * redis: added :ref:`latency stats ` for commands. +* redis: added :ref:`prefix routing ` to enable routing commands based on their key's prefix to different upstream. * redis: added :ref:`success and error stats ` for commands. * redis: migrate hash function for host selection to `MurmurHash2 `_ from std::hash. MurmurHash2 is compatible with std::hash in GNU libstdc++ 3.4.20 or above. This is typically the case when compiled on Linux and not macOS. * redis: added :ref:`latency_in_micros ` to specify the redis commands stats time unit in microseconds. diff --git a/source/common/common/utility.h b/source/common/common/utility.h index 785df6d8aa404..9eaddb7f64da1 100644 --- a/source/common/common/utility.h +++ b/source/common/common/utility.h @@ -568,8 +568,11 @@ template struct TrieLookupTable { * Adds an entry to the Trie at the given Key. * @param key the key used to add the entry. * @param value the value to be associated with the key. + * @param overwrite_existing will overwrite the value when the value for a given key already + * exists. + * @return false when a value already exists for the given key. 
*/ - void add(const char* key, Value value) { + bool add(const char* key, Value value, bool overwrite_existing = true) { TrieEntry* current = &root_; while (uint8_t c = *key) { if (!current->entries_[c]) { @@ -578,7 +581,11 @@ template struct TrieLookupTable { current = current->entries_[c].get(); key++; } + if (current->value_ && !overwrite_existing) { + return false; + } current->value_ = value; + return true; } /** @@ -599,6 +606,31 @@ template struct TrieLookupTable { return current->value_; } + /** + * Finds the entry associated with the longest prefix. Complexity is O(min(longest key prefix, key + * length)) + * @param key the key used to find. + * @return the value matching the longest prefix based on the key. + */ + Value findLongestPrefix(const char* key) const { + const TrieEntry* current = &root_; + const TrieEntry* result = nullptr; + while (uint8_t c = *key) { + if (current->value_) { + result = current; + } + + // https://github.com/facebook/mcrouter/blob/master/mcrouter/lib/fbi/cpp/Trie-inl.h#L126-L143 + current = current->entries_[c].get(); + if (current == nullptr) { + return result ? result->value_ : nullptr; + } + + key++; + } + return current ? 
current->value_ : result->value_; + } + TrieEntry root_; }; diff --git a/source/extensions/filters/network/redis_proxy/BUILD b/source/extensions/filters/network/redis_proxy/BUILD index 8cd0a234462e0..911edafb83684 100644 --- a/source/extensions/filters/network/redis_proxy/BUILD +++ b/source/extensions/filters/network/redis_proxy/BUILD @@ -30,13 +30,22 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "router_interface", + hdrs = ["router.h"], + deps = [ + ":conn_pool_interface", + "@envoy_api//envoy/config/filter/network/redis_proxy/v2:redis_proxy_cc", + ], +) + envoy_cc_library( name = "command_splitter_lib", srcs = ["command_splitter_impl.cc"], hdrs = ["command_splitter_impl.h"], deps = [ ":command_splitter_interface", - ":conn_pool_interface", + ":router_interface", "//include/envoy/stats:stats_macros", "//include/envoy/stats:timespan", "//source/common/common:assert_lib", @@ -54,7 +63,6 @@ envoy_cc_library( hdrs = ["conn_pool_impl.h"], deps = [ ":conn_pool_interface", - "//include/envoy/router:router_interface", "//include/envoy/thread_local:thread_local_interface", "//include/envoy/upstream:cluster_manager_interface", "//source/common/buffer:buffer_lib", @@ -73,6 +81,7 @@ envoy_cc_library( hdrs = ["proxy_filter.h"], deps = [ ":command_splitter_interface", + ":router_interface", "//include/envoy/network:drain_decision_interface", "//include/envoy/network:filter_interface", "//include/envoy/upstream:cluster_manager_interface", @@ -95,7 +104,21 @@ envoy_cc_library( "//source/extensions/filters/network/common:factory_base_lib", "//source/extensions/filters/network/common/redis:codec_lib", "//source/extensions/filters/network/redis_proxy:command_splitter_lib", - "//source/extensions/filters/network/redis_proxy:conn_pool_lib", "//source/extensions/filters/network/redis_proxy:proxy_filter_lib", + "//source/extensions/filters/network/redis_proxy:router_lib", + ], +) + +envoy_cc_library( + name = "router_lib", + srcs = ["router_impl.cc"], + hdrs = 
["router_impl.h"], + deps = [ + ":router_interface", + "//include/envoy/thread_local:thread_local_interface", + "//include/envoy/upstream:cluster_manager_interface", + "//source/common/common:to_lower_table_lib", + "//source/extensions/filters/network/redis_proxy:conn_pool_lib", + "@envoy_api//envoy/config/filter/network/redis_proxy/v2:redis_proxy_cc", ], ) diff --git a/source/extensions/filters/network/redis_proxy/command_splitter_impl.cc b/source/extensions/filters/network/redis_proxy/command_splitter_impl.cc index beea0fbaa32ee..415a754e0ac6a 100644 --- a/source/extensions/filters/network/redis_proxy/command_splitter_impl.cc +++ b/source/extensions/filters/network/redis_proxy/command_splitter_impl.cc @@ -59,15 +59,15 @@ void SingleServerRequest::cancel() { handle_ = nullptr; } -SplitRequestPtr SimpleRequest::create(ConnPool::Instance& conn_pool, +SplitRequestPtr SimpleRequest::create(Router& router, const Common::Redis::RespValue& incoming_request, SplitCallbacks& callbacks, CommandStats& command_stats, TimeSource& time_source, bool latency_in_micros) { std::unique_ptr request_ptr{ new SimpleRequest(callbacks, command_stats, time_source, latency_in_micros)}; - request_ptr->handle_ = conn_pool.makeRequest(incoming_request.asArray()[1].asString(), - incoming_request, *request_ptr); + request_ptr->handle_ = + router.makeRequest(incoming_request.asArray()[1].asString(), incoming_request, *request_ptr); if (!request_ptr->handle_) { request_ptr->callbacks_.onResponse(Utility::makeError(Response::get().NoUpstreamHost)); return nullptr; @@ -76,7 +76,7 @@ SplitRequestPtr SimpleRequest::create(ConnPool::Instance& conn_pool, return std::move(request_ptr); } -SplitRequestPtr EvalRequest::create(ConnPool::Instance& conn_pool, +SplitRequestPtr EvalRequest::create(Router& router, const Common::Redis::RespValue& incoming_request, SplitCallbacks& callbacks, CommandStats& command_stats, TimeSource& time_source, bool latency_in_micros) { @@ -91,8 +91,8 @@ SplitRequestPtr 
EvalRequest::create(ConnPool::Instance& conn_pool, std::unique_ptr request_ptr{ new EvalRequest(callbacks, command_stats, time_source, latency_in_micros)}; - request_ptr->handle_ = conn_pool.makeRequest(incoming_request.asArray()[3].asString(), - incoming_request, *request_ptr); + request_ptr->handle_ = + router.makeRequest(incoming_request.asArray()[3].asString(), incoming_request, *request_ptr); if (!request_ptr->handle_) { command_stats.error_.inc(); request_ptr->callbacks_.onResponse(Utility::makeError(Response::get().NoUpstreamHost)); @@ -123,7 +123,7 @@ void FragmentedRequest::onChildFailure(uint32_t index) { onChildResponse(Utility::makeError(Response::get().UpstreamFailure), index); } -SplitRequestPtr MGETRequest::create(ConnPool::Instance& conn_pool, +SplitRequestPtr MGETRequest::create(Router& router, const Common::Redis::RespValue& incoming_request, SplitCallbacks& callbacks, CommandStats& command_stats, TimeSource& time_source, bool latency_in_micros) { @@ -152,8 +152,8 @@ SplitRequestPtr MGETRequest::create(ConnPool::Instance& conn_pool, single_mget.asArray()[1].asString() = incoming_request.asArray()[i].asString(); ENVOY_LOG(debug, "redis: parallel get: '{}'", single_mget.toString()); - pending_request.handle_ = conn_pool.makeRequest(incoming_request.asArray()[i].asString(), - single_mget, pending_request); + pending_request.handle_ = + router.makeRequest(incoming_request.asArray()[i].asString(), single_mget, pending_request); if (!pending_request.handle_) { pending_request.onResponse(Utility::makeError(Response::get().NoUpstreamHost)); } @@ -195,7 +195,7 @@ void MGETRequest::onChildResponse(Common::Redis::RespValuePtr&& value, uint32_t } } -SplitRequestPtr MSETRequest::create(ConnPool::Instance& conn_pool, +SplitRequestPtr MSETRequest::create(Router& router, const Common::Redis::RespValue& incoming_request, SplitCallbacks& callbacks, CommandStats& command_stats, TimeSource& time_source, bool latency_in_micros) { @@ -231,8 +231,8 @@ SplitRequestPtr 
MSETRequest::create(ConnPool::Instance& conn_pool, single_mset.asArray()[2].asString() = incoming_request.asArray()[i + 1].asString(); ENVOY_LOG(debug, "redis: parallel set: '{}'", single_mset.toString()); - pending_request.handle_ = conn_pool.makeRequest(incoming_request.asArray()[i].asString(), - single_mset, pending_request); + pending_request.handle_ = + router.makeRequest(incoming_request.asArray()[i].asString(), single_mset, pending_request); if (!pending_request.handle_) { pending_request.onResponse(Utility::makeError(Response::get().NoUpstreamHost)); } @@ -270,7 +270,7 @@ void MSETRequest::onChildResponse(Common::Redis::RespValuePtr&& value, uint32_t } } -SplitRequestPtr SplitKeysSumResultRequest::create(ConnPool::Instance& conn_pool, +SplitRequestPtr SplitKeysSumResultRequest::create(Router& router, const Common::Redis::RespValue& incoming_request, SplitCallbacks& callbacks, CommandStats& command_stats, @@ -299,8 +299,8 @@ SplitRequestPtr SplitKeysSumResultRequest::create(ConnPool::Instance& conn_pool, single_fragment.asArray()[1].asString() = incoming_request.asArray()[i].asString(); ENVOY_LOG(debug, "redis: parallel {}: '{}'", incoming_request.asArray()[0].asString(), single_fragment.toString()); - pending_request.handle_ = conn_pool.makeRequest(incoming_request.asArray()[i].asString(), - single_fragment, pending_request); + pending_request.handle_ = router.makeRequest(incoming_request.asArray()[i].asString(), + single_fragment, pending_request); if (!pending_request.handle_) { pending_request.onResponse(Utility::makeError(Response::get().NoUpstreamHost)); } @@ -337,12 +337,11 @@ void SplitKeysSumResultRequest::onChildResponse(Common::Redis::RespValuePtr&& va } } -InstanceImpl::InstanceImpl(ConnPool::InstancePtr&& conn_pool, Stats::Scope& scope, - const std::string& stat_prefix, TimeSource& time_source, - bool latency_in_micros) - : conn_pool_(std::move(conn_pool)), simple_command_handler_(*conn_pool_), - eval_command_handler_(*conn_pool_), 
mget_handler_(*conn_pool_), mset_handler_(*conn_pool_), - split_keys_sum_result_handler_(*conn_pool_), +InstanceImpl::InstanceImpl(RouterPtr&& router, Stats::Scope& scope, const std::string& stat_prefix, + TimeSource& time_source, bool latency_in_micros) + : router_(std::move(router)), simple_command_handler_(*router_), + eval_command_handler_(*router_), mget_handler_(*router_), mset_handler_(*router_), + split_keys_sum_result_handler_(*router_), stats_{ALL_COMMAND_SPLITTER_STATS(POOL_COUNTER_PREFIX(scope, stat_prefix + "splitter."))}, latency_in_micros_(latency_in_micros), time_source_(time_source) { for (const std::string& command : Common::Redis::SupportedCommands::simpleCommands()) { diff --git a/source/extensions/filters/network/redis_proxy/command_splitter_impl.h b/source/extensions/filters/network/redis_proxy/command_splitter_impl.h index b7ac2b90f409b..45ac46b71cd37 100644 --- a/source/extensions/filters/network/redis_proxy/command_splitter_impl.h +++ b/source/extensions/filters/network/redis_proxy/command_splitter_impl.h @@ -17,6 +17,7 @@ #include "extensions/filters/network/common/redis/client_impl.h" #include "extensions/filters/network/redis_proxy/command_splitter.h" #include "extensions/filters/network/redis_proxy/conn_pool.h" +#include "extensions/filters/network/redis_proxy/router.h" namespace Envoy { namespace Extensions { @@ -68,9 +69,9 @@ class CommandHandler { class CommandHandlerBase { protected: - CommandHandlerBase(ConnPool::Instance& conn_pool) : conn_pool_(conn_pool) {} + CommandHandlerBase(Router& router) : router_(router) {} - ConnPool::Instance& conn_pool_; + Router& router_; }; class SplitRequestBase : public SplitRequest { @@ -121,8 +122,7 @@ class SingleServerRequest : public SplitRequestBase, public Common::Redis::Clien */ class SimpleRequest : public SingleServerRequest { public: - static SplitRequestPtr create(ConnPool::Instance& conn_pool, - const Common::Redis::RespValue& incoming_request, + static SplitRequestPtr create(Router& 
router, const Common::Redis::RespValue& incoming_request, SplitCallbacks& callbacks, CommandStats& command_stats, TimeSource& time_source, bool latency_in_micros); @@ -137,8 +137,7 @@ class SimpleRequest : public SingleServerRequest { */ class EvalRequest : public SingleServerRequest { public: - static SplitRequestPtr create(ConnPool::Instance& conn_pool, - const Common::Redis::RespValue& incoming_request, + static SplitRequestPtr create(Router& router, const Common::Redis::RespValue& incoming_request, SplitCallbacks& callbacks, CommandStats& command_stats, TimeSource& time_source, bool latency_in_micros); @@ -195,8 +194,7 @@ class FragmentedRequest : public SplitRequestBase { */ class MGETRequest : public FragmentedRequest, Logger::Loggable { public: - static SplitRequestPtr create(ConnPool::Instance& conn_pool, - const Common::Redis::RespValue& incoming_request, + static SplitRequestPtr create(Router& router, const Common::Redis::RespValue& incoming_request, SplitCallbacks& callbacks, CommandStats& command_stats, TimeSource& time_source, bool latency_in_micros); @@ -217,8 +215,7 @@ class MGETRequest : public FragmentedRequest, Logger::Loggable { public: - static SplitRequestPtr create(ConnPool::Instance& conn_pool, - const Common::Redis::RespValue& incoming_request, + static SplitRequestPtr create(Router& router, const Common::Redis::RespValue& incoming_request, SplitCallbacks& callbacks, CommandStats& command_stats, TimeSource& time_source, bool latency_in_micros); @@ -240,8 +237,7 @@ class SplitKeysSumResultRequest : public FragmentedRequest, Logger::Loggable { public: - static SplitRequestPtr create(ConnPool::Instance& conn_pool, - const Common::Redis::RespValue& incoming_request, + static SplitRequestPtr create(Router& router, const Common::Redis::RespValue& incoming_request, SplitCallbacks& callbacks, CommandStats& command_stats, TimeSource& time_source, bool latency_in_micros); @@ -261,11 +257,11 @@ class MSETRequest : public FragmentedRequest, 
Logger::Loggable class CommandHandlerFactory : public CommandHandler, CommandHandlerBase { public: - CommandHandlerFactory(ConnPool::Instance& conn_pool) : CommandHandlerBase(conn_pool) {} + CommandHandlerFactory(Router& router) : CommandHandlerBase(router) {} SplitRequestPtr startRequest(const Common::Redis::RespValue& request, SplitCallbacks& callbacks, CommandStats& command_stats, TimeSource& time_source, bool latency_in_micros) { - return RequestClass::create(conn_pool_, request, callbacks, command_stats, time_source, + return RequestClass::create(router_, request, callbacks, command_stats, time_source, latency_in_micros); } }; @@ -288,8 +284,8 @@ struct InstanceStats { class InstanceImpl : public Instance, Logger::Loggable { public: - InstanceImpl(ConnPool::InstancePtr&& conn_pool, Stats::Scope& scope, - const std::string& stat_prefix, TimeSource& time_source, bool latency_in_micros); + InstanceImpl(RouterPtr&& router, Stats::Scope& scope, const std::string& stat_prefix, + TimeSource& time_source, bool latency_in_micros); // RedisProxy::CommandSplitter::Instance SplitRequestPtr makeRequest(const Common::Redis::RespValue& request, @@ -307,7 +303,7 @@ class InstanceImpl : public Instance, Logger::Loggable { CommandHandler& handler); void onInvalidRequest(SplitCallbacks& callbacks); - ConnPool::InstancePtr conn_pool_; + RouterPtr router_; CommandHandlerFactory simple_command_handler_; CommandHandlerFactory eval_command_handler_; CommandHandlerFactory mget_handler_; diff --git a/source/extensions/filters/network/redis_proxy/config.cc b/source/extensions/filters/network/redis_proxy/config.cc index bae74e8633713..9838c2cc5ebf4 100644 --- a/source/extensions/filters/network/redis_proxy/config.cc +++ b/source/extensions/filters/network/redis_proxy/config.cc @@ -11,8 +11,8 @@ #include "extensions/filters/network/common/redis/client_impl.h" #include "extensions/filters/network/common/redis/codec_impl.h" #include 
"extensions/filters/network/redis_proxy/command_splitter_impl.h" -#include "extensions/filters/network/redis_proxy/conn_pool_impl.h" #include "extensions/filters/network/redis_proxy/proxy_filter.h" +#include "extensions/filters/network/redis_proxy/router_impl.h" namespace Envoy { namespace Extensions { @@ -24,18 +24,43 @@ Network::FilterFactoryCb RedisProxyFilterConfigFactory::createFilterFactoryFromP Server::Configuration::FactoryContext& context) { ASSERT(!proto_config.stat_prefix().empty()); - ASSERT(!proto_config.cluster().empty()); ASSERT(proto_config.has_settings()); ProxyFilterConfigSharedPtr filter_config(std::make_shared( proto_config, context.scope(), context.drainDecision(), context.runtime())); - ConnPool::InstancePtr conn_pool( - new ConnPool::InstanceImpl(filter_config->cluster_name_, context.clusterManager(), - Common::Redis::Client::ClientFactoryImpl::instance_, - context.threadLocal(), proto_config.settings())); - std::shared_ptr splitter(new CommandSplitter::InstanceImpl( - std::move(conn_pool), context.scope(), filter_config->stat_prefix_, context.timeSource(), - proto_config.latency_in_micros())); + + envoy::config::filter::network::redis_proxy::v2::RedisProxy::PrefixRoutes prefix_routes( + proto_config.prefix_routes()); + + // set the catch-all route from the deprecated cluster and settings parameters. 
+ if (prefix_routes.catch_all_cluster().empty() && prefix_routes.routes_size() == 0) { + if (proto_config.cluster().empty()) { + throw EnvoyException("cannot configure a redis-proxy without any upstream"); + } + + prefix_routes.set_catch_all_cluster(proto_config.cluster()); + } + + std::set unique_clusters; + for (auto& route : prefix_routes.routes()) { + unique_clusters.emplace(route.cluster()); + } + unique_clusters.emplace(prefix_routes.catch_all_cluster()); + + Upstreams upstreams; + for (auto& cluster : unique_clusters) { + upstreams.emplace(cluster, std::make_shared( + cluster, context.clusterManager(), + Common::Redis::Client::ClientFactoryImpl::instance_, + context.threadLocal(), proto_config.settings())); + } + + auto router = std::make_unique(prefix_routes, std::move(upstreams)); + + std::shared_ptr splitter = + std::make_shared( + std::move(router), context.scope(), filter_config->stat_prefix_, context.timeSource(), + proto_config.latency_in_micros()); return [splitter, filter_config](Network::FilterManager& filter_manager) -> void { Common::Redis::DecoderFactoryImpl factory; filter_manager.addReadFilter(std::make_shared( diff --git a/source/extensions/filters/network/redis_proxy/conn_pool.h b/source/extensions/filters/network/redis_proxy/conn_pool.h index 442219e79b547..713e4f7310cc5 100644 --- a/source/extensions/filters/network/redis_proxy/conn_pool.h +++ b/source/extensions/filters/network/redis_proxy/conn_pool.h @@ -36,7 +36,7 @@ class Instance { Common::Redis::Client::PoolCallbacks& callbacks) PURE; }; -typedef std::unique_ptr InstancePtr; +typedef std::shared_ptr InstanceSharedPtr; } // namespace ConnPool } // namespace RedisProxy diff --git a/source/extensions/filters/network/redis_proxy/conn_pool_impl.h b/source/extensions/filters/network/redis_proxy/conn_pool_impl.h index 1dfb363573ab2..17facb93afbc4 100644 --- a/source/extensions/filters/network/redis_proxy/conn_pool_impl.h +++ b/source/extensions/filters/network/redis_proxy/conn_pool_impl.h 
@@ -37,7 +37,6 @@ class InstanceImpl : public Instance { const std::string& cluster_name, Upstream::ClusterManager& cm, Common::Redis::Client::ClientFactory& client_factory, ThreadLocal::SlotAllocator& tls, const envoy::config::filter::network::redis_proxy::v2::RedisProxy::ConnPoolSettings& config); - // RedisProxy::ConnPool::Instance Common::Redis::Client::PoolRequest* makeRequest(const std::string& key, const Common::Redis::RespValue& request, diff --git a/source/extensions/filters/network/redis_proxy/proxy_filter.cc b/source/extensions/filters/network/redis_proxy/proxy_filter.cc index d5fc143e9be09..f36d692ea9cae 100644 --- a/source/extensions/filters/network/redis_proxy/proxy_filter.cc +++ b/source/extensions/filters/network/redis_proxy/proxy_filter.cc @@ -17,7 +17,7 @@ namespace RedisProxy { ProxyFilterConfig::ProxyFilterConfig( const envoy::config::filter::network::redis_proxy::v2::RedisProxy& config, Stats::Scope& scope, const Network::DrainDecision& drain_decision, Runtime::Loader& runtime) - : drain_decision_(drain_decision), runtime_(runtime), cluster_name_(config.cluster()), + : drain_decision_(drain_decision), runtime_(runtime), stat_prefix_(fmt::format("redis.{}.", config.stat_prefix())), stats_(generateStats(stat_prefix_, scope)) {} diff --git a/source/extensions/filters/network/redis_proxy/proxy_filter.h b/source/extensions/filters/network/redis_proxy/proxy_filter.h index 3f8dc62d6eecd..ae2141a322d94 100644 --- a/source/extensions/filters/network/redis_proxy/proxy_filter.h +++ b/source/extensions/filters/network/redis_proxy/proxy_filter.h @@ -56,7 +56,6 @@ class ProxyFilterConfig { const Network::DrainDecision& drain_decision_; Runtime::Loader& runtime_; - const std::string cluster_name_; const std::string stat_prefix_; const std::string redis_drain_close_runtime_key_{"redis.drain_close_enabled"}; ProxyStats stats_; diff --git a/source/extensions/filters/network/redis_proxy/router.h b/source/extensions/filters/network/redis_proxy/router.h new file 
mode 100644 index 0000000000000..1317b170aca4c --- /dev/null +++ b/source/extensions/filters/network/redis_proxy/router.h @@ -0,0 +1,42 @@ +#pragma once + +#include +#include + +#include "envoy/common/pure.h" +#include "envoy/config/filter/network/redis_proxy/v2/redis_proxy.pb.h" + +#include "extensions/filters/network/redis_proxy/conn_pool.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace RedisProxy { + +/* + * Decorator of a connection pool in order to enable key based routing. + */ +class Router { +public: + virtual ~Router() = default; + + /** + * Forwards the request to the connection pool that matches a route or uses the wildcard route + * when no match is found. + * @param key supplies the key of the current command. + * @param request supplies the RESP request to make. + * @param callbacks supplies the request callbacks. + * @return PoolRequest* a handle to the active request or nullptr if the request could not be made + * for some reason. + */ + virtual Common::Redis::Client::PoolRequest* + makeRequest(const std::string& key, const Common::Redis::RespValue& request, + Common::Redis::Client::PoolCallbacks& callbacks) PURE; +}; + +typedef std::unique_ptr RouterPtr; + +} // namespace RedisProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/network/redis_proxy/router_impl.cc b/source/extensions/filters/network/redis_proxy/router_impl.cc new file mode 100644 index 0000000000000..009cc345b3844 --- /dev/null +++ b/source/extensions/filters/network/redis_proxy/router_impl.cc @@ -0,0 +1,68 @@ +#include "extensions/filters/network/redis_proxy/router_impl.h" + +#include "common/common/fmt.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace RedisProxy { + +PrefixRoutes::PrefixRoutes( + const envoy::config::filter::network::redis_proxy::v2::RedisProxy::PrefixRoutes& config, + Upstreams&& upstreams) + : 
case_insensitive_(config.case_insensitive()), upstreams_(std::move(upstreams)), + catch_all_upstream_(config.catch_all_cluster().empty() + ? nullptr + : upstreams_.at(config.catch_all_cluster())) { + + for (auto const& route : config.routes()) { + std::string copy(route.prefix()); + + if (case_insensitive_) { + to_lower_table_.toLowerCase(copy); + } + + auto success = prefix_lookup_table_.add(copy.c_str(), + std::make_shared(Prefix{ + route.prefix(), + route.remove_prefix(), + upstreams_.at(route.cluster()), + }), + false); + if (!success) { + throw EnvoyException(fmt::format("prefix `{}` already exists.", route.prefix())); + } + } +} + +Common::Redis::Client::PoolRequest* +PrefixRoutes::makeRequest(const std::string& key, const Common::Redis::RespValue& request, + Common::Redis::Client::PoolCallbacks& callbacks) { + + PrefixPtr value = nullptr; + if (case_insensitive_) { + std::string copy(key); + to_lower_table_.toLowerCase(copy); + value = prefix_lookup_table_.findLongestPrefix(copy.c_str()); + } else { + value = prefix_lookup_table_.findLongestPrefix(key.c_str()); + } + + if (value != nullptr) { + absl::string_view view(key); + if (value->remove_prefix) { + view.remove_prefix(value->prefix.length()); + } + std::string str(view); + value->upstream->makeRequest(str, request, callbacks); + } else if (catch_all_upstream_ != nullptr) { + catch_all_upstream_.value()->makeRequest(key, request, callbacks); + } + + return nullptr; +} + +} // namespace RedisProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/network/redis_proxy/router_impl.h b/source/extensions/filters/network/redis_proxy/router_impl.h new file mode 100644 index 0000000000000..0c3d50356c02d --- /dev/null +++ b/source/extensions/filters/network/redis_proxy/router_impl.h @@ -0,0 +1,55 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +#include "envoy/config/filter/network/redis_proxy/v2/redis_proxy.pb.h" 
+#include "envoy/thread_local/thread_local.h" +#include "envoy/upstream/cluster_manager.h" + +#include "common/common/to_lower_table.h" + +#include "extensions/filters/network/redis_proxy/conn_pool_impl.h" +#include "extensions/filters/network/redis_proxy/router.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace RedisProxy { + +typedef std::map Upstreams; + +class PrefixRoutes : public Router { +public: + PrefixRoutes(const envoy::config::filter::network::redis_proxy::v2::RedisProxy::PrefixRoutes& + prefix_routes, + Upstreams&& upstreams); + + Common::Redis::Client::PoolRequest* + makeRequest(const std::string& hash_key, const Common::Redis::RespValue& request, + Common::Redis::Client::PoolCallbacks& callbacks) override; + +private: + struct Prefix { + const std::string prefix; + const bool remove_prefix; + ConnPool::InstanceSharedPtr upstream; + }; + + typedef std::shared_ptr PrefixPtr; + + TrieLookupTable prefix_lookup_table_; + const ToLowerTable to_lower_table_; + const bool case_insensitive_; + Upstreams upstreams_; + absl::optional catch_all_upstream_; +}; + +} // namespace RedisProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/common/common/utility_test.cc b/test/common/common/utility_test.cc index 6434cd140280b..e2a084651065a 100644 --- a/test/common/common/utility_test.cc +++ b/test/common/common/utility_test.cc @@ -828,4 +828,41 @@ TEST(DateFormatter, FromTimeSameWildcard) { DateFormatter("%Y-%m-%dT%H:%M:%S.000Z%1f%2f").fromTime(time1)); } +TEST(TrieLookupTable, AddItems) { + TrieLookupTable trie; + EXPECT_TRUE(trie.add("foo", "a")); + EXPECT_TRUE(trie.add("bar", "b")); + EXPECT_EQ("a", trie.find("foo")); + EXPECT_EQ("b", trie.find("bar")); + + // overwrite_existing = false + EXPECT_FALSE(trie.add("foo", "c", false)); + EXPECT_EQ("a", trie.find("foo")); + + // overwrite_existing = true + EXPECT_TRUE(trie.add("foo", "c")); + EXPECT_EQ("c", trie.find("foo")); +} + 
+TEST(TrieLookupTable, LongestPrefix) { + TrieLookupTable trie; + EXPECT_TRUE(trie.add("foo", "a")); + EXPECT_TRUE(trie.add("bar", "b")); + EXPECT_TRUE(trie.add("baro", "c")); + + EXPECT_EQ("a", trie.find("foo")); + EXPECT_EQ("a", trie.findLongestPrefix("foo")); + EXPECT_EQ("a", trie.findLongestPrefix("foosball")); + + EXPECT_EQ("b", trie.find("bar")); + EXPECT_EQ("b", trie.findLongestPrefix("bar")); + EXPECT_EQ("b", trie.findLongestPrefix("baritone")); + EXPECT_EQ("c", trie.findLongestPrefix("barometer")); + + EXPECT_EQ(nullptr, trie.find("toto")); + EXPECT_EQ(nullptr, trie.findLongestPrefix("toto")); + EXPECT_EQ(nullptr, trie.find(" ")); + EXPECT_EQ(nullptr, trie.findLongestPrefix(" ")); +} + } // namespace Envoy diff --git a/test/extensions/filters/network/redis_proxy/BUILD b/test/extensions/filters/network/redis_proxy/BUILD index 492404c41547e..7b6629b6e4917 100644 --- a/test/extensions/filters/network/redis_proxy/BUILD +++ b/test/extensions/filters/network/redis_proxy/BUILD @@ -75,6 +75,7 @@ envoy_cc_mock( "//source/extensions/filters/network/common/redis:codec_lib", "//source/extensions/filters/network/redis_proxy:command_splitter_interface", "//source/extensions/filters/network/redis_proxy:conn_pool_interface", + "//source/extensions/filters/network/redis_proxy:router_interface", ], ) @@ -104,3 +105,15 @@ envoy_extension_cc_test_binary( "//test/test_common:simulated_time_system_lib", ], ) + +envoy_extension_cc_test( + name = "router_impl_test", + srcs = ["router_impl_test.cc"], + extension_name = "envoy.filters.network.redis_proxy", + deps = [ + ":redis_mocks", + "//source/extensions/filters/network/redis_proxy:router_lib", + "//test/extensions/filters/network/common/redis:redis_mocks", + "//test/test_common:utility_lib", + ], +) diff --git a/test/extensions/filters/network/redis_proxy/command_lookup_speed_test.cc b/test/extensions/filters/network/redis_proxy/command_lookup_speed_test.cc index 2f4d8e30e1b0b..d70fdb02a5e02 100644 --- 
a/test/extensions/filters/network/redis_proxy/command_lookup_speed_test.cc +++ b/test/extensions/filters/network/redis_proxy/command_lookup_speed_test.cc @@ -30,7 +30,7 @@ class NoOpSplitCallbacks : public CommandSplitter::SplitCallbacks { void onResponse(Common::Redis::RespValuePtr&&) override {} }; -class NullInstanceImpl : public ConnPool::Instance { +class NullRouterImpl : public Router { Common::Redis::Client::PoolRequest* makeRequest(const std::string&, const Common::Redis::RespValue&, Common::Redis::Client::PoolCallbacks&) override { @@ -65,11 +65,11 @@ class CommandLookUpSpeedTest { } } - ConnPool::Instance* conn_pool_{new NullInstanceImpl()}; + Router* router_{new NullRouterImpl()}; Stats::IsolatedStoreImpl store_; Event::SimulatedTimeSystem time_system_; - CommandSplitter::InstanceImpl splitter_{ConnPool::InstancePtr{conn_pool_}, store_, "redis.foo.", - time_system_, false}; + CommandSplitter::InstanceImpl splitter_{RouterPtr{router_}, store_, "redis.foo.", time_system_, + false}; NoOpSplitCallbacks callbacks_; CommandSplitter::SplitRequestPtr handle_; }; diff --git a/test/extensions/filters/network/redis_proxy/command_splitter_impl_test.cc b/test/extensions/filters/network/redis_proxy/command_splitter_impl_test.cc index 252078432334a..ff52c8013496d 100644 --- a/test/extensions/filters/network/redis_proxy/command_splitter_impl_test.cc +++ b/test/extensions/filters/network/redis_proxy/command_splitter_impl_test.cc @@ -50,11 +50,10 @@ class RedisCommandSplitterImplTest : public testing::Test { value.asArray().swap(values); } - ConnPool::MockInstance* conn_pool_{new ConnPool::MockInstance()}; + MockRouter* router_{new MockRouter()}; NiceMock store_; Event::SimulatedTimeSystem time_system_; - InstanceImpl splitter_{ConnPool::InstancePtr{conn_pool_}, store_, "redis.foo.", time_system_, - false}; + InstanceImpl splitter_{RouterPtr{router_}, store_, "redis.foo.", time_system_, false}; MockSplitCallbacks callbacks_; SplitRequestPtr handle_; }; @@ -111,7 +110,7 @@ 
class RedisSingleServerRequestTest : public RedisCommandSplitterImplTest, public testing::WithParamInterface { public: void makeRequest(const std::string& hash_key, const Common::Redis::RespValue& request) { - EXPECT_CALL(*conn_pool_, makeRequest(hash_key, Ref(request), _)) + EXPECT_CALL(*router_, makeRequest(hash_key, Ref(request), _)) .WillOnce(DoAll(WithArg<2>(SaveArgAddress(&pool_callbacks_)), Return(&pool_request_))); handle_ = splitter_.makeRequest(request, callbacks_); } @@ -223,7 +222,7 @@ TEST_P(RedisSingleServerRequestTest, NoUpstream) { Common::Redis::RespValue request; makeBulkStringArray(request, {GetParam(), "hello"}); - EXPECT_CALL(*conn_pool_, makeRequest("hello", Ref(request), _)).WillOnce(Return(nullptr)); + EXPECT_CALL(*router_, makeRequest("hello", Ref(request), _)).WillOnce(Return(nullptr)); Common::Redis::RespValue response; response.type(Common::Redis::RespType::Error); response.asString() = Response::get().NoUpstreamHost; @@ -324,7 +323,7 @@ TEST_F(RedisSingleServerRequestTest, EvalNoUpstream) { Common::Redis::RespValue request; makeBulkStringArray(request, {"eval", "return {ARGV[1]}", "1", "key", "arg"}); - EXPECT_CALL(*conn_pool_, makeRequest("key", Ref(request), _)).WillOnce(Return(nullptr)); + EXPECT_CALL(*router_, makeRequest("key", Ref(request), _)).WillOnce(Return(nullptr)); Common::Redis::RespValue response; response.type(Common::Redis::RespType::Error); response.asString() = Response::get().NoUpstreamHost; @@ -359,7 +358,7 @@ class RedisMGETCommandHandlerTest : public RedisCommandSplitterImplTest { null_handle_indexes.end()) { request_to_use = &pool_requests_[i]; } - EXPECT_CALL(*conn_pool_, makeRequest(std::to_string(i), Eq(ByRef(expected_requests_[i])), _)) + EXPECT_CALL(*router_, makeRequest(std::to_string(i), Eq(ByRef(expected_requests_[i])), _)) .WillOnce(DoAll(WithArg<2>(SaveArgAddress(&pool_callbacks_[i])), Return(request_to_use))); } @@ -562,7 +561,7 @@ class RedisMSETCommandHandlerTest : public RedisCommandSplitterImplTest 
{ null_handle_indexes.end()) { request_to_use = &pool_requests_[i]; } - EXPECT_CALL(*conn_pool_, makeRequest(std::to_string(i), Eq(ByRef(expected_requests_[i])), _)) + EXPECT_CALL(*router_, makeRequest(std::to_string(i), Eq(ByRef(expected_requests_[i])), _)) .WillOnce(DoAll(WithArg<2>(SaveArgAddress(&pool_callbacks_[i])), Return(request_to_use))); } @@ -685,7 +684,7 @@ class RedisSplitKeysSumResultHandlerTest : public RedisCommandSplitterImplTest, null_handle_indexes.end()) { request_to_use = &pool_requests_[i]; } - EXPECT_CALL(*conn_pool_, makeRequest(std::to_string(i), Eq(ByRef(expected_requests_[i])), _)) + EXPECT_CALL(*router_, makeRequest(std::to_string(i), Eq(ByRef(expected_requests_[i])), _)) .WillOnce(DoAll(WithArg<2>(SaveArgAddress(&pool_callbacks_[i])), Return(request_to_use))); } @@ -773,14 +772,13 @@ INSTANTIATE_TEST_SUITE_P( class RedisSingleServerRequestWithLatencyMicrosTest : public RedisSingleServerRequestTest { public: void makeRequest(const std::string& hash_key, const Common::Redis::RespValue& request) { - EXPECT_CALL(*conn_pool_, makeRequest(hash_key, Ref(request), _)) + EXPECT_CALL(*router_, makeRequest(hash_key, Ref(request), _)) .WillOnce(DoAll(WithArg<2>(SaveArgAddress(&pool_callbacks_)), Return(&pool_request_))); handle_ = splitter_.makeRequest(request, callbacks_); } - ConnPool::MockInstance* conn_pool_{new ConnPool::MockInstance()}; - InstanceImpl splitter_{ConnPool::InstancePtr{conn_pool_}, store_, "redis.foo.", time_system_, - true}; + MockRouter* router_{new MockRouter()}; + InstanceImpl splitter_{RouterPtr{router_}, store_, "redis.foo.", time_system_, true}; }; TEST_P(RedisSingleServerRequestWithLatencyMicrosTest, Success) { diff --git a/test/extensions/filters/network/redis_proxy/config_test.cc b/test/extensions/filters/network/redis_proxy/config_test.cc index 074862e5718c8..be23782420b43 100644 --- a/test/extensions/filters/network/redis_proxy/config_test.cc +++ b/test/extensions/filters/network/redis_proxy/config_test.cc @@ -23,6 
+23,21 @@ TEST(RedisProxyFilterConfigFactoryTest, ValidateFail) { ProtoValidationException); } +TEST(RedisProxyFilterConfigFactoryTest, NoUpstreamDefined) { + envoy::config::filter::network::redis_proxy::v2::RedisProxy::ConnPoolSettings settings; + settings.mutable_op_timeout()->CopyFrom(Protobuf::util::TimeUtil::MillisecondsToDuration(20)); + + envoy::config::filter::network::redis_proxy::v2::RedisProxy config; + config.set_stat_prefix("foo"); + config.mutable_settings()->CopyFrom(settings); + + NiceMock context; + + EXPECT_THROW_WITH_MESSAGE( + RedisProxyFilterConfigFactory().createFilterFactoryFromProto(config, context), EnvoyException, + "cannot configure a redis-proxy without any upstream"); +} + TEST(RedisProxyFilterConfigFactoryTest, RedisProxyCorrectJson) { std::string json_string = R"EOF( { diff --git a/test/extensions/filters/network/redis_proxy/conn_pool_impl_test.cc b/test/extensions/filters/network/redis_proxy/conn_pool_impl_test.cc index bd267cd1670d2..464b1eff494f1 100644 --- a/test/extensions/filters/network/redis_proxy/conn_pool_impl_test.cc +++ b/test/extensions/filters/network/redis_proxy/conn_pool_impl_test.cc @@ -43,7 +43,8 @@ class RedisConnPoolImplTest : public testing::Test, public Common::Redis::Client if (!cluster_exists) { EXPECT_CALL(cm_, get("fake_cluster")).WillOnce(Return(nullptr)); } - conn_pool_ = std::make_unique(cluster_name_, cm_, *this, tls_, + + conn_pool_ = std::make_shared(cluster_name_, cm_, *this, tls_, Common::Redis::Client::createConnPoolSettings()); } @@ -74,7 +75,7 @@ class RedisConnPoolImplTest : public testing::Test, public Common::Redis::Client const std::string cluster_name_{"fake_cluster"}; NiceMock cm_; NiceMock tls_; - InstancePtr conn_pool_; + InstanceSharedPtr conn_pool_; Upstream::ClusterUpdateCallbacks* update_callbacks_{}; Common::Redis::Client::MockClient* client_{}; }; diff --git a/test/extensions/filters/network/redis_proxy/mocks.cc b/test/extensions/filters/network/redis_proxy/mocks.cc index 
7e0ce1eff0bde..3bbb28baba804 100644 --- a/test/extensions/filters/network/redis_proxy/mocks.cc +++ b/test/extensions/filters/network/redis_proxy/mocks.cc @@ -15,6 +15,9 @@ namespace Extensions { namespace NetworkFilters { namespace RedisProxy { +MockRouter::MockRouter() {} +MockRouter::~MockRouter() {} + namespace ConnPool { MockInstance::MockInstance() {} diff --git a/test/extensions/filters/network/redis_proxy/mocks.h b/test/extensions/filters/network/redis_proxy/mocks.h index 19c724ac74478..e959475542654 100644 --- a/test/extensions/filters/network/redis_proxy/mocks.h +++ b/test/extensions/filters/network/redis_proxy/mocks.h @@ -8,6 +8,7 @@ #include "extensions/filters/network/common/redis/codec_impl.h" #include "extensions/filters/network/redis_proxy/command_splitter.h" #include "extensions/filters/network/redis_proxy/conn_pool.h" +#include "extensions/filters/network/redis_proxy/router.h" #include "test/test_common/printers.h" @@ -18,6 +19,17 @@ namespace Extensions { namespace NetworkFilters { namespace RedisProxy { +class MockRouter : public Router { +public: + MockRouter(); + ~MockRouter(); + + MOCK_METHOD3(makeRequest, + Common::Redis::Client::PoolRequest*( + const std::string& hash_key, const Common::Redis::RespValue& request, + Common::Redis::Client::PoolCallbacks& callbacks)); +}; + namespace ConnPool { class MockInstance : public Instance { diff --git a/test/extensions/filters/network/redis_proxy/proxy_filter_test.cc b/test/extensions/filters/network/redis_proxy/proxy_filter_test.cc index 333a9687dc501..4cb73b89186b2 100644 --- a/test/extensions/filters/network/redis_proxy/proxy_filter_test.cc +++ b/test/extensions/filters/network/redis_proxy/proxy_filter_test.cc @@ -60,7 +60,7 @@ TEST_F(RedisProxyFilterConfigTest, Normal) { envoy::config::filter::network::redis_proxy::v2::RedisProxy proto_config = parseProtoFromJson(json_string); ProxyFilterConfig config(proto_config, store_, drain_decision_, runtime_); - EXPECT_EQ("fake_cluster", 
config.cluster_name_); + EXPECT_EQ("redis.foo.", config.stat_prefix_); } TEST_F(RedisProxyFilterConfigTest, BadRedisProxyConfig) { diff --git a/test/extensions/filters/network/redis_proxy/router_impl_test.cc b/test/extensions/filters/network/redis_proxy/router_impl_test.cc new file mode 100644 index 0000000000000..62b012e4abc27 --- /dev/null +++ b/test/extensions/filters/network/redis_proxy/router_impl_test.cc @@ -0,0 +1,199 @@ +#include + +#include "extensions/filters/network/redis_proxy/conn_pool_impl.h" +#include "extensions/filters/network/redis_proxy/router_impl.h" + +#include "test/extensions/filters/network/common/redis/mocks.h" +#include "test/extensions/filters/network/redis_proxy/mocks.h" +#include "test/test_common/utility.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +using testing::_; +using testing::Eq; +using testing::InSequence; +using testing::Return; +using testing::StrEq; + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace RedisProxy { + +envoy::config::filter::network::redis_proxy::v2::RedisProxy::PrefixRoutes createPrefixRoutes() { + envoy::config::filter::network::redis_proxy::v2::RedisProxy::PrefixRoutes prefix_routes; + auto* routes = prefix_routes.mutable_routes(); + + { + auto* route = routes->Add(); + route->set_prefix("ab"); + route->set_cluster("fake_clusterA"); + } + + { + auto* route = routes->Add(); + route->set_prefix("a"); + route->set_cluster("fake_clusterB"); + } + + return prefix_routes; +} + +TEST(PrefixRoutesTest, MissingCatchAll) { + Upstreams upstreams; + upstreams.emplace("fake_clusterA", std::make_shared()); + upstreams.emplace("fake_clusterB", std::make_shared()); + + PrefixRoutes router(createPrefixRoutes(), std::move(upstreams)); + + Common::Redis::RespValue value; + Common::Redis::Client::MockPoolCallbacks callbacks; + + EXPECT_EQ(nullptr, router.makeRequest("c:bar", value, callbacks)); +} + +TEST(PrefixRoutesTest, RoutedToCatchAll) { + auto upstream_c = std::make_shared(); 
+ + Upstreams upstreams; + upstreams.emplace("fake_clusterA", std::make_shared()); + upstreams.emplace("fake_clusterB", std::make_shared()); + upstreams.emplace("fake_clusterC", upstream_c); + + auto prefix_routes = createPrefixRoutes(); + prefix_routes.set_catch_all_cluster("fake_clusterC"); + + EXPECT_CALL(*upstream_c, makeRequest(Eq("c:bar"), _, _)); + + PrefixRoutes router(prefix_routes, std::move(upstreams)); + Common::Redis::RespValue value; + Common::Redis::Client::MockPoolCallbacks callbacks; + + EXPECT_EQ(nullptr, router.makeRequest("c:bar", value, callbacks)); +} + +TEST(PrefixRoutesTest, RoutedToLongestPrefix) { + auto upstream_a = std::make_shared(); + + Upstreams upstreams; + upstreams.emplace("fake_clusterA", upstream_a); + upstreams.emplace("fake_clusterB", std::make_shared()); + + EXPECT_CALL(*upstream_a, makeRequest(Eq("ab:bar"), _, _)); + + PrefixRoutes router(createPrefixRoutes(), std::move(upstreams)); + Common::Redis::RespValue value; + Common::Redis::Client::MockPoolCallbacks callbacks; + + EXPECT_EQ(nullptr, router.makeRequest("ab:bar", value, callbacks)); +} + +TEST(PrefixRoutesTest, CaseUnsensitivePrefix) { + auto upstream_a = std::make_shared(); + + Upstreams upstreams; + upstreams.emplace("fake_clusterA", upstream_a); + upstreams.emplace("fake_clusterB", std::make_shared()); + + auto prefix_routes = createPrefixRoutes(); + prefix_routes.set_case_insensitive(true); + + EXPECT_CALL(*upstream_a, makeRequest(Eq("AB:bar"), _, _)); + + PrefixRoutes router(prefix_routes, std::move(upstreams)); + Common::Redis::RespValue value; + Common::Redis::Client::MockPoolCallbacks callbacks; + + EXPECT_EQ(nullptr, router.makeRequest("AB:bar", value, callbacks)); +} + +TEST(PrefixRoutesTest, RemovePrefix) { + auto upstream_a = std::make_shared(); + + Upstreams upstreams; + upstreams.emplace("fake_clusterA", upstream_a); + upstreams.emplace("fake_clusterB", std::make_shared()); + + auto prefix_routes = createPrefixRoutes(); + + { + auto* route = 
prefix_routes.mutable_routes()->Add(); + route->set_prefix("abc"); + route->set_cluster("fake_clusterA"); + route->set_remove_prefix(true); + } + + EXPECT_CALL(*upstream_a, makeRequest(Eq(":bar"), _, _)); + + PrefixRoutes router(prefix_routes, std::move(upstreams)); + Common::Redis::RespValue value; + Common::Redis::Client::MockPoolCallbacks callbacks; + + EXPECT_EQ(nullptr, router.makeRequest("abc:bar", value, callbacks)); +} + +TEST(PrefixRoutesTest, RoutedToShortestPrefix) { + auto upstream_b = std::make_shared(); + + Upstreams upstreams; + upstreams.emplace("fake_clusterA", std::make_shared()); + upstreams.emplace("fake_clusterB", upstream_b); + + EXPECT_CALL(*upstream_b, makeRequest(Eq("a:bar"), _, _)); + + PrefixRoutes router(createPrefixRoutes(), std::move(upstreams)); + Common::Redis::RespValue value; + Common::Redis::Client::MockPoolCallbacks callbacks; + + EXPECT_EQ(nullptr, router.makeRequest("a:bar", value, callbacks)); +} + +TEST(PrefixRoutesTest, DifferentPrefixesSameUpstream) { + auto upstream_b = std::make_shared(); + + Upstreams upstreams; + upstreams.emplace("fake_clusterA", std::make_shared()); + upstreams.emplace("fake_clusterB", upstream_b); + + auto prefix_routes = createPrefixRoutes(); + + { + auto* route = prefix_routes.mutable_routes()->Add(); + route->set_prefix("also_route_to_b"); + route->set_cluster("fake_clusterB"); + } + + EXPECT_CALL(*upstream_b, makeRequest(Eq("a:bar"), _, _)); + EXPECT_CALL(*upstream_b, makeRequest(Eq("also_route_to_b:bar"), _, _)); + + PrefixRoutes router(prefix_routes, std::move(upstreams)); + Common::Redis::RespValue value; + Common::Redis::Client::MockPoolCallbacks callbacks; + + EXPECT_EQ(nullptr, router.makeRequest("a:bar", value, callbacks)); + EXPECT_EQ(nullptr, router.makeRequest("also_route_to_b:bar", value, callbacks)); +} + +TEST(PrefixRoutesTest, DuplicatePrefix) { + Upstreams upstreams; + upstreams.emplace("fake_clusterA", std::make_shared()); + upstreams.emplace("fake_clusterB", std::make_shared()); 
+ upstreams.emplace("this_will_throw", std::make_shared()); + + auto prefix_routes = createPrefixRoutes(); + + { + auto* route = prefix_routes.mutable_routes()->Add(); + route->set_prefix("ab"); + route->set_cluster("this_will_throw"); + } + + EXPECT_THROW_WITH_MESSAGE(PrefixRoutes router(prefix_routes, std::move(upstreams)), + EnvoyException, "prefix `ab` already exists.") +} + +} // namespace RedisProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy From 8fb00ba4348dcbc30a012890393bf8471f119bdf Mon Sep 17 00:00:00 2001 From: Bin Wu <46450037+wu-bin@users.noreply.github.com> Date: Tue, 26 Mar 2019 13:54:23 -0400 Subject: [PATCH 014/165] Add quic_expect_bug_impl.h, (spdy|http2)_logging_impl.h, (spdy|http2)_bug_tracker_impl.h QUICHE platform implementation (#6339) Add quic_expect_bug_impl.h, (spdy|http2)_logging_impl.h, (spdy|http2)_bug_tracker_impl.h QUICHE platform implementation. All of them depends on quic_logging_impl.h. Risk Level: minimum, code not used yet. 
Testing: bazel test test/extensions/quic_listeners/quiche/platform:spdy_platform_test --test_output=all --define quiche=enabled bazel test test/extensions/quic_listeners/quiche/platform:http2_platform_test --test_output=all --define quiche=enabled bazel test test/extensions/quic_listeners/quiche/platform:quic_platform_test --test_output=all --define quiche=enabled bazel test @com_googlesource_quiche//:spdy_platform_test --test_output=all --define quiche=enabled bazel test @com_googlesource_quiche//:http2_platform_test --test_output=all --define quiche=enabled bazel test @com_googlesource_quiche//:quic_platform_test --test_output=all --define quiche=enabled Signed-off-by: Bin Wu --- bazel/external/quiche.BUILD | 35 ++++++++-------- .../quic_listeners/quiche/platform/BUILD | 33 ++++++++++++--- .../quiche/platform/http2_bug_tracker_impl.h | 13 ++++++ .../quiche/platform/http2_logging_impl.h | 23 +++++++++++ .../quiche/platform/quic_bug_tracker_impl.h | 12 +++--- .../quiche/platform/quic_expect_bug_impl.h | 16 ++++++++ .../quiche/platform/quic_logging_impl.cc | 15 +++++++ .../quiche/platform/quic_logging_impl.h | 8 ++++ .../quiche/platform/quic_mock_log_impl.h | 40 +++++++++++++++++++ .../quiche/platform/spdy_bug_tracker_impl.h | 13 ++++++ .../quiche/platform/spdy_logging_impl.h | 21 ++++++++++ .../quic_listeners/quiche/platform/BUILD | 6 ++- .../quiche/platform/http2_platform_test.cc | 35 ++++++++++++++++ .../quiche/platform/quic_platform_test.cc | 13 ++++++ .../quiche/platform/spdy_platform_test.cc | 33 +++++++++++++++ 15 files changed, 288 insertions(+), 28 deletions(-) create mode 100644 source/extensions/quic_listeners/quiche/platform/http2_bug_tracker_impl.h create mode 100644 source/extensions/quic_listeners/quiche/platform/http2_logging_impl.h create mode 100644 source/extensions/quic_listeners/quiche/platform/quic_expect_bug_impl.h create mode 100644 source/extensions/quic_listeners/quiche/platform/spdy_bug_tracker_impl.h create mode 100644 
source/extensions/quic_listeners/quiche/platform/spdy_logging_impl.h diff --git a/bazel/external/quiche.BUILD b/bazel/external/quiche.BUILD index e41693b201776..149db513f83f3 100644 --- a/bazel/external/quiche.BUILD +++ b/bazel/external/quiche.BUILD @@ -62,13 +62,15 @@ cc_library( "quiche/http2/platform/api/http2_string.h", "quiche/http2/platform/api/http2_string_piece.h", # TODO: uncomment the following files as implementations are added. - # "quiche/http2/platform/api/http2_bug_tracker.h", # "quiche/http2/platform/api/http2_flags.h", - # "quiche/http2/platform/api/http2_mock_log.h", # "quiche/http2/platform/api/http2_reconstruct_object.h", # "quiche/http2/platform/api/http2_test_helpers.h", ] + envoy_select_quiche( - ["quiche/http2/platform/api/http2_string_utils.h"], + [ + "quiche/http2/platform/api/http2_bug_tracker.h", + "quiche/http2/platform/api/http2_logging.h", + "quiche/http2/platform/api/http2_string_utils.h", + ], "@envoy", ), visibility = ["//visibility:public"], @@ -90,7 +92,11 @@ cc_library( # TODO: uncomment the following files as implementations are added. 
# "quiche/spdy/platform/api/spdy_flags.h", ] + envoy_select_quiche( - ["quiche/spdy/platform/api/spdy_string_utils.h"], + [ + "quiche/spdy/platform/api/spdy_bug_tracker.h", + "quiche/spdy/platform/api/spdy_logging.h", + "quiche/spdy/platform/api/spdy_string_utils.h", + ], "@envoy", ), visibility = ["//visibility:public"], @@ -130,7 +136,6 @@ cc_library( hdrs = [ "quiche/quic/platform/api/quic_aligned.h", "quiche/quic/platform/api/quic_arraysize.h", - "quiche/quic/platform/api/quic_bug_tracker.h", "quiche/quic/platform/api/quic_client_stats.h", "quiche/quic/platform/api/quic_containers.h", "quiche/quic/platform/api/quic_endian.h", @@ -139,21 +144,16 @@ cc_library( "quiche/quic/platform/api/quic_fallthrough.h", "quiche/quic/platform/api/quic_flag_utils.h", "quiche/quic/platform/api/quic_iovec.h", - "quiche/quic/platform/api/quic_logging.h", "quiche/quic/platform/api/quic_map_util.h", - "quiche/quic/platform/api/quic_mock_log.h", "quiche/quic/platform/api/quic_prefetch.h", "quiche/quic/platform/api/quic_ptr_util.h", "quiche/quic/platform/api/quic_reference_counted.h", "quiche/quic/platform/api/quic_server_stats.h", - "quiche/quic/platform/api/quic_stack_trace.h", "quiche/quic/platform/api/quic_string_piece.h", "quiche/quic/platform/api/quic_test_output.h", "quiche/quic/platform/api/quic_uint128.h", - "quiche/quic/platform/api/quic_thread.h", # TODO: uncomment the following files as implementations are added. 
# "quiche/quic/platform/api/quic_clock.h", - # "quiche/quic/platform/api/quic_expect_bug.h", # "quiche/quic/platform/api/quic_file_utils.h", # "quiche/quic/platform/api/quic_flags.h", # "quiche/quic/platform/api/quic_fuzzed_data_provider.h", @@ -166,15 +166,19 @@ cc_library( # "quiche/quic/platform/api/quic_mem_slice_storage.h", # "quiche/quic/platform/api/quic_pcc_sender.h", # "quiche/quic/platform/api/quic_socket_address.h", - # "quiche/quic/platform/api/quic_stack_trace.h", - # "quiche/quic/platform/api/quic_test.h", # "quiche/quic/platform/api/quic_test_loopback.h", # "quiche/quic/platform/api/quic_test_mem_slice_vector.h", ] + envoy_select_quiche( [ + "quiche/quic/platform/api/quic_bug_tracker.h", + "quiche/quic/platform/api/quic_expect_bug.h", + "quiche/quic/platform/api/quic_mock_log.h", + "quiche/quic/platform/api/quic_logging.h", + "quiche/quic/platform/api/quic_stack_trace.h", "quiche/quic/platform/api/quic_string_utils.h", "quiche/quic/platform/api/quic_test.h", "quiche/quic/platform/api/quic_text_utils.h", + "quiche/quic/platform/api/quic_thread.h", ], "@envoy", ), @@ -222,12 +226,11 @@ envoy_cc_test( envoy_cc_test( name = "quic_platform_test", - srcs = [ - "quiche/quic/platform/api/quic_reference_counted_test.cc", - ] + envoy_select_quiche( + srcs = envoy_select_quiche( [ - "quiche/quic/platform/api/quic_text_utils_test.cc", + "quiche/quic/platform/api/quic_reference_counted_test.cc", "quiche/quic/platform/api/quic_string_utils_test.cc", + "quiche/quic/platform/api/quic_text_utils_test.cc", ], "@envoy", ), diff --git a/source/extensions/quic_listeners/quiche/platform/BUILD b/source/extensions/quic_listeners/quiche/platform/BUILD index 922912ccef2b0..f6f8c67631a02 100644 --- a/source/extensions/quic_listeners/quiche/platform/BUILD +++ b/source/extensions/quic_listeners/quiche/platform/BUILD @@ -42,14 +42,21 @@ envoy_cc_library( "http2_ptr_util_impl.h", "http2_string_impl.h", "http2_string_piece_impl.h", - ] + 
envoy_select_quiche(["http2_string_utils_impl.h"]), + ] + envoy_select_quiche([ + "http2_bug_tracker_impl.h", + "http2_logging_impl.h", + "http2_string_utils_impl.h", + ]), external_deps = [ "abseil_base", "abseil_optional", "abseil_str_format", ], visibility = ["//visibility:public"], - deps = envoy_select_quiche([":string_utils_lib"]), + deps = envoy_select_quiche([ + ":quic_platform_logging_impl_lib", + ":string_utils_lib", + ]), ) envoy_cc_library( @@ -58,9 +65,19 @@ envoy_cc_library( visibility = ["//visibility:public"], ) +envoy_cc_library( + name = "quic_platform_logging_impl_lib", + srcs = ["quic_logging_impl.cc"], + hdrs = [ + "quic_bug_tracker_impl.h", + "quic_logging_impl.h", + ], + visibility = ["//visibility:public"], + deps = ["//source/common/common:assert_lib"], +) + envoy_cc_library( name = "quic_platform_base_impl_lib", - srcs = envoy_select_quiche(["quic_logging_impl.cc"]), hdrs = [ "quic_aligned_impl.h", "quic_arraysize_impl.h", @@ -79,6 +96,7 @@ envoy_cc_library( "quic_string_piece_impl.h", "quic_uint128_impl.h", ] + envoy_select_quiche([ + "quic_expect_bug_impl.h", "quic_logging_impl.h", "quic_mock_log_impl.h", "quic_stack_trace_impl.h", @@ -96,6 +114,7 @@ envoy_cc_library( ], visibility = ["//visibility:public"], deps = ["@com_googlesource_quiche//:quic_platform_export"] + envoy_select_quiche([ + ":quic_platform_logging_impl_lib", "//include/envoy/thread:thread_interface", "//source/common/common:assert_lib", "//source/server:backtrace_lib", @@ -113,7 +132,6 @@ envoy_cc_library( "quic_mutex_impl.h", "quic_str_cat_impl.h", ] + envoy_select_quiche([ - "quic_bug_tracker_impl.h", "quic_hostname_utils_impl.h", "quic_string_utils_impl.h", "quic_test_output_impl.h", @@ -157,7 +175,11 @@ envoy_cc_library( "spdy_test_helpers_impl.h", "spdy_test_utils_prod_impl.h", "spdy_unsafe_arena_impl.h", - ] + envoy_select_quiche(["spdy_string_utils_impl.h"]), + ] + envoy_select_quiche([ + "spdy_bug_tracker_impl.h", + "spdy_logging_impl.h", + 
"spdy_string_utils_impl.h", + ]), external_deps = [ "abseil_base", "abseil_hash", @@ -167,6 +189,7 @@ envoy_cc_library( ], visibility = ["//visibility:public"], deps = envoy_select_quiche([ + ":quic_platform_logging_impl_lib", ":string_utils_lib", "//source/common/common:assert_lib", ]), diff --git a/source/extensions/quic_listeners/quiche/platform/http2_bug_tracker_impl.h b/source/extensions/quic_listeners/quiche/platform/http2_bug_tracker_impl.h new file mode 100644 index 0000000000000..58c7039d536bb --- /dev/null +++ b/source/extensions/quic_listeners/quiche/platform/http2_bug_tracker_impl.h @@ -0,0 +1,13 @@ +#pragma once + +// NOLINT(namespace-envoy) +// +// This file is part of the QUICHE platform implementation, and is not to be +// consumed or referenced directly by other Envoy code. It serves purely as a +// porting layer for QUICHE. + +#include "extensions/quic_listeners/quiche/platform/quic_bug_tracker_impl.h" + +#define HTTP2_BUG_IMPL QUIC_BUG_IMPL +#define HTTP2_BUG_IF_IMPL QUIC_BUG_IF_IMPL +#define FLAGS_http2_always_log_bugs_for_tests_IMPL true diff --git a/source/extensions/quic_listeners/quiche/platform/http2_logging_impl.h b/source/extensions/quic_listeners/quiche/platform/http2_logging_impl.h new file mode 100644 index 0000000000000..473c2d00d4bd4 --- /dev/null +++ b/source/extensions/quic_listeners/quiche/platform/http2_logging_impl.h @@ -0,0 +1,23 @@ +#pragma once + +// NOLINT(namespace-envoy) +// +// This file is part of the QUICHE platform implementation, and is not to be +// consumed or referenced directly by other Envoy code. It serves purely as a +// porting layer for QUICHE. 
+ +#include "extensions/quic_listeners/quiche/platform/quic_logging_impl.h" + +#define HTTP2_LOG_IMPL(severity) QUIC_LOG_IMPL(severity) + +#define HTTP2_VLOG_IMPL(verbose_level) QUIC_VLOG_IMPL(verbose_level) + +#define HTTP2_DLOG_IMPL(severity) QUIC_DLOG_IMPL(severity) + +#define HTTP2_DLOG_IF_IMPL(severity, condition) QUIC_DLOG_IF_IMPL(severity, condition) + +#define HTTP2_DVLOG_IMPL(verbose_level) QUIC_DVLOG_IMPL(verbose_level) + +#define HTTP2_DVLOG_IF_IMPL(verbose_level, condition) QUIC_DVLOG_IF_IMPL(verbose_level, condition) + +#define HTTP2_DLOG_EVERY_N_IMPL(severity, n) QUIC_DLOG_EVERY_N_IMPL(severity, n) diff --git a/source/extensions/quic_listeners/quiche/platform/quic_bug_tracker_impl.h b/source/extensions/quic_listeners/quiche/platform/quic_bug_tracker_impl.h index 4f30441d1b9ac..050bd385d8818 100644 --- a/source/extensions/quic_listeners/quiche/platform/quic_bug_tracker_impl.h +++ b/source/extensions/quic_listeners/quiche/platform/quic_bug_tracker_impl.h @@ -6,9 +6,11 @@ // consumed or referenced directly by other Envoy code. It serves purely as a // porting layer for QUICHE. -#include "quiche/quic/platform/api/quic_logging.h" +#include "extensions/quic_listeners/quiche/platform/quic_logging_impl.h" -#define QUIC_BUG_IMPL QUIC_LOG(DFATAL) -#define QUIC_BUG_IF_IMPL(condition) QUIC_LOG_IF(DFATAL, condition) -#define QUIC_PEER_BUG_IMPL QUIC_LOG(ERROR) -#define QUIC_PEER_BUG_IF_IMPL(condition) QUIC_LOG_IF(ERROR, condition) +// TODO(wub): Implement exponential back off to avoid performance problems due +// to excessive QUIC_BUG. 
+#define QUIC_BUG_IMPL QUIC_LOG_IMPL(DFATAL) +#define QUIC_BUG_IF_IMPL(condition) QUIC_LOG_IF_IMPL(DFATAL, condition) +#define QUIC_PEER_BUG_IMPL QUIC_LOG_IMPL(ERROR) +#define QUIC_PEER_BUG_IF_IMPL(condition) QUIC_LOG_IF_IMPL(ERROR, condition) diff --git a/source/extensions/quic_listeners/quiche/platform/quic_expect_bug_impl.h b/source/extensions/quic_listeners/quiche/platform/quic_expect_bug_impl.h new file mode 100644 index 0000000000000..213f8ab397a02 --- /dev/null +++ b/source/extensions/quic_listeners/quiche/platform/quic_expect_bug_impl.h @@ -0,0 +1,16 @@ +#pragma once + +// NOLINT(namespace-envoy) +// +// This file is part of the QUICHE platform implementation, and is not to be +// consumed or referenced directly by other Envoy code. It serves purely as a +// porting layer for QUICHE. + +#include "extensions/quic_listeners/quiche/platform/quic_logging_impl.h" +#include "extensions/quic_listeners/quiche/platform/quic_mock_log_impl.h" + +#define EXPECT_QUIC_BUG_IMPL(statement, regex) \ + EXPECT_QUIC_DFATAL_IMPL(statement, testing::ContainsRegex(regex)) + +#define EXPECT_QUIC_PEER_BUG_IMPL(statement, regex) \ + EXPECT_QUIC_LOG_IMPL(statement, ERROR, testing::ContainsRegex(regex)) diff --git a/source/extensions/quic_listeners/quiche/platform/quic_logging_impl.cc b/source/extensions/quic_listeners/quiche/platform/quic_logging_impl.cc index cee7e7decb32f..578fbffc28335 100644 --- a/source/extensions/quic_listeners/quiche/platform/quic_logging_impl.cc +++ b/source/extensions/quic_listeners/quiche/platform/quic_logging_impl.cc @@ -12,6 +12,7 @@ namespace quic { namespace { std::atomic g_verbosity_threshold; +std::atomic g_dfatal_exit_disabled; // Pointer to the global log sink, usually it is nullptr. // If not nullptr, as in some tests, the sink will receive a copy of the log message right after the @@ -39,7 +40,15 @@ QuicLogEmitter::~QuicLogEmitter() { } if (level_ == FATAL) { +#ifdef NDEBUG + // Release mode. abort(); +#else + // Debug mode. 
+ if (!g_dfatal_exit_disabled) { + abort(); + } +#endif } } @@ -49,6 +58,12 @@ void SetVerbosityLogThreshold(int new_verbosity) { g_verbosity_threshold.store(new_verbosity, std::memory_order_relaxed); } +bool IsDFatalExitDisabled() { return g_dfatal_exit_disabled.load(std::memory_order_relaxed); } + +void SetDFatalExitDisabled(bool is_disabled) { + g_dfatal_exit_disabled.store(is_disabled, std::memory_order_relaxed); +} + QuicLogSink* SetLogSink(QuicLogSink* new_sink) { absl::MutexLock lock(&g_quic_log_sink_mutex); QuicLogSink* old_sink = g_quic_log_sink.load(std::memory_order_relaxed); diff --git a/source/extensions/quic_listeners/quiche/platform/quic_logging_impl.h b/source/extensions/quic_listeners/quiche/platform/quic_logging_impl.h index b71dba7b39259..bf4e426616fa1 100644 --- a/source/extensions/quic_listeners/quiche/platform/quic_logging_impl.h +++ b/source/extensions/quic_listeners/quiche/platform/quic_logging_impl.h @@ -42,6 +42,9 @@ // TODO(wub): Implement QUIC_LOG_FIRST_N_IMPL. #define QUIC_LOG_FIRST_N_IMPL(severity, n) QUIC_LOG_IMPL(severity) +// TODO(wub): Implement QUIC_LOG_EVERY_N_IMPL. +#define QUIC_LOG_EVERY_N_IMPL(severity, n) QUIC_LOG_IMPL(severity) + // TODO(wub): Implement QUIC_LOG_EVERY_N_SEC_IMPL. 
#define QUIC_LOG_EVERY_N_SEC_IMPL(severity, seconds) QUIC_LOG_IMPL(severity) @@ -65,6 +68,7 @@ #define QUIC_DLOG_IMPL(severity) QUIC_COMPILED_OUT_LOG() #define QUIC_DLOG_IF_IMPL(severity, condition) QUIC_COMPILED_OUT_LOG() #define QUIC_DLOG_INFO_IS_ON_IMPL() 0 +#define QUIC_DLOG_EVERY_N_IMPL(severity, n) QUIC_COMPILED_OUT_LOG() #define QUIC_NOTREACHED_IMPL() #else // Debug build @@ -74,6 +78,7 @@ #define QUIC_DLOG_IMPL(severity) QUIC_LOG_IMPL(severity) #define QUIC_DLOG_IF_IMPL(severity, condition) QUIC_LOG_IF_IMPL(severity, condition) #define QUIC_DLOG_INFO_IS_ON_IMPL() QUIC_LOG_INFO_IS_ON_IMPL() +#define QUIC_DLOG_EVERY_N_IMPL(severity, n) QUIC_LOG_EVERY_N_IMPL(severity, n) #define QUIC_NOTREACHED_IMPL() NOT_REACHED_GCOVR_EXCL_LINE #endif @@ -135,6 +140,9 @@ inline bool IsVerboseLogEnabled(int verbosity) { return IsLogLevelEnabled(INFO) && verbosity <= GetVerbosityLogThreshold(); } +bool IsDFatalExitDisabled(); +void SetDFatalExitDisabled(bool is_disabled); + // QuicLogSink is used to capture logs emitted from the QUIC_LOG... macros. class QuicLogSink { public: diff --git a/source/extensions/quic_listeners/quiche/platform/quic_mock_log_impl.h b/source/extensions/quic_listeners/quiche/platform/quic_mock_log_impl.h index 4f3c6d4e5c7a1..ed5298a07f2bc 100644 --- a/source/extensions/quic_listeners/quiche/platform/quic_mock_log_impl.h +++ b/source/extensions/quic_listeners/quiche/platform/quic_mock_log_impl.h @@ -47,6 +47,24 @@ class QuicEnvoyMockLog : public QuicLogSink { bool is_capturing_; }; +// ScopedDisableExitOnDFatal is used to disable exiting the program when we encounter a +// QUIC_LOG(DFATAL) within the current block. After we leave the current block, the previous +// behavior is restored. 
+class ScopedDisableExitOnDFatal { +public: + ScopedDisableExitOnDFatal() : previous_value_(IsDFatalExitDisabled()) { + SetDFatalExitDisabled(true); + } + + ScopedDisableExitOnDFatal(const ScopedDisableExitOnDFatal&) = delete; + ScopedDisableExitOnDFatal& operator=(const ScopedDisableExitOnDFatal&) = delete; + + ~ScopedDisableExitOnDFatal() { SetDFatalExitDisabled(previous_value_); } + +private: + const bool previous_value_; +}; + } // namespace quic using QuicMockLogImpl = quic::QuicEnvoyMockLog; @@ -57,3 +75,25 @@ using QuicMockLogImpl = quic::QuicEnvoyMockLog; #define EXPECT_QUIC_LOG_CALL_CONTAINS_IMPL(log, level, content) \ EXPECT_CALL(log, Log(quic::level, testing::HasSubstr(content))) + +// Not part of the api exposed by quic_mock_log.h. This is used by +// quic_expect_bug_impl.h. +#define EXPECT_QUIC_LOG_IMPL(statement, level, matcher) \ + do { \ + quic::QuicEnvoyMockLog mock_log; \ + EXPECT_CALL(mock_log, Log(quic::level, matcher)).Times(testing::AtLeast(1)); \ + mock_log.StartCapturingLogs(); \ + { statement; } \ + mock_log.StopCapturingLogs(); \ + if (!testing::Mock::VerifyAndClear(&mock_log)) { \ + GTEST_NONFATAL_FAILURE_(""); \ + } \ + } while (false) + +#define EXPECT_QUIC_DFATAL_IMPL(statement, matcher) \ + EXPECT_QUIC_LOG_IMPL( \ + { \ + quic::ScopedDisableExitOnDFatal disable_exit_on_dfatal; \ + statement; \ + }, \ + DFATAL, matcher) diff --git a/source/extensions/quic_listeners/quiche/platform/spdy_bug_tracker_impl.h b/source/extensions/quic_listeners/quiche/platform/spdy_bug_tracker_impl.h new file mode 100644 index 0000000000000..93cb60e469695 --- /dev/null +++ b/source/extensions/quic_listeners/quiche/platform/spdy_bug_tracker_impl.h @@ -0,0 +1,13 @@ +#pragma once + +// NOLINT(namespace-envoy) +// +// This file is part of the QUICHE platform implementation, and is not to be +// consumed or referenced directly by other Envoy code. It serves purely as a +// porting layer for QUICHE. 
+ +#include "extensions/quic_listeners/quiche/platform/quic_bug_tracker_impl.h" + +#define SPDY_BUG_IMPL QUIC_BUG_IMPL +#define SPDY_BUG_IF_IMPL QUIC_BUG_IF_IMPL +#define FLAGS_spdy_always_log_bugs_for_tests_impl true diff --git a/source/extensions/quic_listeners/quiche/platform/spdy_logging_impl.h b/source/extensions/quic_listeners/quiche/platform/spdy_logging_impl.h new file mode 100644 index 0000000000000..4a21b95ab34d6 --- /dev/null +++ b/source/extensions/quic_listeners/quiche/platform/spdy_logging_impl.h @@ -0,0 +1,21 @@ +#pragma once + +// NOLINT(namespace-envoy) +// +// This file is part of the QUICHE platform implementation, and is not to be +// consumed or referenced directly by other Envoy code. It serves purely as a +// porting layer for QUICHE. + +#include "extensions/quic_listeners/quiche/platform/quic_logging_impl.h" + +#define SPDY_LOG_IMPL(severity) QUIC_LOG_IMPL(severity) + +#define SPDY_VLOG_IMPL(verbose_level) QUIC_VLOG_IMPL(verbose_level) + +#define SPDY_DLOG_IMPL(severity) QUIC_DLOG_IMPL(severity) + +#define SPDY_DLOG_IF_IMPL(severity, condition) QUIC_DLOG_IF_IMPL(severity, condition) + +#define SPDY_DVLOG_IMPL(verbose_level) QUIC_DVLOG_IMPL(verbose_level) + +#define SPDY_DVLOG_IF_IMPL(verbose_level, condition) QUIC_DVLOG_IF_IMPL(verbose_level, condition) diff --git a/test/extensions/quic_listeners/quiche/platform/BUILD b/test/extensions/quic_listeners/quiche/platform/BUILD index c24563392a970..16d008648f526 100644 --- a/test/extensions/quic_listeners/quiche/platform/BUILD +++ b/test/extensions/quic_listeners/quiche/platform/BUILD @@ -15,9 +15,10 @@ envoy_package() envoy_cc_test( name = "http2_platform_test", - srcs = ["http2_platform_test.cc"], + srcs = envoy_select_quiche(["http2_platform_test.cc"]), external_deps = ["quiche_http2_platform"], deps = [ + "//test/test_common:logging_lib", "//test/test_common:utility_lib", ], ) @@ -39,9 +40,10 @@ envoy_cc_test( envoy_cc_test( name = "spdy_platform_test", - srcs = ["spdy_platform_test.cc"], + 
srcs = envoy_select_quiche(["spdy_platform_test.cc"]), external_deps = ["quiche_spdy_platform"], deps = [ + "//test/test_common:logging_lib", "//test/test_common:utility_lib", ], ) diff --git a/test/extensions/quic_listeners/quiche/platform/http2_platform_test.cc b/test/extensions/quic_listeners/quiche/platform/http2_platform_test.cc index f17b77ae9808c..10b9b38787e1e 100644 --- a/test/extensions/quic_listeners/quiche/platform/http2_platform_test.cc +++ b/test/extensions/quic_listeners/quiche/platform/http2_platform_test.cc @@ -1,9 +1,13 @@ #include +#include "test/test_common/logging.h" + #include "gtest/gtest.h" #include "quiche/http2/platform/api/http2_arraysize.h" +#include "quiche/http2/platform/api/http2_bug_tracker.h" #include "quiche/http2/platform/api/http2_containers.h" #include "quiche/http2/platform/api/http2_estimate_memory_usage.h" +#include "quiche/http2/platform/api/http2_logging.h" #include "quiche/http2/platform/api/http2_optional.h" #include "quiche/http2/platform/api/http2_ptr_util.h" #include "quiche/http2/platform/api/http2_string.h" @@ -26,6 +30,14 @@ TEST(Http2PlatformTest, Http2Arraysize) { EXPECT_EQ(5, HTTP2_ARRAYSIZE(array)); } +TEST(Http2PlatformTest, Http2BugTracker) { + EXPECT_DEBUG_DEATH(HTTP2_BUG << "Here is a bug,", " bug"); + EXPECT_DEBUG_DEATH(HTTP2_BUG_IF(true) << "There is a bug,", " bug"); + EXPECT_LOG_NOT_CONTAINS("error", "", HTTP2_BUG_IF(false) << "A feature is not a bug."); + + EXPECT_EQ(true, FLAGS_http2_always_log_bugs_for_tests); +} + TEST(Http2PlatformTest, Http2Deque) { http2::Http2Deque deque; deque.push_back(10); @@ -38,6 +50,29 @@ TEST(Http2PlatformTest, Http2EstimateMemoryUsage) { EXPECT_EQ(0, http2::Http2EstimateMemoryUsage(s)); } +TEST(Http2PlatformTest, Http2Log) { + // HTTP2_LOG macros are defined to QUIC_LOG macros, which is tested in + // QuicPlatformTest. Here we just make sure HTTP2_LOG macros compile. 
+ HTTP2_LOG(INFO) << "INFO log may not show up by default."; + HTTP2_LOG(ERROR) << "ERROR log should show up by default."; + + // VLOG are only emitted if INFO is enabled and verbosity level is high enough. + HTTP2_VLOG(1) << "VLOG(1)"; + + HTTP2_DLOG(INFO) << "DLOG(INFO)"; + HTTP2_DLOG(ERROR) << "DLOG(ERROR)"; + + HTTP2_DLOG_IF(ERROR, true) << "DLOG_IF(ERROR, true)"; + HTTP2_DLOG_IF(ERROR, false) << "DLOG_IF(ERROR, false)"; + + HTTP2_DVLOG(2) << "DVLOG(2)"; + + HTTP2_DVLOG_IF(3, true) << "DVLOG_IF(3, true)"; + HTTP2_DVLOG_IF(4, false) << "DVLOG_IF(4, false)"; + + HTTP2_DLOG_EVERY_N(ERROR, 2) << "DLOG_EVERY_N(ERROR, 2)"; +} + TEST(Http2PlatformTest, Http2Optional) { http2::Http2Optional opt; EXPECT_FALSE(opt.has_value()); diff --git a/test/extensions/quic_listeners/quiche/platform/quic_platform_test.cc b/test/extensions/quic_listeners/quiche/platform/quic_platform_test.cc index 10557ac8b91bb..8f0b4ebf9e880 100644 --- a/test/extensions/quic_listeners/quiche/platform/quic_platform_test.cc +++ b/test/extensions/quic_listeners/quiche/platform/quic_platform_test.cc @@ -18,6 +18,7 @@ #include "quiche/quic/platform/api/quic_containers.h" #include "quiche/quic/platform/api/quic_endian.h" #include "quiche/quic/platform/api/quic_estimate_memory_usage.h" +#include "quiche/quic/platform/api/quic_expect_bug.h" #include "quiche/quic/platform/api/quic_exported_stats.h" #include "quiche/quic/platform/api/quic_hostname_utils.h" #include "quiche/quic/platform/api/quic_logging.h" @@ -74,6 +75,18 @@ TEST(QuicPlatformTest, QuicClientStats) { QuicClientSparseHistogram("my.sparse.histogram", 345); } +TEST(QuicPlatformTest, QuicExpectBug) { + auto bug = [](const char* error_message) { QUIC_BUG << error_message; }; + + auto peer_bug = [](const char* error_message) { QUIC_PEER_BUG << error_message; }; + + EXPECT_QUIC_BUG(bug("bug one is expected"), "bug one"); + EXPECT_QUIC_BUG(bug("bug two is expected"), "bug two"); + + EXPECT_QUIC_PEER_BUG(peer_bug("peer_bug_1 is expected"), 
"peer_bug_1"); + EXPECT_QUIC_PEER_BUG(peer_bug("peer_bug_2 is expected"), "peer_bug_2"); +} + TEST(QuicPlatformTest, QuicExportedStats) { // Just make sure they compile. QUIC_HISTOGRAM_ENUM("my.enum.histogram", TestEnum::ONE, TestEnum::COUNT, "doc"); diff --git a/test/extensions/quic_listeners/quiche/platform/spdy_platform_test.cc b/test/extensions/quic_listeners/quiche/platform/spdy_platform_test.cc index 8c3571920a3e2..6ac1874cad0e6 100644 --- a/test/extensions/quic_listeners/quiche/platform/spdy_platform_test.cc +++ b/test/extensions/quic_listeners/quiche/platform/spdy_platform_test.cc @@ -1,10 +1,14 @@ #include +#include "test/test_common/logging.h" + #include "gtest/gtest.h" #include "quiche/spdy/platform/api/spdy_arraysize.h" +#include "quiche/spdy/platform/api/spdy_bug_tracker.h" #include "quiche/spdy/platform/api/spdy_containers.h" #include "quiche/spdy/platform/api/spdy_endianness_util.h" #include "quiche/spdy/platform/api/spdy_estimate_memory_usage.h" +#include "quiche/spdy/platform/api/spdy_logging.h" #include "quiche/spdy/platform/api/spdy_ptr_util.h" #include "quiche/spdy/platform/api/spdy_string.h" #include "quiche/spdy/platform/api/spdy_string_piece.h" @@ -26,6 +30,14 @@ TEST(SpdyPlatformTest, SpdyArraysize) { EXPECT_EQ(5, SPDY_ARRAYSIZE(array)); } +TEST(SpdyPlatformTest, SpdyBugTracker) { + EXPECT_DEBUG_DEATH(SPDY_BUG << "Here is a bug,", " bug"); + EXPECT_DEBUG_DEATH(SPDY_BUG_IF(true) << "There is a bug,", " bug"); + EXPECT_LOG_NOT_CONTAINS("error", "", SPDY_BUG_IF(false) << "A feature is not a bug."); + + EXPECT_EQ(true, FLAGS_spdy_always_log_bugs_for_tests); +} + TEST(SpdyPlatformTest, SpdyHashMap) { spdy::SpdyHashMap hmap; hmap.insert({"foo", 2}); @@ -51,6 +63,27 @@ TEST(SpdyPlatformTest, SpdyEstimateMemoryUsage) { EXPECT_EQ(0, spdy::SpdyEstimateMemoryUsage(s)); } +TEST(SpdyPlatformTest, SpdyLog) { + // SPDY_LOG macros are defined to QUIC_LOG macros, which is tested in + // QuicPlatformTest. Here we just make sure SPDY_LOG macros compile. 
+ SPDY_LOG(INFO) << "INFO log may not show up by default."; + SPDY_LOG(ERROR) << "ERROR log should show up by default."; + + // VLOG is only emitted if INFO is enabled and verbosity level is high enough. + SPDY_VLOG(1) << "VLOG(1)"; + + SPDY_DLOG(INFO) << "DLOG(INFO)"; + SPDY_DLOG(ERROR) << "DLOG(ERROR)"; + + SPDY_DLOG_IF(ERROR, true) << "DLOG_IF(ERROR, true)"; + SPDY_DLOG_IF(ERROR, false) << "DLOG_IF(ERROR, false)"; + + SPDY_DVLOG(2) << "DVLOG(2)"; + + SPDY_DVLOG_IF(3, true) << "DVLOG_IF(3, true)"; + SPDY_DVLOG_IF(4, false) << "DVLOG_IF(4, false)"; +} + TEST(SpdyPlatformTest, SpdyMakeUnique) { auto p = spdy::SpdyMakeUnique(4); EXPECT_EQ(4, *p); From 7de2b39eeb7d0929fecb00e7b81c70236c3a4869 Mon Sep 17 00:00:00 2001 From: Kyle Larose Date: Tue, 26 Mar 2019 15:07:50 -0400 Subject: [PATCH 015/165] upstream: allow configuration of connection pool limits (#6298) We want to limit the number of connection pools per cluster. Add it to the circut breaker thresholds so we can do it per priority. Signed-off-by: Kyle Larose --- .../api/v2/cluster/circuit_breaker.proto | 7 + .../cluster_manager/cluster_stats.rst | 1 + .../intro/arch_overview/circuit_breaking.rst | 16 ++ docs/root/intro/version_history.rst | 1 + include/envoy/upstream/resource_manager.h | 10 ++ include/envoy/upstream/upstream.h | 4 +- source/common/upstream/BUILD | 26 +++- .../common/upstream/cluster_manager_impl.cc | 28 ++-- source/common/upstream/cluster_manager_impl.h | 8 +- source/common/upstream/conn_pool_map.h | 13 +- source/common/upstream/conn_pool_map_impl.h | 36 +++-- .../common/upstream/priority_conn_pool_map.h | 59 ++++++++ .../upstream/priority_conn_pool_map_impl.h | 61 ++++++++ .../common/upstream/resource_manager_impl.h | 15 +- source/common/upstream/upstream_impl.cc | 5 + test/common/http/http1/conn_pool_test.cc | 6 +- test/common/http/http2/conn_pool_test.cc | 2 +- test/common/router/retry_state_impl_test.cc | 2 +- test/common/tcp/conn_pool_test.cc | 10 +- test/common/tcp_proxy/tcp_proxy_test.cc 
| 2 +- test/common/upstream/BUILD | 15 ++ .../upstream/cluster_manager_impl_test.cc | 2 - .../upstream/conn_pool_map_impl_test.cc | 66 +++++++- .../priority_conn_pool_map_impl_test.cc | 142 ++++++++++++++++++ .../upstream/resource_manager_impl_test.cc | 20 ++- test/integration/integration_test.cc | 28 ++++ test/mocks/upstream/cluster_info.cc | 3 + test/mocks/upstream/cluster_info.h | 7 +- 28 files changed, 537 insertions(+), 58 deletions(-) create mode 100644 source/common/upstream/priority_conn_pool_map.h create mode 100644 source/common/upstream/priority_conn_pool_map_impl.h create mode 100644 test/common/upstream/priority_conn_pool_map_impl_test.cc diff --git a/api/envoy/api/v2/cluster/circuit_breaker.proto b/api/envoy/api/v2/cluster/circuit_breaker.proto index ebee99dae163d..f219fa07b4feb 100644 --- a/api/envoy/api/v2/cluster/circuit_breaker.proto +++ b/api/envoy/api/v2/cluster/circuit_breaker.proto @@ -51,6 +51,13 @@ message CircuitBreakers { // the number of resources remaining until the circuit breakers open. If // not specified, the default is false. bool track_remaining = 6; + + // The maximum number of connection pools per cluster that Envoy will concurrently support at + // once. If not specified, the default is unlimited. Set this for clusters which create a + // large number of connection pools. See + // :ref:`Circuit Breaking ` for + // more details. 
+ google.protobuf.UInt32Value max_connection_pools = 7; } // If multiple :ref:`Thresholds` diff --git a/docs/root/configuration/cluster_manager/cluster_stats.rst b/docs/root/configuration/cluster_manager/cluster_stats.rst index 47c2a011c3e66..370a9e1402d17 100644 --- a/docs/root/configuration/cluster_manager/cluster_stats.rst +++ b/docs/root/configuration/cluster_manager/cluster_stats.rst @@ -149,6 +149,7 @@ Circuit breakers statistics will be rooted at *cluster..circuit_breakers.< :widths: 1, 1, 2 cx_open, Gauge, Whether the connection circuit breaker is closed (0) or open (1) + cx_pool_open, Gauge, Whether the connection pool circuit breaker is closed (0) or open (1) rq_pending_open, Gauge, Whether the pending requests circuit breaker is closed (0) or open (1) rq_open, Gauge, Whether the requests circuit breaker is closed (0) or open (1) rq_retry_open, Gauge, Whether the retry circuit breaker is closed (0) or open (1) diff --git a/docs/root/intro/arch_overview/circuit_breaking.rst b/docs/root/intro/arch_overview/circuit_breaking.rst index b2b6e31aa8c30..152284363fb51 100644 --- a/docs/root/intro/arch_overview/circuit_breaking.rst +++ b/docs/root/intro/arch_overview/circuit_breaking.rst @@ -9,6 +9,8 @@ mesh is that Envoy enforces circuit breaking limits at the network level as oppo configure and code each application independently. Envoy supports various types of fully distributed (not coordinated) circuit breaking: +.. _arch_overview_circuit_break_cluster_maximum_connections: + * **Cluster maximum connections**: The maximum number of connections that Envoy will establish to all hosts in an upstream cluster. In practice this is only applicable to HTTP/1.1 clusters since HTTP/2 uses a single connection to each host. If this circuit breaker overflows the :ref:`upstream_cx_overflow @@ -34,6 +36,20 @@ configure and code each application independently. Envoy supports various types :ref:`upstream_rq_retry_overflow ` counter for the cluster will increment. + .. 
_arch_overview_circuit_break_cluster_maximum_connection_pools: + +* **Cluster maximum concurrent connection pools**: The maximum number of connection pools that can be + concurrently instantiated. Some features, such as the + :ref:`Original Src Listener Filter `, can + create an unbounded number of connection pools. When a cluster has exhausted its concurrent + connection pools, it will attempt to reclaim an idle one. If it cannot, then the circuit breaker + will overflow. This differs from + :ref:`Cluster maximum connections ` in that + connection pools never time out, whereas connections typically will. Connections automatically + clean up; connection pools do not. Note that in order for a connection pool to function it needs + at least one upstream connection, so this value should likely be no greater than + :ref:`Cluster maximum connections `. + Each circuit breaking limit is :ref:`configurable ` and tracked on a per upstream cluster and per priority basis. This allows different components of the distributed system to be tuned independently and have different limits. The live state of these diff --git a/docs/root/intro/version_history.rst b/docs/root/intro/version_history.rst index 18ce98def67c6..e9a2a7c75434a 100644 --- a/docs/root/intro/version_history.rst +++ b/docs/root/intro/version_history.rst @@ -70,6 +70,7 @@ Version history * upstream: added :ref:`degraded health value` which allows routing to certain hosts only when there are insufficient healthy hosts available. * upstream: add cluster factory to allow creating and registering :ref:`custom cluster type`. +* upstream: added a :ref:`circuit breaker ` to limit the number of concurrent connection pools in use. * tracing: added :ref:`verbose ` to support logging annotations on spans. * upstream: added support for host weighting and :ref:`locality weighting ` in the :ref:`ring hash load balancer `, and added a :ref:`maximum_ring_size` config parameter to strictly bound the ring size. 
* zookeeper: added a ZooKeeper proxy filter that parses ZooKeeper messages (requests/responses/events). diff --git a/include/envoy/upstream/resource_manager.h b/include/envoy/upstream/resource_manager.h index 4bd45feed3ddd..902ce75955f68 100644 --- a/include/envoy/upstream/resource_manager.h +++ b/include/envoy/upstream/resource_manager.h @@ -37,6 +37,11 @@ class Resource { */ virtual void dec() PURE; + /** + * Decrement the resource count by a specific amount. + */ + virtual void decBy(uint64_t amount) PURE; + /** * @return the current maximum allowed number of this resource. */ @@ -73,6 +78,11 @@ class ResourceManager { * @return Resource& active retries. */ virtual Resource& retries() PURE; + + /** + * @return Resource& active connection pools. + */ + virtual Resource& connectionPools() PURE; }; } // namespace Upstream diff --git a/include/envoy/upstream/upstream.h b/include/envoy/upstream/upstream.h index e2c2300d6e5d6..3fdb15cdeee17 100644 --- a/include/envoy/upstream/upstream.h +++ b/include/envoy/upstream/upstream.h @@ -537,10 +537,12 @@ class PrioritySet { OPEN_GAUGE (rq_pending_open) \ OPEN_GAUGE (rq_open) \ OPEN_GAUGE (rq_retry_open) \ + OPEN_GAUGE (cx_pool_open) \ REMAINING_GAUGE (remaining_cx) \ REMAINING_GAUGE (remaining_pending) \ REMAINING_GAUGE (remaining_rq) \ - REMAINING_GAUGE (remaining_retries) + REMAINING_GAUGE (remaining_retries) \ + REMAINING_GAUGE (remaining_cx_pools) // clang-format on /** diff --git a/source/common/upstream/BUILD b/source/common/upstream/BUILD index bd09d3f9329b1..0629e568dd77b 100644 --- a/source/common/upstream/BUILD +++ b/source/common/upstream/BUILD @@ -60,8 +60,7 @@ envoy_cc_library( "//source/common/protobuf:utility_lib", "//source/common/router:shadow_writer_lib", "//source/common/tcp:conn_pool_lib", - "//source/common/upstream:conn_pool_map", - "//source/common/upstream:conn_pool_map_impl_lib", + "//source/common/upstream:priority_conn_pool_map_impl_lib", "//source/common/upstream:upstream_lib", 
"@envoy_api//envoy/admin/v2alpha:config_dump_cc", "@envoy_api//envoy/api/v2/core:base_cc", @@ -73,6 +72,8 @@ envoy_cc_library( hdrs = ["conn_pool_map.h"], deps = [ "//include/envoy/event:dispatcher_interface", + "//include/envoy/upstream:resource_manager_interface", + "//include/envoy/upstream:upstream_interface", "//source/common/common:debug_recursion_checker_lib", ], ) @@ -85,6 +86,27 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "priority_conn_pool_map", + hdrs = ["priority_conn_pool_map.h"], + deps = [ + ":conn_pool_map", + "//include/envoy/event:dispatcher_interface", + "//include/envoy/upstream:resource_manager_interface", + "//include/envoy/upstream:upstream_interface", + "//source/common/common:debug_recursion_checker_lib", + ], +) + +envoy_cc_library( + name = "priority_conn_pool_map_impl_lib", + hdrs = ["priority_conn_pool_map_impl.h"], + deps = [ + ":conn_pool_map_impl_lib", + ":priority_conn_pool_map", + ], +) + envoy_cc_library( name = "edf_scheduler_lib", hdrs = ["edf_scheduler.h"], diff --git a/source/common/upstream/cluster_manager_impl.cc b/source/common/upstream/cluster_manager_impl.cc index d178871866286..8880e01ff547f 100644 --- a/source/common/upstream/cluster_manager_impl.cc +++ b/source/common/upstream/cluster_manager_impl.cc @@ -31,10 +31,10 @@ #include "common/router/shadow_writer_impl.h" #include "common/tcp/conn_pool.h" #include "common/upstream/cds_api_impl.h" -#include "common/upstream/conn_pool_map_impl.h" #include "common/upstream/load_balancer_impl.h" #include "common/upstream/maglev_lb.h" #include "common/upstream/original_dst_cluster.h" +#include "common/upstream/priority_conn_pool_map_impl.h" #include "common/upstream/ring_hash_lb.h" #include "common/upstream/subset_lb.h" @@ -1043,7 +1043,7 @@ ClusterManagerImpl::ThreadLocalClusterManagerImpl::getHttpConnPoolsContainer( if (!allocate) { return nullptr; } - ConnPoolsContainer container{thread_local_dispatcher_}; + ConnPoolsContainer container{thread_local_dispatcher_, 
host}; container_iter = host_http_conn_pool_map_.emplace(host, std::move(container)).first; } @@ -1132,7 +1132,7 @@ ClusterManagerImpl::ThreadLocalClusterManagerImpl::ClusterEntry::connPool( } // Inherit socket options from downstream connection, if set. - std::vector hash_key = {uint8_t(protocol), uint8_t(priority)}; + std::vector hash_key = {uint8_t(protocol)}; // Use downstream connection socket options for computing connection pool hash key, if any. // This allows socket options to control connection pooling so that connections with @@ -1153,16 +1153,18 @@ ClusterManagerImpl::ThreadLocalClusterManagerImpl::ClusterEntry::connPool( // Note: to simplify this, we assume that the factory is only called in the scope of this // function. Otherwise, we'd need to capture a few of these variables by value. - ConnPoolsContainer::ConnPools::OptPoolRef pool = container.pools_->getPool(hash_key, [&]() { - return parent_.parent_.factory_.allocateConnPool( - parent_.thread_local_dispatcher_, host, priority, protocol, - have_options ? context->downstreamConnection()->socketOptions() : nullptr); - }); - // The Connection Pool tracking is a work in progress. We plan for it to eventually have the - // ability to fail, but until we add upper layer handling for failures, it should not. So, assert - // that we don't accidentally add conditions that could allow it to fail. - ASSERT(pool.has_value(), "Pool allocation should never fail"); - return &(pool.value().get()); + ConnPoolsContainer::ConnPools::OptPoolRef pool = + container.pools_->getPool(priority, hash_key, [&]() { + return parent_.parent_.factory_.allocateConnPool( + parent_.thread_local_dispatcher_, host, priority, protocol, + have_options ? 
context->downstreamConnection()->socketOptions() : nullptr); + }); + + if (pool.has_value()) { + return &(pool.value().get()); + } else { + return nullptr; + } } Tcp::ConnectionPool::Instance* diff --git a/source/common/upstream/cluster_manager_impl.h b/source/common/upstream/cluster_manager_impl.h index 0e9e8eb52667d..9586357bc4aa1 100644 --- a/source/common/upstream/cluster_manager_impl.h +++ b/source/common/upstream/cluster_manager_impl.h @@ -22,8 +22,8 @@ #include "common/config/grpc_mux_impl.h" #include "common/http/async_client_impl.h" -#include "common/upstream/conn_pool_map.h" #include "common/upstream/load_stats_reporter.h" +#include "common/upstream/priority_conn_pool_map.h" #include "common/upstream/upstream_impl.h" namespace Envoy { @@ -236,10 +236,10 @@ class ClusterManagerImpl : public ClusterManager, Logger::Loggable(dispatcher, absl::nullopt)} {} + ConnPoolsContainer(Event::Dispatcher& dispatcher, const HostConstSharedPtr& host) + : pools_{std::make_shared(dispatcher, host)} {} - typedef ConnPoolMap, Http::ConnectionPool::Instance> ConnPools; + typedef PriorityConnPoolMap, Http::ConnectionPool::Instance> ConnPools; // This is a shared_ptr so we can keep it alive while cleaning up. 
std::shared_ptr pools_; diff --git a/source/common/upstream/conn_pool_map.h b/source/common/upstream/conn_pool_map.h index f0ef4cd4391d9..20127dbe401a4 100644 --- a/source/common/upstream/conn_pool_map.h +++ b/source/common/upstream/conn_pool_map.h @@ -4,6 +4,8 @@ #include #include "envoy/event/dispatcher.h" +#include "envoy/upstream/resource_manager.h" +#include "envoy/upstream/upstream.h" #include "common/common/debug_recursion_checker.h" @@ -21,7 +23,8 @@ template class ConnPoolMap { using DrainedCb = std::function; using OptPoolRef = absl::optional>; - ConnPoolMap(Event::Dispatcher& dispatcher, absl::optional max_size); + ConnPoolMap(Event::Dispatcher& dispatcher, const HostConstSharedPtr& host, + ResourcePriority priority); ~ConnPoolMap(); /** * Returns an existing pool for `key`, or creates a new one using `factory`. Note that it is @@ -60,11 +63,17 @@ template class ConnPoolMap { */ bool freeOnePool(); + /** + * Cleans up the active_pools_ map and updates resource tracking + **/ + void clearActivePools(); + absl::flat_hash_map> active_pools_; Event::Dispatcher& thread_local_dispatcher_; std::vector cached_callbacks_; Common::DebugRecursionChecker recursion_checker_; - const absl::optional max_size_; + const HostConstSharedPtr host_; + const ResourcePriority priority_; }; } // namespace Upstream diff --git a/source/common/upstream/conn_pool_map_impl.h b/source/common/upstream/conn_pool_map_impl.h index 598dea9a21f85..156751942535f 100644 --- a/source/common/upstream/conn_pool_map_impl.h +++ b/source/common/upstream/conn_pool_map_impl.h @@ -7,11 +7,17 @@ namespace Upstream { template ConnPoolMap::ConnPoolMap(Envoy::Event::Dispatcher& dispatcher, - absl::optional max_size) - : thread_local_dispatcher_(dispatcher), max_size_(max_size) {} - -template -ConnPoolMap::~ConnPoolMap() = default; + const HostConstSharedPtr& host, + ResourcePriority priority) + : thread_local_dispatcher_(dispatcher), host_(host), priority_(priority) {} + +template 
ConnPoolMap::~ConnPoolMap() { + // Clean up the pools to ensure resource tracking is kept up to date. Note that we do not call + // `clear()` here to avoid doing a deferred delete. This triggers some unwanted race conditions + // on shutdown where deleted resources end up putting stuff on the deferred delete list after the + // worker threads have shut down. + clearActivePools(); +} template typename ConnPoolMap::OptPoolRef @@ -24,15 +30,19 @@ ConnPoolMap::getPool(KEY_TYPE key, const PoolFactory& facto if (pool_iter != active_pools_.end()) { return std::ref(*(pool_iter->second)); } - + Resource& connPoolResource = host_->cluster().resourceManager(priority_).connectionPools(); // We need a new pool. Check if we have room. - if (max_size_.has_value() && size() >= max_size_.value()) { + if (!connPoolResource.canCreate()) { // We're full. Try to free up a pool. If we can't, bail out. if (!freeOnePool()) { + // TODO(klarose): Add some explicit counters for failure cases here, similar to the other + // circuit breakers. return absl::nullopt; } - ASSERT(size() < max_size_.value(), "Freeing a pool should reduce the size to below the max."); + ASSERT(size() < connPoolResource.max(), + "Freeing a pool should reduce the size to below the max."); + // TODO(klarose): Consider some simple hysteresis here. How can we prevent iterating over all // pools when we're at the limit every time we want to allocate a new one, even if most of the // pools are not busy, while balancing that with not unnecessarily freeing all pools? If we @@ -42,6 +52,7 @@ ConnPoolMap::getPool(KEY_TYPE key, const PoolFactory& facto // We have room for a new pool. Allocate one and let it know about any cached callbacks. auto new_pool = factory(); + connPoolResource.inc(); for (const auto& cb : cached_callbacks_) { new_pool->addDrainedCallback(cb); } @@ -60,8 +71,7 @@ template void ConnPoolMap @@ -96,11 +106,17 @@ bool ConnPoolMap::freeOnePool() { if (pool_iter != active_pools_.end()) { // We found one. 
Free it up, and let the caller know. active_pools_.erase(pool_iter); + host_->cluster().resourceManager(priority_).connectionPools().dec(); return true; } return false; } +template +void ConnPoolMap::clearActivePools() { + host_->cluster().resourceManager(priority_).connectionPools().decBy(active_pools_.size()); + active_pools_.clear(); +} } // namespace Upstream } // namespace Envoy diff --git a/source/common/upstream/priority_conn_pool_map.h b/source/common/upstream/priority_conn_pool_map.h new file mode 100644 index 0000000000000..30636728d31b4 --- /dev/null +++ b/source/common/upstream/priority_conn_pool_map.h @@ -0,0 +1,59 @@ +#pragma once + +#include "envoy/event/dispatcher.h" +#include "envoy/upstream/resource_manager.h" +#include "envoy/upstream/upstream.h" + +#include "common/upstream/conn_pool_map.h" + +namespace Envoy { +namespace Upstream { +/** + * A class mapping keys to connection pools, with some recycling logic built in. + */ +template class PriorityConnPoolMap { +public: + using ConnPoolMapType = ConnPoolMap; + using PoolFactory = typename ConnPoolMapType::PoolFactory; + using DrainedCb = typename ConnPoolMapType::DrainedCb; + using OptPoolRef = typename ConnPoolMapType::OptPoolRef; + + PriorityConnPoolMap(Event::Dispatcher& dispatcher, const HostConstSharedPtr& host); + ~PriorityConnPoolMap(); + /** + * Returns an existing pool for the given priority and `key`, or creates a new one using + * `factory`. Note that it is possible for this to fail if a limit on the number of pools allowed + * is reached. + * @return The pool corresponding to `key`, or `absl::nullopt`. + */ + OptPoolRef getPool(ResourcePriority priority, KEY_TYPE key, const PoolFactory& factory); + + /** + * @return the number of pools across all priorities. + */ + size_t size() const; + + /** + * Destroys all mapped pools. + */ + void clear(); + + /** + * Adds a drain callback to all mapped pools. Any future mapped pools will have the callback + automatically added. 
Be careful with the callback. If it itself calls into `this`, modifying + * the state of `this`, there is a good chance it will cause corruption due to the callback firing + * immediately. + */ + void addDrainedCallback(const DrainedCb& cb); + + /** + * Instructs each connection pool to drain its connections. + */ + void drainConnections(); + +private: + std::array, NumResourcePriorities> conn_pool_maps_; +}; + +} // namespace Upstream +} // namespace Envoy diff --git a/source/common/upstream/priority_conn_pool_map_impl.h b/source/common/upstream/priority_conn_pool_map_impl.h new file mode 100644 index 0000000000000..cfe1c021393bc --- /dev/null +++ b/source/common/upstream/priority_conn_pool_map_impl.h @@ -0,0 +1,61 @@ +#pragma once + +#include "common/upstream/conn_pool_map_impl.h" +#include "common/upstream/priority_conn_pool_map.h" + +namespace Envoy { +namespace Upstream { + +template +PriorityConnPoolMap::PriorityConnPoolMap(Envoy::Event::Dispatcher& dispatcher, + const HostConstSharedPtr& host) { + for (size_t pool_map_index = 0; pool_map_index < NumResourcePriorities; ++pool_map_index) { + ResourcePriority priority = static_cast(pool_map_index); + conn_pool_maps_[pool_map_index].reset(new ConnPoolMapType(dispatcher, host, priority)); + } +} + +template +PriorityConnPoolMap::~PriorityConnPoolMap() = default; + +template +typename PriorityConnPoolMap::OptPoolRef +PriorityConnPoolMap::getPool(ResourcePriority priority, KEY_TYPE key, + const PoolFactory& factory) { + size_t index = static_cast(priority); + ASSERT(index < conn_pool_maps_.size()); + return conn_pool_maps_[index]->getPool(key, factory); +} + +template +size_t PriorityConnPoolMap::size() const { + size_t size = 0; + for (const auto& pool_map : conn_pool_maps_) { + size += pool_map->size(); + } + return size; +} + +template +void PriorityConnPoolMap::clear() { + for (auto& pool_map : conn_pool_maps_) { + pool_map->clear(); + } +} + +template +void PriorityConnPoolMap::addDrainedCallback(const 
DrainedCb& cb) { + for (auto& pool_map : conn_pool_maps_) { + pool_map->addDrainedCallback(cb); + } +} + +template +void PriorityConnPoolMap::drainConnections() { + for (auto& pool_map : conn_pool_maps_) { + pool_map->drainConnections(); + } +} + +} // namespace Upstream +} // namespace Envoy diff --git a/source/common/upstream/resource_manager_impl.h b/source/common/upstream/resource_manager_impl.h index 4ee575a639042..a887182c63983 100644 --- a/source/common/upstream/resource_manager_impl.h +++ b/source/common/upstream/resource_manager_impl.h @@ -27,7 +27,7 @@ class ResourceManagerImpl : public ResourceManager { public: ResourceManagerImpl(Runtime::Loader& runtime, const std::string& runtime_key, uint64_t max_connections, uint64_t max_pending_requests, - uint64_t max_requests, uint64_t max_retries, + uint64_t max_requests, uint64_t max_retries, uint64_t max_connection_pools, ClusterCircuitBreakersStats cb_stats) : connections_(max_connections, runtime, runtime_key + "max_connections", cb_stats.cx_open_, cb_stats.remaining_cx_), @@ -36,13 +36,16 @@ class ResourceManagerImpl : public ResourceManager { requests_(max_requests, runtime, runtime_key + "max_requests", cb_stats.rq_open_, cb_stats.remaining_rq_), retries_(max_retries, runtime, runtime_key + "max_retries", cb_stats.rq_retry_open_, - cb_stats.remaining_retries_) {} + cb_stats.remaining_retries_), + connection_pools_(max_connection_pools, runtime, runtime_key + "max_connection_pools", + cb_stats.cx_pool_open_, cb_stats.remaining_cx_pools_) {} // Upstream::ResourceManager Resource& connections() override { return connections_; } Resource& pendingRequests() override { return pending_requests_; } Resource& requests() override { return requests_; } Resource& retries() override { return retries_; } + Resource& connectionPools() override { return connection_pools_; } private: struct ResourceImpl : public Resource { @@ -61,9 +64,10 @@ class ResourceManagerImpl : public ResourceManager { updateRemaining(); 
open_gauge_.set(canCreate() ? 0 : 1); } - void dec() override { - ASSERT(current_ > 0); - current_--; + void dec() override { decBy(1); } + void decBy(uint64_t amount) override { + ASSERT(current_ >= amount); + current_ -= amount; updateRemaining(); open_gauge_.set(canCreate() ? 0 : 1); } @@ -105,6 +109,7 @@ class ResourceManagerImpl : public ResourceManager { ResourceImpl pending_requests_; ResourceImpl requests_; ResourceImpl retries_; + ResourceImpl connection_pools_; }; typedef std::unique_ptr ResourceManagerImplPtr; diff --git a/source/common/upstream/upstream_impl.cc b/source/common/upstream/upstream_impl.cc index ef436d8a084c7..301fb23536213 100644 --- a/source/common/upstream/upstream_impl.cc +++ b/source/common/upstream/upstream_impl.cc @@ -2,6 +2,7 @@ #include #include +#include #include #include #include @@ -846,6 +847,7 @@ ClusterInfoImpl::ResourceManagers::load(const envoy::api::v2::Cluster& config, uint64_t max_pending_requests = 1024; uint64_t max_requests = 1024; uint64_t max_retries = 3; + uint64_t max_connection_pools = std::numeric_limits::max(); bool track_remaining = false; @@ -877,9 +879,12 @@ ClusterInfoImpl::ResourceManagers::load(const envoy::api::v2::Cluster& config, max_requests = PROTOBUF_GET_WRAPPED_OR_DEFAULT(*it, max_requests, max_requests); max_retries = PROTOBUF_GET_WRAPPED_OR_DEFAULT(*it, max_retries, max_retries); track_remaining = it->track_remaining(); + max_connection_pools = + PROTOBUF_GET_WRAPPED_OR_DEFAULT(*it, max_connection_pools, max_connection_pools); } return std::make_unique( runtime, runtime_prefix, max_connections, max_pending_requests, max_requests, max_retries, + max_connection_pools, ClusterInfoImpl::generateCircuitBreakersStats(stats_scope, priority_name, track_remaining)); } diff --git a/test/common/http/http1/conn_pool_test.cc b/test/common/http/http1/conn_pool_test.cc index c66a2d9a41d55..27324c4a6dbf6 100644 --- a/test/common/http/http1/conn_pool_test.cc +++ b/test/common/http/http1/conn_pool_test.cc @@ 
-207,7 +207,7 @@ struct ActiveTestRequest { * Verify that connections are drained when requested. */ TEST_F(Http1ConnPoolImplTest, DrainConnections) { - cluster_->resetResourceManager(2, 1024, 1024, 1); + cluster_->resetResourceManager(2, 1024, 1024, 1, 1); InSequence s; ActiveTestRequest r1(*this, 0, ActiveTestRequest::Type::CreateConnection); @@ -292,7 +292,7 @@ TEST_F(Http1ConnPoolImplTest, MultipleRequestAndResponse) { * Test when we overflow max pending requests. */ TEST_F(Http1ConnPoolImplTest, MaxPendingRequests) { - cluster_->resetResourceManager(1, 1, 1024, 1); + cluster_->resetResourceManager(1, 1, 1024, 1, 1); EXPECT_EQ(0U, cluster_->circuit_breakers_stats_.rq_pending_open_.value()); @@ -614,7 +614,7 @@ TEST_F(Http1ConnPoolImplTest, MaxRequestsPerConnection) { } TEST_F(Http1ConnPoolImplTest, ConcurrentConnections) { - cluster_->resetResourceManager(2, 1024, 1024, 1); + cluster_->resetResourceManager(2, 1024, 1024, 1, 1); InSequence s; ActiveTestRequest r1(*this, 0, ActiveTestRequest::Type::CreateConnection); diff --git a/test/common/http/http2/conn_pool_test.cc b/test/common/http/http2/conn_pool_test.cc index 554b873fdea8f..c7b5577154a36 100644 --- a/test/common/http/http2/conn_pool_test.cc +++ b/test/common/http/http2/conn_pool_test.cc @@ -609,7 +609,7 @@ TEST_F(Http2ConnPoolImplTest, ConnectTimeout) { } TEST_F(Http2ConnPoolImplTest, MaxGlobalRequests) { - cluster_->resetResourceManager(1024, 1024, 1, 1); + cluster_->resetResourceManager(1024, 1024, 1, 1, 1); InSequence s; expectClientCreate(); diff --git a/test/common/router/retry_state_impl_test.cc b/test/common/router/retry_state_impl_test.cc index b126f70cca3f1..813daa4038b08 100644 --- a/test/common/router/retry_state_impl_test.cc +++ b/test/common/router/retry_state_impl_test.cc @@ -424,7 +424,7 @@ TEST_F(RouterRetryStateImplTest, RouteConfigNoHeaderConfig) { } TEST_F(RouterRetryStateImplTest, NoAvailableRetries) { - cluster_.resetResourceManager(0, 0, 0, 0); + cluster_.resetResourceManager(0, 0, 
0, 0, 0); Http::TestHeaderMapImpl request_headers{{"x-envoy-retry-on", "connect-failure"}}; setup(request_headers); diff --git a/test/common/tcp/conn_pool_test.cc b/test/common/tcp/conn_pool_test.cc index 034e4098dbaca..149354109c055 100644 --- a/test/common/tcp/conn_pool_test.cc +++ b/test/common/tcp/conn_pool_test.cc @@ -271,7 +271,7 @@ struct ActiveTestConn { * Verify that connections are drained when requested. */ TEST_F(TcpConnPoolImplTest, DrainConnections) { - cluster_->resetResourceManager(3, 1024, 1024, 1); + cluster_->resetResourceManager(3, 1024, 1024, 1, 1); InSequence s; ActiveTestConn c1(*this, 0, ActiveTestConn::Type::CreateConnection); @@ -484,7 +484,7 @@ TEST_F(TcpConnPoolImplTest, ConnectionStateLifecycle) { * Test when we overflow max pending requests. */ TEST_F(TcpConnPoolImplTest, MaxPendingRequests) { - cluster_->resetResourceManager(1, 1, 1024, 1); + cluster_->resetResourceManager(1, 1, 1024, 1, 1); ConnPoolCallbacks callbacks; conn_pool_.expectConnCreate(); @@ -657,7 +657,7 @@ TEST_F(TcpConnPoolImplTest, DisconnectWhileBound) { * Test upstream disconnection of one request while another is pending. */ TEST_F(TcpConnPoolImplTest, DisconnectWhilePending) { - cluster_->resetResourceManager(1, 1024, 1024, 1); + cluster_->resetResourceManager(1, 1024, 1024, 1, 1); InSequence s; // First request connected. @@ -767,7 +767,7 @@ TEST_F(TcpConnPoolImplTest, MaxRequestsPerConnection) { * Test that multiple connections can be assigned at once. 
*/ TEST_F(TcpConnPoolImplTest, ConcurrentConnections) { - cluster_->resetResourceManager(2, 1024, 1024, 1); + cluster_->resetResourceManager(2, 1024, 1024, 1, 1); InSequence s; ActiveTestConn c1(*this, 0, ActiveTestConn::Type::CreateConnection); @@ -803,7 +803,7 @@ TEST_F(TcpConnPoolImplTest, ConnectionStateWithConcurrentConnections) { auto* s2 = new TestConnectionState(2, [&]() -> void { state_destroyed |= 2; }); auto* s3 = new TestConnectionState(2, [&]() -> void { state_destroyed |= 4; }); - cluster_->resetResourceManager(2, 1024, 1024, 1); + cluster_->resetResourceManager(2, 1024, 1024, 1, 1); ActiveTestConn c1(*this, 0, ActiveTestConn::Type::CreateConnection); c1.callbacks_.conn_data_->setConnectionState(std::unique_ptr(s1)); ActiveTestConn c2(*this, 1, ActiveTestConn::Type::CreateConnection); diff --git a/test/common/tcp_proxy/tcp_proxy_test.cc b/test/common/tcp_proxy/tcp_proxy_test.cc index 27a056340fe15..ecf3fb691b6cf 100644 --- a/test/common/tcp_proxy/tcp_proxy_test.cc +++ b/test/common/tcp_proxy/tcp_proxy_test.cc @@ -799,7 +799,7 @@ TEST_F(TcpProxyTest, UpstreamConnectFailure) { TEST_F(TcpProxyTest, UpstreamConnectionLimit) { configure(accessLogConfig("%RESPONSE_FLAGS%")); factory_context_.cluster_manager_.thread_local_cluster_.cluster_.info_->resetResourceManager( - 0, 0, 0, 0); + 0, 0, 0, 0, 0); // setup sets up expectation for tcpConnForCluster but this test is expected to NOT call that filter_ = std::make_unique(config_, factory_context_.cluster_manager_, timeSystem()); diff --git a/test/common/upstream/BUILD b/test/common/upstream/BUILD index 2653380f0070c..b65aa9ef552ab 100644 --- a/test/common/upstream/BUILD +++ b/test/common/upstream/BUILD @@ -71,6 +71,7 @@ envoy_cc_test( "//test/mocks:common_lib", "//test/mocks/event:event_mocks", "//test/mocks/http:conn_pool_mocks", + "//test/mocks/upstream:host_mocks", "//test/test_common:utility_lib", ], ) @@ -267,6 +268,20 @@ envoy_cc_test( ], ) +envoy_cc_test( + name = "priority_conn_pool_map_impl_test", + 
srcs = ["priority_conn_pool_map_impl_test.cc"], + deps = [ + "//include/envoy/http:conn_pool_interface", + "//source/common/upstream:priority_conn_pool_map_impl_lib", + "//test/mocks:common_lib", + "//test/mocks/event:event_mocks", + "//test/mocks/http:conn_pool_mocks", + "//test/mocks/upstream:host_mocks", + "//test/test_common:utility_lib", + ], +) + envoy_cc_test( name = "resource_manager_impl_test", srcs = ["resource_manager_impl_test.cc"], diff --git a/test/common/upstream/cluster_manager_impl_test.cc b/test/common/upstream/cluster_manager_impl_test.cc index 5731e0c403c68..a15b364c8b2d6 100644 --- a/test/common/upstream/cluster_manager_impl_test.cc +++ b/test/common/upstream/cluster_manager_impl_test.cc @@ -1688,7 +1688,6 @@ TEST_F(ClusterManagerImplTest, DynamicHostRemove) { // drain callbacks, etc. dns_timer_->callback_(); dns_callback(TestUtility::makeDnsResponse({"127.0.0.2", "127.0.0.3"})); - factory_.tls_.shutdownThread(); } @@ -1915,7 +1914,6 @@ TEST_F(ClusterManagerImplTest, DynamicHostRemoveWithTls) { // drain callbacks, etc. 
dns_timer_->callback_(); dns_callback(TestUtility::makeDnsResponse({"127.0.0.2", "127.0.0.3"})); - factory_.tls_.shutdownThread(); } diff --git a/test/common/upstream/conn_pool_map_impl_test.cc b/test/common/upstream/conn_pool_map_impl_test.cc index 93f5a7cc6ed7e..b8183d2eae29b 100644 --- a/test/common/upstream/conn_pool_map_impl_test.cc +++ b/test/common/upstream/conn_pool_map_impl_test.cc @@ -8,11 +8,13 @@ #include "test/mocks/common.h" #include "test/mocks/event/mocks.h" #include "test/mocks/http/conn_pool.h" +#include "test/mocks/upstream/host.h" #include "test/test_common/utility.h" #include "gmock/gmock.h" #include "gtest/gtest.h" +using testing::AtLeast; using testing::Invoke; using testing::InvokeArgument; using testing::NiceMock; @@ -30,10 +32,17 @@ class ConnPoolMapImplTest : public testing::Test { using TestMap = ConnPoolMap; using TestMapPtr = std::unique_ptr; - TestMapPtr makeTestMap() { return std::make_unique(dispatcher_, absl::nullopt); } + TestMapPtr makeTestMap() { + return std::make_unique(dispatcher_, host_, ResourcePriority::Default); + } TestMapPtr makeTestMapWithLimit(uint64_t limit) { - return std::make_unique(dispatcher_, absl::make_optional(limit)); + return makeTestMapWithLimitAtPriority(limit, ResourcePriority::Default); + } + + TestMapPtr makeTestMapWithLimitAtPriority(uint64_t limit, ResourcePriority priority) { + host_->cluster_.resetResourceManager(1024, 1024, 1024, 1024, limit); + return std::make_unique(dispatcher_, host_, priority); } TestMap::PoolFactory getBasicFactory() { @@ -73,6 +82,7 @@ class ConnPoolMapImplTest : public testing::Test { protected: NiceMock dispatcher_; std::vector*> mock_pools_; + std::shared_ptr> host_ = std::make_shared>(); }; TEST_F(ConnPoolMapImplTest, TestMapIsEmptyOnConstruction) { @@ -295,6 +305,58 @@ TEST_F(ConnPoolMapImplTest, GetPoolFailStateIsCleared) { EXPECT_EQ(test_map->size(), 2); } +TEST_F(ConnPoolMapImplTest, CircuitBreakerNotSetOnClear) { + TestMapPtr test_map = makeTestMapWithLimit(1); + + 
test_map->getPool(1, getBasicFactory()); + test_map->getPool(2, getBasicFactory()); + test_map->getPool(3, getBasicFactory()); + + test_map->clear(); + + EXPECT_EQ(host_->cluster_.circuit_breakers_stats_.cx_pool_open_.value(), 0); +} + +TEST_F(ConnPoolMapImplTest, CircuitBreakerSetAtLimit) { + TestMapPtr test_map = makeTestMapWithLimit(2); + + test_map->getPool(1, getBasicFactory()); + test_map->getPool(2, getBasicFactory()); + + EXPECT_EQ(host_->cluster_.circuit_breakers_stats_.cx_pool_open_.value(), 1); +} + +TEST_F(ConnPoolMapImplTest, CircuitBreakerClearedOnDestroy) { + { + TestMapPtr test_map = makeTestMapWithLimit(2); + + test_map->getPool(1, getBasicFactory()); + test_map->getPool(2, getBasicFactory()); + } + + EXPECT_EQ(host_->cluster_.circuit_breakers_stats_.cx_pool_open_.value(), 0); +} + +TEST_F(ConnPoolMapImplTest, CircuitBreakerUsesProvidedPriorityDefault) { + TestMapPtr test_map = makeTestMapWithLimitAtPriority(2, ResourcePriority::Default); + + EXPECT_CALL(host_->cluster_, resourceManager(ResourcePriority::High)).Times(0); + EXPECT_CALL(host_->cluster_, resourceManager(ResourcePriority::Default)).Times(AtLeast(1)); + + test_map->getPool(1, getBasicFactory()); + test_map->getPool(2, getBasicFactory()); +} + +TEST_F(ConnPoolMapImplTest, CircuitBreakerUsesProvidedPriorityHigh) { + TestMapPtr test_map = makeTestMapWithLimitAtPriority(2, ResourcePriority::High); + + EXPECT_CALL(host_->cluster_, resourceManager(ResourcePriority::High)).Times(AtLeast(1)); + EXPECT_CALL(host_->cluster_, resourceManager(ResourcePriority::Default)).Times(0); + + test_map->getPool(1, getBasicFactory()); + test_map->getPool(2, getBasicFactory()); +} + // The following tests only die in debug builds, so don't run them if this isn't one. 
#if !defined(NDEBUG) class ConnPoolMapImplDeathTest : public ConnPoolMapImplTest {}; diff --git a/test/common/upstream/priority_conn_pool_map_impl_test.cc b/test/common/upstream/priority_conn_pool_map_impl_test.cc new file mode 100644 index 0000000000000..fbe98e4d17e1c --- /dev/null +++ b/test/common/upstream/priority_conn_pool_map_impl_test.cc @@ -0,0 +1,142 @@ +#include + +#include "envoy/http/conn_pool.h" + +#include "common/upstream/priority_conn_pool_map_impl.h" + +#include "test/mocks/common.h" +#include "test/mocks/event/mocks.h" +#include "test/mocks/http/conn_pool.h" +#include "test/mocks/upstream/host.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +using testing::AtLeast; +using testing::Return; +using testing::SaveArg; + +namespace Envoy { +namespace Upstream { +namespace { + +class PriorityConnPoolMapImplTest : public testing::Test { +public: + using TestMap = PriorityConnPoolMap; + using TestMapPtr = std::unique_ptr; + + TestMapPtr makeTestMap() { return std::make_unique(dispatcher_, host_); } + + TestMap::PoolFactory getBasicFactory() { + return [&]() { + auto pool = std::make_unique>(); + ON_CALL(*pool, hasActiveConnections).WillByDefault(Return(false)); + mock_pools_.push_back(pool.get()); + return pool; + }; + } + +protected: + NiceMock dispatcher_; + std::vector*> mock_pools_; + std::shared_ptr> host_ = std::make_shared>(); +}; + +// Show that we return a non-null value, and that we invoke the default resource manager +TEST_F(PriorityConnPoolMapImplTest, DefaultPriorityProxiedThrough) { + TestMapPtr test_map = makeTestMap(); + + EXPECT_CALL(host_->cluster_, resourceManager(ResourcePriority::High)).Times(0); + EXPECT_CALL(host_->cluster_, resourceManager(ResourcePriority::Default)).Times(AtLeast(1)); + + auto pool = test_map->getPool(ResourcePriority::Default, 0, getBasicFactory()); + EXPECT_TRUE(pool.has_value()); + + // At this point, we may clean up/decrement by 0, etc, so allow any number. 
+ EXPECT_CALL(host_->cluster_, resourceManager(ResourcePriority::High)).Times(AtLeast(1)); +} + +// Show that we return a non-null value, and that we invoke the high resource manager +TEST_F(PriorityConnPoolMapImplTest, HighPriorityProxiedThrough) { + TestMapPtr test_map = makeTestMap(); + + EXPECT_CALL(host_->cluster_, resourceManager(ResourcePriority::Default)).Times(0); + EXPECT_CALL(host_->cluster_, resourceManager(ResourcePriority::High)).Times(AtLeast(1)); + + auto pool = test_map->getPool(ResourcePriority::High, 0, getBasicFactory()); + EXPECT_TRUE(pool.has_value()); + + // At this point, we may clean up/decrement by 0, etc, so allow any number. + EXPECT_CALL(host_->cluster_, resourceManager(ResourcePriority::Default)).Times(AtLeast(1)); +} + +TEST_F(PriorityConnPoolMapImplTest, TestSizeForSinglePriority) { + TestMapPtr test_map = makeTestMap(); + + test_map->getPool(ResourcePriority::High, 0, getBasicFactory()); + test_map->getPool(ResourcePriority::High, 1, getBasicFactory()); + + EXPECT_EQ(test_map->size(), 2); +} + +TEST_F(PriorityConnPoolMapImplTest, TestSizeForMultiplePriorities) { + TestMapPtr test_map = makeTestMap(); + + test_map->getPool(ResourcePriority::High, 0, getBasicFactory()); + test_map->getPool(ResourcePriority::High, 1, getBasicFactory()); + test_map->getPool(ResourcePriority::Default, 0, getBasicFactory()); + test_map->getPool(ResourcePriority::Default, 1, getBasicFactory()); + test_map->getPool(ResourcePriority::Default, 2, getBasicFactory()); + + EXPECT_EQ(test_map->size(), 5); +} + +TEST_F(PriorityConnPoolMapImplTest, TestClearEmptiesOut) { + TestMapPtr test_map = makeTestMap(); + + test_map->getPool(ResourcePriority::High, 0, getBasicFactory()); + test_map->getPool(ResourcePriority::High, 1, getBasicFactory()); + test_map->getPool(ResourcePriority::Default, 0, getBasicFactory()); + test_map->getPool(ResourcePriority::Default, 1, getBasicFactory()); + test_map->getPool(ResourcePriority::Default, 2, getBasicFactory()); + 
test_map->clear(); + + EXPECT_EQ(test_map->size(), 0); +} + +// Show that the drained callback is invoked once for the high priority pool, and once for +// the default priority pool. +TEST_F(PriorityConnPoolMapImplTest, TestAddDrainedCbProxiedThrough) { + TestMapPtr test_map = makeTestMap(); + + test_map->getPool(ResourcePriority::High, 0, getBasicFactory()); + test_map->getPool(ResourcePriority::Default, 0, getBasicFactory()); + + Http::ConnectionPool::Instance::DrainedCb cbHigh; + EXPECT_CALL(*mock_pools_[0], addDrainedCallback(_)).WillOnce(SaveArg<0>(&cbHigh)); + Http::ConnectionPool::Instance::DrainedCb cbDefault; + EXPECT_CALL(*mock_pools_[1], addDrainedCallback(_)).WillOnce(SaveArg<0>(&cbDefault)); + + ReadyWatcher watcher; + test_map->addDrainedCallback([&watcher] { watcher.ready(); }); + + EXPECT_CALL(watcher, ready()).Times(2); + cbHigh(); + cbDefault(); +} + +TEST_F(PriorityConnPoolMapImplTest, TestDrainConnectionsProxiedThrough) { + TestMapPtr test_map = makeTestMap(); + + test_map->getPool(ResourcePriority::High, 0, getBasicFactory()); + test_map->getPool(ResourcePriority::Default, 0, getBasicFactory()); + + EXPECT_CALL(*mock_pools_[0], drainConnections()); + EXPECT_CALL(*mock_pools_[1], drainConnections()); + + test_map->drainConnections(); +} + +} // namespace +} // namespace Upstream +} // namespace Envoy diff --git a/test/common/upstream/resource_manager_impl_test.cc b/test/common/upstream/resource_manager_impl_test.cc index 0e9c6e1eb3bc2..118a453fb8519 100644 --- a/test/common/upstream/resource_manager_impl_test.cc +++ b/test/common/upstream/resource_manager_impl_test.cc @@ -25,7 +25,7 @@ TEST(ResourceManagerImplTest, RuntimeResourceManager) { ON_CALL(store, gauge(_)).WillByDefault(ReturnRef(gauge)); ResourceManagerImpl resource_manager( - runtime, "circuit_breakers.runtime_resource_manager_test.default.", 0, 0, 0, 1, + runtime, "circuit_breakers.runtime_resource_manager_test.default.", 0, 0, 0, 1, 0, ClusterCircuitBreakersStats{ 
ALL_CLUSTER_CIRCUIT_BREAKERS_STATS(POOL_GAUGE(store), POOL_GAUGE(store))}); @@ -58,6 +58,13 @@ TEST(ResourceManagerImplTest, RuntimeResourceManager) { .WillRepeatedly(Return(0U)); EXPECT_EQ(0U, resource_manager.retries().max()); EXPECT_FALSE(resource_manager.retries().canCreate()); + EXPECT_CALL( + runtime.snapshot_, + getInteger("circuit_breakers.runtime_resource_manager_test.default.max_connection_pools", 0U)) + .Times(2) + .WillRepeatedly(Return(5U)); + EXPECT_EQ(5U, resource_manager.connectionPools().max()); + EXPECT_TRUE(resource_manager.connectionPools().canCreate()); } TEST(ResourceManagerImplTest, RemainingResourceGauges) { @@ -67,7 +74,7 @@ TEST(ResourceManagerImplTest, RemainingResourceGauges) { auto stats = ClusterCircuitBreakersStats{ ALL_CLUSTER_CIRCUIT_BREAKERS_STATS(POOL_GAUGE(store), POOL_GAUGE(store))}; ResourceManagerImpl resource_manager( - runtime, "circuit_breakers.runtime_resource_manager_test.default.", 1, 2, 1, 0, stats); + runtime, "circuit_breakers.runtime_resource_manager_test.default.", 1, 2, 1, 0, 3, stats); // Test remaining_cx_ gauge EXPECT_EQ(1U, resource_manager.connections().max()); @@ -104,7 +111,14 @@ TEST(ResourceManagerImplTest, RemainingResourceGauges) { resource_manager.retries().inc(); EXPECT_EQ(0U, stats.remaining_retries_.value()); resource_manager.retries().dec(); - EXPECT_EQ(0U, stats.remaining_retries_.value()); + + // Test remaining_cx_pools gauge. 
+ EXPECT_EQ(3U, resource_manager.connectionPools().max()); + EXPECT_EQ(3U, stats.remaining_cx_pools_.value()); + resource_manager.connectionPools().inc(); + EXPECT_EQ(2U, stats.remaining_cx_pools_.value()); + resource_manager.connectionPools().dec(); + EXPECT_EQ(3U, stats.remaining_cx_pools_.value()); } } // namespace } // namespace Upstream diff --git a/test/integration/integration_test.cc b/test/integration/integration_test.cc index 0141026a9478c..1cb548c9b9ae0 100644 --- a/test/integration/integration_test.cc +++ b/test/integration/integration_test.cc @@ -749,6 +749,34 @@ TEST_P(IntegrationTest, TestDelayedConnectionTeardownTimeoutTrigger) { 1); } +// Test that if no connection pools are free, Envoy fails to establish an upstream connection. +TEST_P(IntegrationTest, NoConnectionPoolsFree) { + config_helper_.addConfigModifier([](envoy::config::bootstrap::v2::Bootstrap& bootstrap) { + auto* static_resources = bootstrap.mutable_static_resources(); + auto* cluster = static_resources->mutable_clusters(0); + + // Somewhat contrived with 0, but this is the simplest way to test right now. + auto* circuit_breakers = cluster->mutable_circuit_breakers(); + circuit_breakers->add_thresholds()->mutable_max_connection_pools()->set_value(0); + }); + + initialize(); + + codec_client_ = makeHttpConnection(lookupPort("http")); + + // Request 1. + auto response = codec_client_->makeRequestWithBody(default_request_headers_, 1024); + + // Validate none active. 
+ test_server_->waitForGaugeEq("cluster.cluster_0.upstream_rq_active", 0); + test_server_->waitForGaugeEq("cluster.cluster_0.upstream_rq_pending_active", 0); + + response->waitForEndStream(); + + EXPECT_STREQ("503", response->headers().Status()->value().c_str()); + test_server_->waitForCounterGe("cluster.cluster_0.upstream_rq_503", 1); +} + INSTANTIATE_TEST_SUITE_P(IpVersions, UpstreamEndpointIntegrationTest, testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), TestUtility::ipTestParamsToString); diff --git a/test/mocks/upstream/cluster_info.cc b/test/mocks/upstream/cluster_info.cc index a0ef49d23331b..5497a0cd2bbe2 100644 --- a/test/mocks/upstream/cluster_info.cc +++ b/test/mocks/upstream/cluster_info.cc @@ -1,5 +1,7 @@ #include "test/mocks/upstream/cluster_info.h" +#include + #include "common/network/raw_buffer_socket.h" #include "common/upstream/upstream_impl.h" @@ -35,6 +37,7 @@ MockClusterInfo::MockClusterInfo() circuit_breakers_stats_( ClusterInfoImpl::generateCircuitBreakersStats(stats_store_, "default", true)), resource_manager_(new Upstream::ResourceManagerImpl(runtime_, "fake_key", 1, 1024, 1024, 1, + std::numeric_limits::max(), circuit_breakers_stats_)) { ON_CALL(*this, connectTimeout()).WillByDefault(Return(std::chrono::milliseconds(1))); ON_CALL(*this, idleTimeout()).WillByDefault(Return(absl::optional())); diff --git a/test/mocks/upstream/cluster_info.h b/test/mocks/upstream/cluster_info.h index d52c58b00221a..dd498b8fae26c 100644 --- a/test/mocks/upstream/cluster_info.h +++ b/test/mocks/upstream/cluster_info.h @@ -46,9 +46,10 @@ class MockClusterInfo : public ClusterInfo { MockClusterInfo(); ~MockClusterInfo(); - void resetResourceManager(uint64_t cx, uint64_t rq_pending, uint64_t rq, uint64_t rq_retry) { - resource_manager_ = std::make_unique(runtime_, name_, cx, rq_pending, rq, - rq_retry, circuit_breakers_stats_); + void resetResourceManager(uint64_t cx, uint64_t rq_pending, uint64_t rq, uint64_t rq_retry, + uint64_t conn_pool) { + 
resource_manager_ = std::make_unique( + runtime_, name_, cx, rq_pending, rq, rq_retry, conn_pool, circuit_breakers_stats_); } // Upstream::ClusterInfo From aab05453ff2e354093f70d0b1160e25e6cd79a66 Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Tue, 26 Mar 2019 15:09:43 -0400 Subject: [PATCH 016/165] docs: fixing a bad merge (#6385) Signed-off-by: Alyssa Wilk --- DEPRECATED.md | 2 -- 1 file changed, 2 deletions(-) diff --git a/DEPRECATED.md b/DEPRECATED.md index 0e331d82556af..50c777aa85a2e 100644 --- a/DEPRECATED.md +++ b/DEPRECATED.md @@ -11,8 +11,6 @@ A logged warning is expected for each deprecated item that is in deprecation win * Use of `enabled` in `CorsPolicy`, found in [route.proto](https://github.com/envoyproxy/envoy/blob/master/api/envoy/api/v2/route/route.proto). Set the `filter_enabled` field instead. -* Use of google.protobuf.Struct for extension opaque configs is deprecated. Use google.protobuf.Any instead or pack -google.protobuf.Struct in google.protobuf.Any. * Use of `cluster`, found in [redis-proxy.proto](https://github.com/envoyproxy/envoy/blob/master/api/envoy/config/filter/network/redis_proxy/v2/redis_proxy.proto) is deprecated. Set a `PrefixRoutes.catch_all_cluster` instead. 
## Version 1.9.0 (Dec 20, 2018) From 805683f835bd63e4b7b9d89059aa0d3783924a93 Mon Sep 17 00:00:00 2001 From: Matt Klein Date: Tue, 26 Mar 2019 14:13:44 -0700 Subject: [PATCH 017/165] http fault: implement header controlled faults (#6318) Part of https://github.com/envoyproxy/envoy/issues/5942 Signed-off-by: Matt Klein --- DEPRECATED.md | 4 + api/envoy/config/filter/fault/v2/fault.proto | 26 +++- .../config/filter/http/fault/v2/fault.proto | 4 +- .../http_filters/fault_filter.rst | 75 +++++++--- docs/root/intro/version_history.rst | 10 +- source/common/config/filter_json.cc | 2 - source/extensions/filters/common/fault/BUILD | 20 +++ .../filters/common/fault/fault_config.cc | 78 +++++++++++ .../filters/common/fault/fault_config.h | 130 ++++++++++++++++++ source/extensions/filters/http/fault/BUILD | 1 + .../filters/http/fault/fault_filter.cc | 74 ++++++---- .../filters/http/fault/fault_filter.h | 29 ++-- .../filters/network/mongo_proxy/BUILD | 1 + .../filters/network/mongo_proxy/config.cc | 9 +- .../filters/network/mongo_proxy/proxy.cc | 37 ++--- .../filters/network/mongo_proxy/proxy.h | 31 +---- test/extensions/filters/common/fault/BUILD | 18 +++ .../filters/common/fault/fault_config_test.cc | 60 ++++++++ .../fault/fault_filter_integration_test.cc | 79 ++++++++++- .../filters/http/fault/fault_filter_test.cc | 19 --- .../filters/network/mongo_proxy/proxy_test.cc | 15 +- test/test_common/utility.h | 7 + 22 files changed, 579 insertions(+), 150 deletions(-) create mode 100644 source/extensions/filters/common/fault/BUILD create mode 100644 source/extensions/filters/common/fault/fault_config.cc create mode 100644 source/extensions/filters/common/fault/fault_config.h create mode 100644 test/extensions/filters/common/fault/BUILD create mode 100644 test/extensions/filters/common/fault/fault_config_test.cc diff --git a/DEPRECATED.md b/DEPRECATED.md index 50c777aa85a2e..502987c09035d 100644 --- a/DEPRECATED.md +++ b/DEPRECATED.md @@ -11,6 +11,10 @@ A logged warning is expected 
for each deprecated item that is in deprecation win * Use of `enabled` in `CorsPolicy`, found in [route.proto](https://github.com/envoyproxy/envoy/blob/master/api/envoy/api/v2/route/route.proto). Set the `filter_enabled` field instead. +* Use of the `type` field in the `FaultDelay` message (found in + [fault.proto](https://github.com/envoyproxy/envoy/blob/master/api/envoy/config/filter/fault/v2/fault.proto)) + has been deprecated. It was never used and setting it has no effect. It will be removed in the + following release. * Use of `cluster`, found in [redis-proxy.proto](https://github.com/envoyproxy/envoy/blob/master/api/envoy/config/filter/network/redis_proxy/v2/redis_proxy.proto) is deprecated. Set a `PrefixRoutes.catch_all_cluster` instead. ## Version 1.9.0 (Dec 20, 2018) diff --git a/api/envoy/config/filter/fault/v2/fault.proto b/api/envoy/config/filter/fault/v2/fault.proto index 89d1dc2c55ff5..f27f9d446267f 100644 --- a/api/envoy/config/filter/fault/v2/fault.proto +++ b/api/envoy/config/filter/fault/v2/fault.proto @@ -19,19 +19,25 @@ import "gogoproto/gogo.proto"; // Delay specification is used to inject latency into the // HTTP/gRPC/Mongo/Redis operation or delay proxying of TCP connections. message FaultDelay { + // Fault delays are controlled via an HTTP header (if applicable). See the + // :ref:`http fault filter ` documentation for + // more information. + message HeaderDelay { + } + enum FaultDelayType { - // Fixed delay (step function). + // Unused and deprecated. FIXED = 0; } - // Delay type to use (fixed|exponential|..). Currently, only fixed delay (step function) is - // supported. - FaultDelayType type = 1 [(validate.rules).enum.defined_only = true]; + // Unused and deprecated. Will be removed in the next release. + FaultDelayType type = 1 [deprecated = true]; reserved 2; oneof fault_delay_secifier { option (validate.required) = true; + // Add a fixed delay before forwarding the operation upstream. 
See // https://developers.google.com/protocol-buffers/docs/proto3#json for // the JSON/YAML Duration mapping. For HTTP/Mongo/Redis, the specified @@ -40,6 +46,9 @@ message FaultDelay { // for the specified period. This is required if type is FIXED. google.protobuf.Duration fixed_delay = 3 [(validate.rules).duration.gt = {}, (gogoproto.stdduration) = true]; + + // Fault delays are controlled via an HTTP header (if applicable). + HeaderDelay header_delay = 5; } // The percentage of operations/connections/requests on which the delay will be injected. @@ -54,11 +63,20 @@ message FaultRateLimit { uint64 limit_kbps = 1 [(validate.rules).uint64.gte = 1]; } + // Rate limits are controlled via an HTTP header (if applicable). See the + // :ref:`http fault filter ` documentation for + // more information. + message HeaderLimit { + } + oneof limit_type { option (validate.required) = true; // A fixed rate limit. FixedLimit fixed_limit = 1; + + // Rate limits are controlled via an HTTP header (if applicable). + HeaderLimit header_limit = 3; } // The percentage of operations/connections/requests on which the rate limit will be injected. diff --git a/api/envoy/config/filter/http/fault/v2/fault.proto b/api/envoy/config/filter/http/fault/v2/fault.proto index df4258968ab66..bc491580bb152 100644 --- a/api/envoy/config/filter/http/fault/v2/fault.proto +++ b/api/envoy/config/filter/http/fault/v2/fault.proto @@ -80,7 +80,9 @@ message HTTPFault { // amount due to the implementation details. google.protobuf.UInt32Value max_active_faults = 6; - // The response rate limit to be applied to the response body of the stream. + // The response rate limit to be applied to the response body of the stream. When configured, + // the percentage can be overridden by the :ref:`fault.http.rate_limit.response_percent + // ` runtime key. // // .. attention:: // This is a per-stream limit versus a connection level limit. 
This means that concurrent streams diff --git a/docs/root/configuration/http_filters/fault_filter.rst b/docs/root/configuration/http_filters/fault_filter.rst index 39de89628fe29..90b11404b90bc 100644 --- a/docs/root/configuration/http_filters/fault_filter.rst +++ b/docs/root/configuration/http_filters/fault_filter.rst @@ -16,15 +16,6 @@ The scope of failures is restricted to those that are observable by an application communicating over the network. CPU and disk failures on the local host cannot be emulated. -Currently, the fault injection filter has the following limitations: - -* Abort codes are restricted to HTTP status codes only -* Delays are restricted to fixed duration. - -Future versions will include support for restricting faults to specific -routes, injecting *gRPC* and *HTTP/2* specific error codes and delay -durations based on distributions. - Configuration ------------- @@ -36,6 +27,44 @@ Configuration * :ref:`v2 API reference ` * This filter should be configured with the name *envoy.fault*. +.. _config_http_filters_fault_injection_http_header: + +Controlling fault injection via HTTP headers +-------------------------------------------- + +The fault filter has the capability to allow fault configuration to be specified by the caller. +This is useful in certain scenarios in which it is desired to allow the client to specify its own +fault configuration. The currently supported header controls are: + +* Request delay configuration via the *x-envoy-fault-delay-request* header. The header value + should be an integer that specifies the number of milliseconds to throttle the latency for. +* Response rate limit configuration via the *x-envoy-fault-throughput-response* header. The + header value should be an integer that specified the limit in KiB/s and must be > 0. + +.. attention:: + + Allowing header control is inherently dangerous if exposed to untrusted clients. 
In this case, + it is suggested to use the :ref:`max_active_faults + ` setting to limit the + maximum concurrent faults that can be active at any given time. + +The following is an example configuration that enables header control for both of the above +options: + +.. code-block:: yaml + + name: envoy.fault + config: + max_active_faults: 100 + delay: + header_delay: {} + percentage: + numerator: 100 + response_rate_limit: + header_limit: {} + percentage: + numerator: 100 + .. _config_http_filters_fault_injection_runtime: Runtime @@ -43,26 +72,38 @@ Runtime The HTTP fault injection filter supports the following global runtime settings: +.. attention:: + + Some of the following runtime keys require the filter to be configured for the specific fault + type and some do not. Please consult the documentation for each key for more information. + fault.http.abort.abort_percent % of requests that will be aborted if the headers match. Defaults to the *abort_percent* specified in config. If the config does not contain an - *abort* block, then *abort_percent* defaults to 0. + *abort* block, then *abort_percent* defaults to 0. For historic reasons, this runtime key is + available regardless of whether the filter is :ref:`configured for abort + `. fault.http.abort.http_status HTTP status code that will be used as the of requests that will be aborted if the headers match. Defaults to the HTTP status code specified in the config. If the config does not contain an *abort* block, then - *http_status* defaults to 0. + *http_status* defaults to 0. For historic reasons, this runtime key is + available regardless of whether the filter is :ref:`configured for abort + `. fault.http.delay.fixed_delay_percent % of requests that will be delayed if the headers match. Defaults to the - *delay_percent* specified in the config or 0 otherwise. + *delay_percent* specified in the config or 0 otherwise. This runtime key is only available when + the filter is :ref:`configured for delay + `. 
fault.http.delay.fixed_duration_ms The delay duration in milliseconds. If not specified, the *fixed_duration_ms* specified in the config will be used. If this field is missing from both the runtime and the config, no delays will be - injected. + injected. This runtime key is only available when the filter is :ref:`configured for delay + `. fault.http.max_active_faults The maximum number of active faults (of all types) that Envoy will will inject via the fault @@ -72,10 +113,10 @@ fault.http.max_active_faults ` setting will be used. fault.http.rate_limit.response_percent - % of requests which will have a response rate limit fault injected, if the filter is - :ref:`configured ` to - do so. Defaults to the value set in the :ref:`percentage - ` field. + % of requests which will have a response rate limit fault injected. Defaults to the value set in + the :ref:`percentage ` field. + This runtime key is only available when the filter is :ref:`configured for response rate limiting + `. *Note*, fault filter runtime settings for the specific downstream cluster override the default ones if present. The following are downstream specific diff --git a/docs/root/intro/version_history.rst b/docs/root/intro/version_history.rst index e9a2a7c75434a..8032b072d2f00 100644 --- a/docs/root/intro/version_history.rst +++ b/docs/root/intro/version_history.rst @@ -36,18 +36,20 @@ Version history ` setting, as well as :ref:`statistics ` for the number of active faults and the number of faults the overflowed. -* fault: add :ref:`response rate limit +* fault: added :ref:`response rate limit ` fault injection. +* fault: added :ref:`HTTP header fault configuration + ` to the HTTP fault filter. * governance: extending Envoy deprecation policy from 1 release (0-3 months) to 2 releases (3-6 months). * health check: expected response codes in http health checks are now :ref:`configurable `. * http: added new grpc_http1_reverse_bridge filter for converting gRPC requests into HTTP/1.1 requests. 
* http: fixed a bug where Content-Length:0 was added to HTTP/1 204 responses. -* outlier_detection: added support for :ref:`outlier detection event protobuf-based logging `. -* mysql: added a MySQL proxy filter that is capable of parsing SQL queries over MySQL wire protocol. Refer to ::ref:`MySQL proxy` for more details. * http: added :ref:`max request headers size `. The default behaviour is unchanged. * http: added modifyDecodingBuffer/modifyEncodingBuffer to allow modifying the buffered request/response data. -* performance: new buffer implementation (disabled by default; to test it, add "--use-libevent-buffers 0" to the command-line arguments when starting Envoy). * http: added encodeComplete/decodeComplete. These are invoked at the end of the stream, after all data has been encoded/decoded respectively. Default implementation is a no-op. +* outlier_detection: added support for :ref:`outlier detection event protobuf-based logging `. +* mysql: added a MySQL proxy filter that is capable of parsing SQL queries over MySQL wire protocol. Refer to :ref:`MySQL proxy` for more details. +* performance: new buffer implementation (disabled by default; to test it, add "--use-libevent-buffers 0" to the command-line arguments when starting Envoy). * ratelimit: removed deprecated rate limit configuration from bootstrap. * redis: added :ref:`hashtagging ` to guarantee a given key's upstream. * redis: added :ref:`latency stats ` for commands. 
diff --git a/source/common/config/filter_json.cc b/source/common/config/filter_json.cc index 83abe60954b47..9dd65d48574b0 100644 --- a/source/common/config/filter_json.cc +++ b/source/common/config/filter_json.cc @@ -252,7 +252,6 @@ void FilterJson::translateMongoProxy( auto* delay = proto_config.mutable_delay(); auto* percentage = delay->mutable_percentage(); - delay->set_type(envoy::config::filter::fault::v2::FaultDelay::FIXED); percentage->set_numerator(static_cast(json_fault->getInteger("percent"))); percentage->set_denominator(envoy::type::FractionalPercent::HUNDRED); JSON_UTIL_SET_DURATION_FROM_FIELD(*json_fault, *delay, fixed_delay, duration); @@ -282,7 +281,6 @@ void FilterJson::translateFaultFilter( if (!json_config_delay->empty()) { auto* delay = proto_config.mutable_delay(); auto* percentage = delay->mutable_percentage(); - delay->set_type(envoy::config::filter::fault::v2::FaultDelay::FIXED); percentage->set_numerator( static_cast(json_config_delay->getInteger("fixed_delay_percent"))); percentage->set_denominator(envoy::type::FractionalPercent::HUNDRED); diff --git a/source/extensions/filters/common/fault/BUILD b/source/extensions/filters/common/fault/BUILD new file mode 100644 index 0000000000000..b8607b4f861bf --- /dev/null +++ b/source/extensions/filters/common/fault/BUILD @@ -0,0 +1,20 @@ +licenses(["notice"]) # Apache 2 + +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_library", + "envoy_package", +) + +envoy_package() + +envoy_cc_library( + name = "fault_config_lib", + srcs = ["fault_config.cc"], + hdrs = ["fault_config.h"], + deps = [ + "//include/envoy/http:header_map_interface", + "//source/common/protobuf:utility_lib", + "@envoy_api//envoy/config/filter/fault/v2:fault_cc", + ], +) diff --git a/source/extensions/filters/common/fault/fault_config.cc b/source/extensions/filters/common/fault/fault_config.cc new file mode 100644 index 0000000000000..f6e4f73ad6fe7 --- /dev/null +++ b/source/extensions/filters/common/fault/fault_config.cc @@ 
-0,0 +1,78 @@ +#include "extensions/filters/common/fault/fault_config.h" + +#include "common/protobuf/utility.h" + +namespace Envoy { +namespace Extensions { +namespace Filters { +namespace Common { +namespace Fault { + +FaultDelayConfig::FaultDelayConfig(const envoy::config::filter::fault::v2::FaultDelay& delay_config) + : percentage_(delay_config.percentage()) { + switch (delay_config.fault_delay_secifier_case()) { + case envoy::config::filter::fault::v2::FaultDelay::kFixedDelay: + provider_ = std::make_unique( + std::chrono::milliseconds(PROTOBUF_GET_MS_REQUIRED(delay_config, fixed_delay))); + break; + case envoy::config::filter::fault::v2::FaultDelay::kHeaderDelay: + provider_ = std::make_unique(); + break; + case envoy::config::filter::fault::v2::FaultDelay::FAULT_DELAY_SECIFIER_NOT_SET: + NOT_REACHED_GCOVR_EXCL_LINE; + } +} + +absl::optional +FaultDelayConfig::HeaderDelayProvider::duration(const Http::HeaderEntry* header) const { + if (header == nullptr) { + return absl::nullopt; + } + + uint64_t value; + if (!StringUtil::atoull(header->value().getStringView().data(), value)) { + return absl::nullopt; + } + + return std::chrono::milliseconds(value); +} + +FaultRateLimitConfig::FaultRateLimitConfig( + const envoy::config::filter::fault::v2::FaultRateLimit& rate_limit_config) + : percentage_(rate_limit_config.percentage()) { + switch (rate_limit_config.limit_type_case()) { + case envoy::config::filter::fault::v2::FaultRateLimit::kFixedLimit: + provider_ = + std::make_unique(rate_limit_config.fixed_limit().limit_kbps()); + break; + case envoy::config::filter::fault::v2::FaultRateLimit::kHeaderLimit: + provider_ = std::make_unique(); + break; + case envoy::config::filter::fault::v2::FaultRateLimit::LIMIT_TYPE_NOT_SET: + NOT_REACHED_GCOVR_EXCL_LINE; + } +} + +absl::optional +FaultRateLimitConfig::HeaderRateLimitProvider::rateKbps(const Http::HeaderEntry* header) const { + if (header == nullptr) { + return absl::nullopt; + } + + uint64_t value; + if 
(!StringUtil::atoull(header->value().getStringView().data(), value)) { + return absl::nullopt; + } + + if (value == 0) { + return absl::nullopt; + } + + return value; +} + +} // namespace Fault +} // namespace Common +} // namespace Filters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/common/fault/fault_config.h b/source/extensions/filters/common/fault/fault_config.h new file mode 100644 index 0000000000000..61b3ada9eda70 --- /dev/null +++ b/source/extensions/filters/common/fault/fault_config.h @@ -0,0 +1,130 @@ +#pragma once + +#include "envoy/config/filter/fault/v2/fault.pb.h" +#include "envoy/http/header_map.h" + +#include "common/singleton/const_singleton.h" + +namespace Envoy { +namespace Extensions { +namespace Filters { +namespace Common { +namespace Fault { + +class HeaderNameValues { +public: + const Http::LowerCaseString DelayRequest{"x-envoy-fault-delay-request"}; + const Http::LowerCaseString ThroughputResponse{"x-envoy-fault-throughput-response"}; +}; + +typedef ConstSingleton HeaderNames; + +/** + * Generic configuration for a delay fault. + */ +class FaultDelayConfig { +public: + FaultDelayConfig(const envoy::config::filter::fault::v2::FaultDelay& delay_config); + + const envoy::type::FractionalPercent& percentage() const { return percentage_; } + absl::optional duration(const Http::HeaderEntry* header) const { + return provider_->duration(header); + } + +private: + // Abstract delay provider. + class DelayProvider { + public: + virtual ~DelayProvider() = default; + + // Return the duration to use. Optionally passed an HTTP header that may contain the delay + // depending on the provider implementation. + virtual absl::optional + duration(const Http::HeaderEntry* header) const PURE; + }; + + // Delay provider that uses a fixed delay. 
+ class FixedDelayProvider : public DelayProvider { + public: + FixedDelayProvider(std::chrono::milliseconds delay) : delay_(delay) {} + + // DelayProvider + absl::optional duration(const Http::HeaderEntry*) const override { + return delay_; + } + + private: + const std::chrono::milliseconds delay_; + }; + + // Delay provider the reads a delay from an HTTP header. + class HeaderDelayProvider : public DelayProvider { + public: + // DelayProvider + absl::optional + duration(const Http::HeaderEntry* header) const override; + }; + + using DelayProviderPtr = std::unique_ptr; + + DelayProviderPtr provider_; + const envoy::type::FractionalPercent percentage_; +}; + +using FaultDelayConfigPtr = std::unique_ptr; +using FaultDelayConfigSharedPtr = std::shared_ptr; + +/** + * Generic configuration for a rate limit fault. + */ +class FaultRateLimitConfig { +public: + FaultRateLimitConfig(const envoy::config::filter::fault::v2::FaultRateLimit& rate_limit_config); + + const envoy::type::FractionalPercent& percentage() const { return percentage_; } + absl::optional rateKbps(const Http::HeaderEntry* header) const { + return provider_->rateKbps(header); + } + +private: + // Abstract rate limit provider. + class RateLimitProvider { + public: + virtual ~RateLimitProvider() = default; + + // Return the rate limit to use in KiB/s. Optionally passed an HTTP header that may contain the + // rate limit depending on the provider implementation. + virtual absl::optional rateKbps(const Http::HeaderEntry* header) const PURE; + }; + + // Rate limit provider that uses a fixed rate limit. + class FixedRateLimitProvider : public RateLimitProvider { + public: + FixedRateLimitProvider(uint64_t fixed_rate_kbps) : fixed_rate_kbps_(fixed_rate_kbps) {} + absl::optional rateKbps(const Http::HeaderEntry*) const override { + return fixed_rate_kbps_; + } + + private: + const uint64_t fixed_rate_kbps_; + }; + + // Rate limit provider that reads the rate limit from an HTTP header. 
+ class HeaderRateLimitProvider : public RateLimitProvider { + public: + absl::optional rateKbps(const Http::HeaderEntry* header) const override; + }; + + using RateLimitProviderPtr = std::unique_ptr; + + RateLimitProviderPtr provider_; + const envoy::type::FractionalPercent percentage_; +}; + +using FaultRateLimitConfigPtr = std::unique_ptr; + +} // namespace Fault +} // namespace Common +} // namespace Filters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/http/fault/BUILD b/source/extensions/filters/http/fault/BUILD index 4c282cb5e3fa1..a5b48b5af103c 100644 --- a/source/extensions/filters/http/fault/BUILD +++ b/source/extensions/filters/http/fault/BUILD @@ -32,6 +32,7 @@ envoy_cc_library( "//source/common/http:header_utility_lib", "//source/common/http:headers_lib", "//source/common/protobuf:utility_lib", + "//source/extensions/filters/common/fault:fault_config_lib", "@envoy_api//envoy/config/filter/http/fault/v2:fault_cc", ], ) diff --git a/source/extensions/filters/http/fault/fault_filter.cc b/source/extensions/filters/http/fault/fault_filter.cc index 737c92733d163..6c10fd2ff5fcf 100644 --- a/source/extensions/filters/http/fault/fault_filter.cc +++ b/source/extensions/filters/http/fault/fault_filter.cc @@ -34,9 +34,8 @@ FaultSettings::FaultSettings(const envoy::config::filter::http::fault::v2::HTTPF } if (fault.has_delay()) { - const auto& delay = fault.delay(); - fixed_delay_percentage_ = delay.percentage(); - fixed_duration_ms_ = PROTOBUF_GET_MS_OR_DEFAULT(delay, fixed_delay, 0); + request_delay_config_ = + std::make_unique(fault.delay()); } for (const Http::HeaderUtility::HeaderData& header_map : fault.headers()) { @@ -54,11 +53,8 @@ FaultSettings::FaultSettings(const envoy::config::filter::http::fault::v2::HTTPF } if (fault.has_response_rate_limit()) { - RateLimit rate_limit; - ASSERT(fault.response_rate_limit().has_fixed_limit()); - rate_limit.fixed_rate_kbps_ = 
fault.response_rate_limit().fixed_limit().limit_kbps(); - rate_limit.percentage_ = fault.response_rate_limit().percentage(); - response_rate_limit_ = rate_limit; + response_rate_limit_ = + std::make_unique(fault.response_rate_limit()); } } @@ -122,13 +118,14 @@ Http::FilterHeadersStatus FaultFilter::decodeHeaders(Http::HeaderMap& headers, b fmt::format("fault.http.{}.abort.http_status", downstream_cluster_); } - maybeSetupResponseRateLimit(); + maybeSetupResponseRateLimit(headers); - absl::optional duration_ms = delayDuration(); - if (duration_ms) { + absl::optional duration = delayDuration(headers); + if (duration.has_value()) { delay_timer_ = decoder_callbacks_->dispatcher().createTimer([this]() -> void { postDelayInjection(); }); - delay_timer_->enableTimer(std::chrono::milliseconds(duration_ms.value())); + ENVOY_LOG(debug, "fault: delaying request {}ms", duration.value().count()); + delay_timer_->enableTimer(duration.value()); recordDelaysInjectedStats(); decoder_callbacks_->streamInfo().setResponseFlag(StreamInfo::ResponseFlag::DelayInjected); return Http::FilterHeadersStatus::StopIteration; @@ -142,15 +139,21 @@ Http::FilterHeadersStatus FaultFilter::decodeHeaders(Http::HeaderMap& headers, b return Http::FilterHeadersStatus::Continue; } -void FaultFilter::maybeSetupResponseRateLimit() { - if (!fault_settings_->responseRateLimit().has_value()) { +void FaultFilter::maybeSetupResponseRateLimit(const Http::HeaderMap& request_headers) { + if (fault_settings_->responseRateLimit() == nullptr) { + return; + } + + absl::optional rate_kbps = fault_settings_->responseRateLimit()->rateKbps( + request_headers.get(Filters::Common::Fault::HeaderNames::get().ThroughputResponse)); + if (!rate_kbps.has_value()) { return; } // TODO(mattklein123): Allow runtime override via downstream cluster similar to the other keys. 
if (!config_->runtime().snapshot().featureEnabled( - RuntimeKeys::get().ResponseRateLimitKey, - fault_settings_->responseRateLimit().value().percentage_)) { + RuntimeKeys::get().ResponseRateLimitPercentKey, + fault_settings_->responseRateLimit()->percentage())) { return; } @@ -159,8 +162,7 @@ void FaultFilter::maybeSetupResponseRateLimit() { config_->stats().response_rl_injected_.inc(); response_limiter_ = std::make_unique( - fault_settings_->responseRateLimit().value().fixed_rate_kbps_, - encoder_callbacks_->encoderBufferLimit(), + rate_kbps.value(), encoder_callbacks_->encoderBufferLimit(), [this] { encoder_callbacks_->onEncoderFilterAboveWriteBufferHighWatermark(); }, [this] { encoder_callbacks_->onEncoderFilterBelowWriteBufferLowWatermark(); }, [this](Buffer::Instance& data, bool end_stream) { @@ -186,11 +188,15 @@ bool FaultFilter::faultOverflow() { } bool FaultFilter::isDelayEnabled() { - bool enabled = config_->runtime().snapshot().featureEnabled(RuntimeKeys::get().DelayPercentKey, - fault_settings_->delayPercentage()); + if (fault_settings_->requestDelay() == nullptr) { + return false; + } + + bool enabled = config_->runtime().snapshot().featureEnabled( + RuntimeKeys::get().DelayPercentKey, fault_settings_->requestDelay()->percentage()); if (!downstream_cluster_delay_percent_key_.empty()) { - enabled |= config_->runtime().snapshot().featureEnabled(downstream_cluster_delay_percent_key_, - fault_settings_->delayPercentage()); + enabled |= config_->runtime().snapshot().featureEnabled( + downstream_cluster_delay_percent_key_, fault_settings_->requestDelay()->percentage()); } return enabled; } @@ -205,22 +211,32 @@ bool FaultFilter::isAbortEnabled() { return enabled; } -absl::optional FaultFilter::delayDuration() { - absl::optional ret; +absl::optional +FaultFilter::delayDuration(const Http::HeaderMap& request_headers) { + absl::optional ret; if (!isDelayEnabled()) { return ret; } - uint64_t duration = 
config_->runtime().snapshot().getInteger(RuntimeKeys::get().DelayDurationKey, - fault_settings_->delayDuration()); + // See if the configured delay provider has a default delay, if not there is no delay (e.g., + // header configuration and no/invalid header). + auto config_duration = fault_settings_->requestDelay()->duration( + request_headers.get(Filters::Common::Fault::HeaderNames::get().DelayRequest)); + if (!config_duration.has_value()) { + return ret; + } + + std::chrono::milliseconds duration = + std::chrono::milliseconds(config_->runtime().snapshot().getInteger( + RuntimeKeys::get().DelayDurationKey, config_duration.value().count())); if (!downstream_cluster_delay_duration_key_.empty()) { - duration = - config_->runtime().snapshot().getInteger(downstream_cluster_delay_duration_key_, duration); + duration = std::chrono::milliseconds(config_->runtime().snapshot().getInteger( + downstream_cluster_delay_duration_key_, duration.count())); } // Delay only if the duration is >0ms - if (duration > 0) { + if (duration.count() > 0) { ret = duration; } diff --git a/source/extensions/filters/http/fault/fault_filter.h b/source/extensions/filters/http/fault/fault_filter.h index ce6970f7c8e02..58e539c677667 100644 --- a/source/extensions/filters/http/fault/fault_filter.h +++ b/source/extensions/filters/http/fault/fault_filter.h @@ -17,6 +17,8 @@ #include "common/common/token_bucket_impl.h" #include "common/http/header_utility.h" +#include "extensions/filters/common/fault/fault_config.h" + namespace Envoy { namespace Extensions { namespace HttpFilters { @@ -46,35 +48,32 @@ struct FaultFilterStats { */ class FaultSettings : public Router::RouteSpecificFilterConfig { public: - struct RateLimit { - uint64_t fixed_rate_kbps_; - envoy::type::FractionalPercent percentage_; - }; - FaultSettings(const envoy::config::filter::http::fault::v2::HTTPFault& fault); const std::vector& filterHeaders() const { return fault_filter_headers_; } envoy::type::FractionalPercent abortPercentage() 
const { return abort_percentage_; } - envoy::type::FractionalPercent delayPercentage() const { return fixed_delay_percentage_; } - uint64_t delayDuration() const { return fixed_duration_ms_; } uint64_t abortCode() const { return http_status_; } + const Filters::Common::Fault::FaultDelayConfig* requestDelay() const { + return request_delay_config_.get(); + } const std::string& upstreamCluster() const { return upstream_cluster_; } const std::unordered_set& downstreamNodes() const { return downstream_nodes_; } absl::optional maxActiveFaults() const { return max_active_faults_; } - const absl::optional& responseRateLimit() const { return response_rate_limit_; } + const Filters::Common::Fault::FaultRateLimitConfig* responseRateLimit() const { + return response_rate_limit_.get(); + } private: envoy::type::FractionalPercent abort_percentage_; uint64_t http_status_{}; // HTTP or gRPC return codes - envoy::type::FractionalPercent fixed_delay_percentage_; - uint64_t fixed_duration_ms_{}; // in milliseconds + Filters::Common::Fault::FaultDelayConfigPtr request_delay_config_; std::string upstream_cluster_; // restrict faults to specific upstream cluster std::vector fault_filter_headers_; std::unordered_set downstream_nodes_{}; // Inject failures for specific downstream absl::optional max_active_faults_; - absl::optional response_rate_limit_; + Filters::Common::Fault::FaultRateLimitConfigPtr response_rate_limit_; }; /** @@ -160,7 +159,7 @@ class StreamRateLimiter : Logger::Loggable { /** * A filter that is capable of faulting an entire request before dispatching it upstream. 
*/ -class FaultFilter : public Http::StreamFilter { +class FaultFilter : public Http::StreamFilter, Logger::Loggable { public: FaultFilter(FaultFilterConfigSharedPtr config); ~FaultFilter(); @@ -200,7 +199,7 @@ class FaultFilter : public Http::StreamFilter { const std::string DelayDurationKey = "fault.http.delay.fixed_duration_ms"; const std::string AbortHttpStatusKey = "fault.http.abort.http_status"; const std::string MaxActiveFaultsKey = "fault.http.max_active_faults"; - const std::string ResponseRateLimitKey = "fault.http.rate_limit.response_percent"; + const std::string ResponseRateLimitPercentKey = "fault.http.rate_limit.response_percent"; }; using RuntimeKeys = ConstSingleton; @@ -215,10 +214,10 @@ class FaultFilter : public Http::StreamFilter { bool matchesDownstreamNodes(const Http::HeaderMap& headers); bool isAbortEnabled(); bool isDelayEnabled(); - absl::optional delayDuration(); + absl::optional delayDuration(const Http::HeaderMap& request_headers); uint64_t abortHttpStatus(); void maybeIncActiveFaults(); - void maybeSetupResponseRateLimit(); + void maybeSetupResponseRateLimit(const Http::HeaderMap& request_headers); FaultFilterConfigSharedPtr config_; Http::StreamDecoderFilterCallbacks* decoder_callbacks_{}; diff --git a/source/extensions/filters/network/mongo_proxy/BUILD b/source/extensions/filters/network/mongo_proxy/BUILD index 6891198b77b25..36a94de85abcb 100644 --- a/source/extensions/filters/network/mongo_proxy/BUILD +++ b/source/extensions/filters/network/mongo_proxy/BUILD @@ -75,6 +75,7 @@ envoy_cc_library( "//source/common/network:filter_lib", "//source/common/protobuf:utility_lib", "//source/common/singleton:const_singleton", + "//source/extensions/filters/common/fault:fault_config_lib", "//source/extensions/filters/network:well_known_names", "@envoy_api//envoy/config/filter/network/mongo_proxy/v2:mongo_proxy_cc", ], diff --git a/source/extensions/filters/network/mongo_proxy/config.cc b/source/extensions/filters/network/mongo_proxy/config.cc 
index 44cf1dbf02864..a8989947e75a6 100644 --- a/source/extensions/filters/network/mongo_proxy/config.cc +++ b/source/extensions/filters/network/mongo_proxy/config.cc @@ -27,11 +27,9 @@ Network::FilterFactoryCb MongoProxyFilterConfigFactory::createFilterFactoryFromP context.dispatcher().timeSource())); } - FaultConfigSharedPtr fault_config; + Filters::Common::Fault::FaultDelayConfigSharedPtr fault_config; if (proto_config.has_delay()) { - auto delay = proto_config.delay(); - ASSERT(delay.has_fixed_delay()); - fault_config = std::make_shared(proto_config.delay()); + fault_config = std::make_shared(proto_config.delay()); } const bool emit_dynamic_metadata = proto_config.emit_dynamic_metadata(); @@ -39,8 +37,7 @@ Network::FilterFactoryCb MongoProxyFilterConfigFactory::createFilterFactoryFromP emit_dynamic_metadata](Network::FilterManager& filter_manager) -> void { filter_manager.addFilter(std::make_shared( stat_prefix, context.scope(), context.runtime(), access_log, fault_config, - context.drainDecision(), context.random(), context.dispatcher().timeSource(), - emit_dynamic_metadata)); + context.drainDecision(), context.dispatcher().timeSource(), emit_dynamic_metadata)); }; } diff --git a/source/extensions/filters/network/mongo_proxy/proxy.cc b/source/extensions/filters/network/mongo_proxy/proxy.cc index b0022ae14cb1d..59621b337928b 100644 --- a/source/extensions/filters/network/mongo_proxy/proxy.cc +++ b/source/extensions/filters/network/mongo_proxy/proxy.cc @@ -56,13 +56,12 @@ void AccessLog::logMessage(const Message& message, bool full, ProxyFilter::ProxyFilter(const std::string& stat_prefix, Stats::Scope& scope, Runtime::Loader& runtime, AccessLogSharedPtr access_log, - const FaultConfigSharedPtr& fault_config, - const Network::DrainDecision& drain_decision, - Runtime::RandomGenerator& generator, TimeSource& time_source, + const Filters::Common::Fault::FaultDelayConfigSharedPtr& fault_config, + const Network::DrainDecision& drain_decision, TimeSource& time_source, 
bool emit_dynamic_metadata) : stat_prefix_(stat_prefix), scope_(scope), stats_(generateStats(stat_prefix, scope)), - runtime_(runtime), drain_decision_(drain_decision), generator_(generator), - access_log_(access_log), fault_config_(fault_config), time_source_(time_source), + runtime_(runtime), drain_decision_(drain_decision), access_log_(access_log), + fault_config_(fault_config), time_source_(time_source), emit_dynamic_metadata_(emit_dynamic_metadata) { if (!runtime_.snapshot().featureEnabled(MongoRuntimeConfig::get().ConnectionLoggingEnabled, 100)) { @@ -365,26 +364,30 @@ DecoderPtr ProdProxyFilter::createDecoder(DecoderCallbacks& callbacks) { return DecoderPtr{new DecoderImpl(callbacks)}; } -absl::optional ProxyFilter::delayDuration() { - absl::optional result; +absl::optional ProxyFilter::delayDuration() { + absl::optional result; if (!fault_config_) { return result; } if (!runtime_.snapshot().featureEnabled(MongoRuntimeConfig::get().FixedDelayPercent, - fault_config_->delayPercentage().numerator(), - generator_.random(), - ProtobufPercentHelper::fractionalPercentDenominatorToInt( - fault_config_->delayPercentage().denominator()))) { + fault_config_->percentage())) { return result; } - const uint64_t duration = runtime_.snapshot().getInteger( - MongoRuntimeConfig::get().FixedDelayDurationMs, fault_config_->delayDuration()); + // See if the delay provider has a default delay, if not there is no delay. + auto config_duration = fault_config_->duration(nullptr); + if (!config_duration.has_value()) { + return result; + } + + const std::chrono::milliseconds duration = + std::chrono::milliseconds(runtime_.snapshot().getInteger( + MongoRuntimeConfig::get().FixedDelayDurationMs, config_duration.value().count())); // Delay only if the duration is > 0ms. 
- if (duration > 0) { + if (duration.count() > 0) { result = duration; } @@ -405,12 +408,12 @@ void ProxyFilter::tryInjectDelay() { return; } - const absl::optional delay_ms = delayDuration(); + const absl::optional delay = delayDuration(); - if (delay_ms) { + if (delay) { delay_timer_ = read_callbacks_->connection().dispatcher().createTimer( [this]() -> void { delayInjectionTimerCallback(); }); - delay_timer_->enableTimer(std::chrono::milliseconds(delay_ms.value())); + delay_timer_->enableTimer(delay.value()); stats_.delays_injected_.inc(); } } diff --git a/source/extensions/filters/network/mongo_proxy/proxy.h b/source/extensions/filters/network/mongo_proxy/proxy.h index c1e183e93bb11..6c9a308800f42 100644 --- a/source/extensions/filters/network/mongo_proxy/proxy.h +++ b/source/extensions/filters/network/mongo_proxy/proxy.h @@ -23,6 +23,7 @@ #include "common/protobuf/utility.h" #include "common/singleton/const_singleton.h" +#include "extensions/filters/common/fault/fault_config.h" #include "extensions/filters/network/mongo_proxy/codec.h" #include "extensions/filters/network/mongo_proxy/utility.h" @@ -98,24 +99,6 @@ class AccessLog { typedef std::shared_ptr AccessLogSharedPtr; -/** - * Mongo fault configuration. - */ -class FaultConfig { -public: - FaultConfig(const envoy::config::filter::fault::v2::FaultDelay& fault_config) - : delay_percentage_(fault_config.percentage()), - duration_ms_(PROTOBUF_GET_MS_REQUIRED(fault_config, fixed_delay)) {} - envoy::type::FractionalPercent delayPercentage() const { return delay_percentage_; } - uint64_t delayDuration() const { return duration_ms_; } - -private: - envoy::type::FractionalPercent delay_percentage_; - const uint64_t duration_ms_; -}; - -typedef std::shared_ptr FaultConfigSharedPtr; - /** * A sniffing filter for mongo traffic. The current implementation makes a copy of read/written * data, decodes it, and generates stats. 
@@ -126,9 +109,10 @@ class ProxyFilter : public Network::Filter, Logger::Loggable { public: ProxyFilter(const std::string& stat_prefix, Stats::Scope& scope, Runtime::Loader& runtime, - AccessLogSharedPtr access_log, const FaultConfigSharedPtr& fault_config, - const Network::DrainDecision& drain_decision, Runtime::RandomGenerator& generator, - TimeSource& time_system, bool emit_dynamic_metadata); + AccessLogSharedPtr access_log, + const Filters::Common::Fault::FaultDelayConfigSharedPtr& fault_config, + const Network::DrainDecision& drain_decision, TimeSource& time_system, + bool emit_dynamic_metadata); ~ProxyFilter(); virtual DecoderPtr createDecoder(DecoderCallbacks& callbacks) PURE; @@ -188,7 +172,7 @@ class ProxyFilter : public Network::Filter, void doDecode(Buffer::Instance& buffer); void logMessage(Message& message, bool full); void onDrainClose(); - absl::optional delayDuration(); + absl::optional delayDuration(); void delayInjectionTimerCallback(); void tryInjectDelay(); @@ -198,14 +182,13 @@ class ProxyFilter : public Network::Filter, MongoProxyStats stats_; Runtime::Loader& runtime_; const Network::DrainDecision& drain_decision_; - Runtime::RandomGenerator& generator_; Buffer::OwnedImpl read_buffer_; Buffer::OwnedImpl write_buffer_; bool sniffing_{true}; std::list active_query_list_; AccessLogSharedPtr access_log_; Network::ReadFilterCallbacks* read_callbacks_{}; - const FaultConfigSharedPtr fault_config_; + const Filters::Common::Fault::FaultDelayConfigSharedPtr fault_config_; Event::TimerPtr delay_timer_; Event::TimerPtr drain_close_timer_; TimeSource& time_source_; diff --git a/test/extensions/filters/common/fault/BUILD b/test/extensions/filters/common/fault/BUILD new file mode 100644 index 0000000000000..b32b0d000811f --- /dev/null +++ b/test/extensions/filters/common/fault/BUILD @@ -0,0 +1,18 @@ +licenses(["notice"]) # Apache 2 + +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_test", + "envoy_package", +) + +envoy_package() + +envoy_cc_test( + 
name = "fault_config_test", + srcs = ["fault_config_test.cc"], + deps = [ + "//source/extensions/filters/common/fault:fault_config_lib", + "//test/test_common:utility_lib", + ], +) diff --git a/test/extensions/filters/common/fault/fault_config_test.cc b/test/extensions/filters/common/fault/fault_config_test.cc new file mode 100644 index 0000000000000..a01a1c5690063 --- /dev/null +++ b/test/extensions/filters/common/fault/fault_config_test.cc @@ -0,0 +1,60 @@ +#include "extensions/filters/common/fault/fault_config.h" + +#include "test/test_common/utility.h" + +#include "gtest/gtest.h" + +namespace Envoy { +namespace Extensions { +namespace Filters { +namespace Common { +namespace Fault { +namespace { + +TEST(FaultConfigTest, FaultDelayHeaderConfig) { + envoy::config::filter::fault::v2::FaultDelay proto_config; + proto_config.mutable_header_delay(); + FaultDelayConfig config(proto_config); + + // No header. + EXPECT_EQ(absl::nullopt, config.duration(nullptr)); + + // Header with bad data. + Http::TestHeaderMapImpl bad_headers{{"x-envoy-fault-delay-request", "abc"}}; + EXPECT_EQ(absl::nullopt, config.duration(bad_headers.get(HeaderNames::get().DelayRequest))); + + // Valid header. + Http::TestHeaderMapImpl good_headers{{"x-envoy-fault-delay-request", "123"}}; + EXPECT_EQ(std::chrono::milliseconds(123), + config.duration(good_headers.get(HeaderNames::get().DelayRequest)).value()); +} + +TEST(FaultConfigTest, FaultRateLimitHeaderConfig) { + envoy::config::filter::fault::v2::FaultRateLimit proto_config; + proto_config.mutable_header_limit(); + FaultRateLimitConfig config(proto_config); + + // No header. + EXPECT_EQ(absl::nullopt, config.rateKbps(nullptr)); + + // Header with bad data. + Http::TestHeaderMapImpl bad_headers{{"x-envoy-fault-throughput-response", "abc"}}; + EXPECT_EQ(absl::nullopt, config.rateKbps(bad_headers.get(HeaderNames::get().ThroughputResponse))); + + // Header with zero. 
+ Http::TestHeaderMapImpl zero_headers{{"x-envoy-fault-throughput-response", "0"}}; + EXPECT_EQ(absl::nullopt, + config.rateKbps(zero_headers.get(HeaderNames::get().ThroughputResponse))); + + // Valid header. + Http::TestHeaderMapImpl good_headers{{"x-envoy-fault-throughput-response", "123"}}; + EXPECT_EQ(123UL, + config.rateKbps(good_headers.get(HeaderNames::get().ThroughputResponse)).value()); +} + +} // namespace +} // namespace Fault +} // namespace Common +} // namespace Filters +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/http/fault/fault_filter_integration_test.cc b/test/extensions/filters/http/fault/fault_filter_integration_test.cc index 782f88933efeb..3aabd78412062 100644 --- a/test/extensions/filters/http/fault/fault_filter_integration_test.cc +++ b/test/extensions/filters/http/fault/fault_filter_integration_test.cc @@ -26,6 +26,20 @@ name: envoy.fault percentage: numerator: 100 )EOF"; + + const std::string header_fault_config_ = + R"EOF( +name: envoy.fault +config: + delay: + header_delay: {} + percentage: + numerator: 100 + response_rate_limit: + header_limit: {} + percentage: + numerator: 100 +)EOF"; }; // Fault integration tests that should run with all protocols, useful for testing various @@ -47,6 +61,9 @@ config: {} codec_client_ = makeHttpConnection(makeClientConnection(lookupPort("http"))); auto response = sendRequestAndWaitForResponse(default_request_headers_, 0, default_response_headers_, 1024); + + EXPECT_EQ(0UL, test_server_->counter("http.config_test.fault.delays_injected")->value()); + EXPECT_EQ(0UL, test_server_->counter("http.config_test.fault.response_rl_injected")->value()); } // Response rate limited with no trailers. @@ -65,10 +82,56 @@ TEST_P(FaultIntegrationTestAllProtocols, ResponseRateLimitNoTrailers) { simTime().sleep(std::chrono::milliseconds(63)); decoder->waitForBodyData(1088); - // Advance time and wait for a ticks worth of data and end stream. 
+ // Advance time and wait for a tick worth of data and end stream. simTime().sleep(std::chrono::milliseconds(63)); decoder->waitForBodyData(1152); decoder->waitForEndStream(); + + EXPECT_EQ(0UL, test_server_->counter("http.config_test.fault.delays_injected")->value()); + EXPECT_EQ(1UL, test_server_->counter("http.config_test.fault.response_rl_injected")->value()); +} + +// Request delay and response rate limited via header configuration. +TEST_P(FaultIntegrationTestAllProtocols, HeaderFaultConfig) { + initializeFilter(header_fault_config_); + codec_client_ = makeHttpConnection(makeClientConnection(lookupPort("http"))); + Http::TestHeaderMapImpl request_headers{{":method", "GET"}, + {":path", "/test/long/url"}, + {":scheme", "http"}, + {":authority", "host"}, + {"x-envoy-fault-delay-request", "200"}, + {"x-envoy-fault-throughput-response", "1"}}; + const auto current_time = simTime().monotonicTime(); + IntegrationStreamDecoderPtr decoder = codec_client_->makeHeaderOnlyRequest(request_headers); + waitForNextUpstreamRequest(); + + // At least 200ms of simulated time should have elapsed before we got the upstream request. + EXPECT_LE(std::chrono::milliseconds(200), simTime().monotonicTime() - current_time); + + // Verify response body throttling. + upstream_request_->encodeHeaders(default_response_headers_, false); + Buffer::OwnedImpl data(std::string(1025, 'a')); + upstream_request_->encodeData(data, true); + decoder->waitForBodyData(1024); + + // Advance time and wait for a tick worth of data and end stream. + simTime().sleep(std::chrono::milliseconds(63)); + decoder->waitForBodyData(1025); + decoder->waitForEndStream(); + + EXPECT_EQ(1UL, test_server_->counter("http.config_test.fault.delays_injected")->value()); + EXPECT_EQ(1UL, test_server_->counter("http.config_test.fault.response_rl_injected")->value()); +} + +// Header configuration with no headers, so no fault injection. 
+TEST_P(FaultIntegrationTestAllProtocols, HeaderFaultConfigNoHeaders) { + initializeFilter(header_fault_config_); + codec_client_ = makeHttpConnection(makeClientConnection(lookupPort("http"))); + auto response = + sendRequestAndWaitForResponse(default_request_headers_, 0, default_response_headers_, 1024); + + EXPECT_EQ(0UL, test_server_->counter("http.config_test.fault.delays_injected")->value()); + EXPECT_EQ(0UL, test_server_->counter("http.config_test.fault.response_rl_injected")->value()); } // Fault integration tests that run with HTTP/2 only, used for fully testing trailers. @@ -90,11 +153,11 @@ TEST_P(FaultIntegrationTestHttp2, ResponseRateLimitTrailersBodyFlushed) { upstream_request_->encodeData(data, false); decoder->waitForBodyData(1024); - // Advance time and wait for a ticks worth of data. + // Advance time and wait for a tick worth of data. simTime().sleep(std::chrono::milliseconds(63)); decoder->waitForBodyData(1088); - // Advance time and wait for a ticks worth of data. + // Advance time and wait for a tick worth of data. simTime().sleep(std::chrono::milliseconds(63)); decoder->waitForBodyData(1152); @@ -103,6 +166,9 @@ TEST_P(FaultIntegrationTestHttp2, ResponseRateLimitTrailersBodyFlushed) { upstream_request_->encodeTrailers(trailers); decoder->waitForEndStream(); EXPECT_NE(nullptr, decoder->trailers()); + + EXPECT_EQ(0UL, test_server_->counter("http.config_test.fault.delays_injected")->value()); + EXPECT_EQ(1UL, test_server_->counter("http.config_test.fault.response_rl_injected")->value()); } // Rate limiting with trailers received before the body has been flushed. @@ -119,15 +185,18 @@ TEST_P(FaultIntegrationTestHttp2, ResponseRateLimitTrailersBodyNotFlushed) { upstream_request_->encodeTrailers(trailers); decoder->waitForBodyData(1024); - // Advance time and wait for a ticks worth of data. + // Advance time and wait for a tick worth of data. 
simTime().sleep(std::chrono::milliseconds(63)); decoder->waitForBodyData(1088); - // Advance time and wait for a ticks worth of data, trailers, and end stream. + // Advance time and wait for a tick worth of data, trailers, and end stream. simTime().sleep(std::chrono::milliseconds(63)); decoder->waitForBodyData(1152); decoder->waitForEndStream(); EXPECT_NE(nullptr, decoder->trailers()); + + EXPECT_EQ(0UL, test_server_->counter("http.config_test.fault.delays_injected")->value()); + EXPECT_EQ(1UL, test_server_->counter("http.config_test.fault.response_rl_injected")->value()); } } // namespace diff --git a/test/extensions/filters/http/fault/fault_filter_test.cc b/test/extensions/filters/http/fault/fault_filter_test.cc index 72335a27548ce..e6d108ff0da0d 100644 --- a/test/extensions/filters/http/fault/fault_filter_test.cc +++ b/test/extensions/filters/http/fault/fault_filter_test.cc @@ -262,13 +262,6 @@ TEST(FaultFilterBadConfigTest, MissingDelayDuration) { faultFilterBadConfigHelper(json); } -MATCHER_P(Percent, rhs, "") { - envoy::type::FractionalPercent expected; - expected.set_numerator(rhs); - expected.set_denominator(envoy::type::FractionalPercent::HUNDRED); - return TestUtility::protoEqual(expected, arg); -} - TEST_F(FaultFilterTest, AbortWithHttpStatus) { envoy::config::filter::http::fault::v2::HTTPFault fault; fault.mutable_abort()->mutable_percentage()->set_numerator(100); @@ -281,13 +274,6 @@ TEST_F(FaultFilterTest, AbortWithHttpStatus) { getInteger("fault.http.max_active_faults", std::numeric_limits::max())) .WillOnce(Return(std::numeric_limits::max())); - // Delay related calls - EXPECT_CALL(runtime_.snapshot_, - featureEnabled("fault.http.delay.fixed_delay_percent", - Matcher(Percent(0)))) - .WillOnce(Return(false)); - - EXPECT_CALL(runtime_.snapshot_, getInteger("fault.http.delay.fixed_duration_ms", _)).Times(0); EXPECT_CALL(decoder_filter_callbacks_, continueDecoding()).Times(0); EXPECT_CALL(decoder_filter_callbacks_.stream_info_, 
setResponseFlag(StreamInfo::ResponseFlag::DelayInjected)) @@ -375,7 +361,6 @@ TEST_F(FaultFilterTest, Overflow) { TEST_F(FaultFilterTest, FixedDelayDeprecatedPercentAndNonZeroDuration) { envoy::config::filter::http::fault::v2::HTTPFault fault; - fault.mutable_delay()->set_type(envoy::config::filter::fault::v2::FaultDelay::FIXED); fault.mutable_delay()->mutable_percentage()->set_numerator(50); fault.mutable_delay()->mutable_percentage()->set_denominator( envoy::type::FractionalPercent::HUNDRED); @@ -1009,10 +994,6 @@ class FaultFilterRateLimitTest : public FaultFilterTest { featureEnabled("fault.http.rate_limit.response_percent", Matcher(Percent(100)))) .WillOnce(Return(enable_runtime)); - EXPECT_CALL(runtime_.snapshot_, - featureEnabled("fault.http.delay.fixed_delay_percent", - Matcher(Percent(0)))) - .WillOnce(Return(false)); EXPECT_CALL(runtime_.snapshot_, featureEnabled("fault.http.abort.abort_percent", Matcher(Percent(0)))) diff --git a/test/extensions/filters/network/mongo_proxy/proxy_test.cc b/test/extensions/filters/network/mongo_proxy/proxy_test.cc index 5a27208b02f5b..3c1b7168ba51e 100644 --- a/test/extensions/filters/network/mongo_proxy/proxy_test.cc +++ b/test/extensions/filters/network/mongo_proxy/proxy_test.cc @@ -25,6 +25,7 @@ using testing::_; using testing::AnyNumber; using testing::AtLeast; using testing::Invoke; +using testing::Matcher; using testing::NiceMock; using testing::Property; using testing::Return; @@ -82,7 +83,7 @@ class MongoProxyFilterTest : public testing::Test { void initializeFilter(bool emit_dynamic_metadata = false) { filter_ = std::make_unique("test.", store_, runtime_, access_log_, - fault_config_, drain_decision_, generator_, + fault_config_, drain_decision_, dispatcher_.timeSource(), emit_dynamic_metadata); filter_->initializeReadFilterCallbacks(read_filter_callbacks_); filter_->onNewConnection(); @@ -93,15 +94,16 @@ class MongoProxyFilterTest : public testing::Test { } void setupDelayFault(bool enable_fault) { - 
envoy::config::filter::fault::v2::FaultDelay fault{}; + envoy::config::filter::fault::v2::FaultDelay fault; fault.mutable_percentage()->set_numerator(50); fault.mutable_percentage()->set_denominator(envoy::type::FractionalPercent::HUNDRED); fault.mutable_fixed_delay()->CopyFrom(Protobuf::util::TimeUtil::MillisecondsToDuration(10)); - fault_config_.reset(new FaultConfig(fault)); + fault_config_.reset(new Filters::Common::Fault::FaultDelayConfig(fault)); - EXPECT_CALL(runtime_.snapshot_, featureEnabled(_, _, _, 100)).Times(AnyNumber()); - EXPECT_CALL(runtime_.snapshot_, featureEnabled("mongo.fault.fixed_delay.percent", 50, _, 100)) + EXPECT_CALL(runtime_.snapshot_, + featureEnabled("mongo.fault.fixed_delay.percent", + Matcher(Percent(50)))) .WillOnce(Return(enable_fault)); if (enable_fault) { @@ -117,12 +119,11 @@ class MongoProxyFilterTest : public testing::Test { std::shared_ptr file_{ new NiceMock()}; AccessLogSharedPtr access_log_; - FaultConfigSharedPtr fault_config_; + Filters::Common::Fault::FaultDelayConfigSharedPtr fault_config_; std::unique_ptr filter_; NiceMock read_filter_callbacks_; Envoy::AccessLog::MockAccessLogManager log_manager_; NiceMock drain_decision_; - NiceMock generator_; TestStreamInfo stream_info_; }; diff --git a/test/test_common/utility.h b/test/test_common/utility.h index 0ab8d2779c839..a2cb7548c2c84 100644 --- a/test/test_common/utility.h +++ b/test/test_common/utility.h @@ -536,4 +536,11 @@ MATCHER_P(ProtoEq, rhs, "") { return TestUtility::protoEqual(arg, rhs); } MATCHER_P(RepeatedProtoEq, rhs, "") { return TestUtility::repeatedPtrFieldEqual(arg, rhs); } +MATCHER_P(Percent, rhs, "") { + envoy::type::FractionalPercent expected; + expected.set_numerator(rhs); + expected.set_denominator(envoy::type::FractionalPercent::HUNDRED); + return TestUtility::protoEqual(expected, arg); +} + } // namespace Envoy From 00400d20abb5105f655185387fd75004972b020a Mon Sep 17 00:00:00 2001 From: Matt Klein Date: Tue, 26 Mar 2019 16:27:38 -0700 Subject: 
[PATCH 018/165] docs: start the work of snapping docs to the repo/docker (#6376) Part of #6361 Signed-off-by: Matt Klein --- docs/build.sh | 9 ++++- docs/conf.py | 37 ++++++++++++++++++- docs/root/start/sandboxes/fault_injection.rst | 4 ++ docs/root/start/sandboxes/lua.rst | 4 ++ docs/root/start/sandboxes/mysql.rst | 4 ++ docs/root/start/sandboxes/redis.rst | 4 ++ docs/root/start/start.rst | 24 ++++++------ tools/protodoc/protodoc.py | 5 +-- 8 files changed, 73 insertions(+), 18 deletions(-) create mode 100644 docs/root/start/sandboxes/fault_injection.rst create mode 100644 docs/root/start/sandboxes/lua.rst create mode 100644 docs/root/start/sandboxes/mysql.rst create mode 100644 docs/root/start/sandboxes/redis.rst diff --git a/docs/build.sh b/docs/build.sh index 59178838e1d96..2985c0e51a787 100755 --- a/docs/build.sh +++ b/docs/build.sh @@ -16,15 +16,19 @@ then exit 1 fi # Check the version_history.rst contains current release version. - grep --fixed-strings "$VERSION_NUMBER" docs/root/intro/version_history.rst + grep --fixed-strings "$VERSION_NUMBER" docs/root/intro/version_history.rst \ + || (echo "Git tag not found in version_history.rst" && exit 1) + # Now that we now there is a match, we can use the tag. 
export ENVOY_DOCS_VERSION_STRING="tag-$CIRCLE_TAG" export ENVOY_DOCS_RELEASE_LEVEL=tagged + export ENVOY_BLOB_SHA="$CIRCLE_TAG" else BUILD_SHA=$(git rev-parse HEAD) VERSION_NUM=$(cat VERSION) export ENVOY_DOCS_VERSION_STRING="${VERSION_NUM}"-"${BUILD_SHA:0:6}" export ENVOY_DOCS_RELEASE_LEVEL=pre-release + export ENVOY_BLOB_SHA="$BUILD_SHA" fi SCRIPT_DIR=$(dirname "$0") @@ -42,7 +46,8 @@ source_venv "$BUILD_DIR" pip install -r "${SCRIPT_DIR}"/requirements.txt bazel build ${BAZEL_BUILD_OPTIONS} @envoy_api//docs:protos --aspects \ - tools/protodoc/protodoc.bzl%proto_doc_aspect --output_groups=rst --action_env=CPROFILE_ENABLED --spawn_strategy=standalone + tools/protodoc/protodoc.bzl%proto_doc_aspect --output_groups=rst --action_env=CPROFILE_ENABLED \ + --action_env=ENVOY_BLOB_SHA --spawn_strategy=standalone # These are the protos we want to put in docs, this list will grow. # TODO(htuch): Factor this out of this script. diff --git a/docs/conf.py b/docs/conf.py index 932135b2cf5fe..64c48a8f6c793 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -14,18 +14,45 @@ from datetime import datetime import os +from sphinx.directives.code import CodeBlock import sphinx_rtd_theme import sys +# https://stackoverflow.com/questions/44761197/how-to-use-substitution-definitions-with-code-blocks +class SubstitutionCodeBlock(CodeBlock): + """ + Similar to CodeBlock but replaces placeholders with variables. See "substitutions" below. + """ + + def run(self): + """ + Replace placeholders with given variables. 
+ """ + app = self.state.document.settings.env.app + new_content = [] + existing_content = self.content + for item in existing_content: + for pair in app.config.substitutions: + original, replacement = pair + item = item.replace(original, replacement) + new_content.append(item) + + self.content = new_content + return list(CodeBlock.run(self)) + + def setup(app): app.add_config_value('release_level', '', 'env') + app.add_config_value('substitutions', [], 'html') + app.add_directive('substitution-code-block', SubstitutionCodeBlock) if not os.environ.get('ENVOY_DOCS_RELEASE_LEVEL'): raise Exception("ENVOY_DOCS_RELEASE_LEVEL env var must be defined") release_level = os.environ['ENVOY_DOCS_RELEASE_LEVEL'] +blob_sha = os.environ['ENVOY_BLOB_SHA'] # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the @@ -42,10 +69,16 @@ def setup(app): # ones. extensions = ['sphinxcontrib.httpdomain', 'sphinx.ext.extlinks', 'sphinx.ext.ifconfig'] extlinks = { - 'repo': ('https://github.com/envoyproxy/envoy/blob/master/%s', ''), - 'api': ('https://github.com/envoyproxy/envoy/blob/master/api/%s', ''), + 'repo': ('https://github.com/envoyproxy/envoy/blob/{}/%s'.format(blob_sha), ''), + 'api': ('https://github.com/envoyproxy/envoy/blob/{}/api/%s'.format(blob_sha), ''), } +# Setup global substitutions +if 'pre-release' in release_level: + substitutions = [('|envoy_docker_image|', 'envoy-dev:{}'.format(blob_sha))] +else: + substitutions = [('|envoy_docker_image|', 'envoy:{}'.format(blob_sha))] + # Add any paths that contain templates here, relative to this directory. 
templates_path = ['_templates'] diff --git a/docs/root/start/sandboxes/fault_injection.rst b/docs/root/start/sandboxes/fault_injection.rst new file mode 100644 index 0000000000000..bf474769e13b6 --- /dev/null +++ b/docs/root/start/sandboxes/fault_injection.rst @@ -0,0 +1,4 @@ +Fault injection +=============== + +* :repo:`Fault Injection ` diff --git a/docs/root/start/sandboxes/lua.rst b/docs/root/start/sandboxes/lua.rst new file mode 100644 index 0000000000000..82c6686982ce2 --- /dev/null +++ b/docs/root/start/sandboxes/lua.rst @@ -0,0 +1,4 @@ +Lua +=== + +* :repo:`Lua ` diff --git a/docs/root/start/sandboxes/mysql.rst b/docs/root/start/sandboxes/mysql.rst new file mode 100644 index 0000000000000..648cb2f7e740a --- /dev/null +++ b/docs/root/start/sandboxes/mysql.rst @@ -0,0 +1,4 @@ +MySQL +===== + +* :repo:`MySQL ` diff --git a/docs/root/start/sandboxes/redis.rst b/docs/root/start/sandboxes/redis.rst new file mode 100644 index 0000000000000..9d81afc6cf22b --- /dev/null +++ b/docs/root/start/sandboxes/redis.rst @@ -0,0 +1,4 @@ +Redis +===== + +* :repo:`Redis ` diff --git a/docs/root/start/start.rst b/docs/root/start/start.rst index b3da8ce4b2543..32c256d4f8ae1 100644 --- a/docs/root/start/start.rst +++ b/docs/root/start/start.rst @@ -22,10 +22,12 @@ the same configuration. A very minimal Envoy configuration that can be used to validate basic plain HTTP proxying is available in :repo:`configs/google_com_proxy.v2.yaml`. This is not -intended to represent a realistic Envoy deployment:: +intended to represent a realistic Envoy deployment: - $ docker pull envoyproxy/envoy-dev:latest - $ docker run --rm -d -p 10000:10000 envoyproxy/envoy-dev:latest +.. 
substitution-code-block:: none + + $ docker pull envoyproxy/|envoy_docker_image| + $ docker run --rm -d -p 10000:10000 envoyproxy/|envoy_docker_image| $ curl -v localhost:10000 The Docker image used will contain the latest version of Envoy @@ -113,9 +115,9 @@ Using the Envoy Docker Image Create a simple Dockerfile to execute Envoy, which assumes that envoy.yaml (described above) is in your local directory. You can refer to the :ref:`Command line options `. -.. code-block:: none +.. substitution-code-block:: none - FROM envoyproxy/envoy-dev:latest + FROM envoyproxy/|envoy_docker_image| COPY envoy.yaml /etc/envoy/envoy.yaml Build the Docker image that runs your configuration using:: @@ -133,12 +135,12 @@ And finally, test it using:: If you would like to use Envoy with docker-compose you can overwrite the provided configuration file by using a volume. -.. code-block: yaml +.. substitution-code-block: yaml version: '3' services: envoy: - image: envoyproxy/envoy-dev:latest + image: envoyproxy/|envoy_docker_image| ports: - "10000:10000" volumes: @@ -157,14 +159,14 @@ features. The following sandboxes are available: :maxdepth: 1 sandboxes/cors - Fault Injection + sandboxes/fault_injection sandboxes/front_proxy sandboxes/grpc_bridge sandboxes/jaeger_native_tracing sandboxes/jaeger_tracing - Lua - MySQL - Redis + sandboxes/lua + sandboxes/mysql + sandboxes/redis sandboxes/zipkin_tracing Other use cases diff --git a/tools/protodoc/protodoc.py b/tools/protodoc/protodoc.py index 26852a1a028e5..2a8775aef1b28 100755 --- a/tools/protodoc/protodoc.py +++ b/tools/protodoc/protodoc.py @@ -68,9 +68,8 @@ ]) # Template for data plane API URLs. -# TODO(htuch): Add the ability to build a permalink by feeding a hash -# to the tool or inferring from local tree (only really make sense in CI). 
-DATA_PLANE_API_URL_FMT = 'https://github.com/envoyproxy/envoy/blob/master/api/%s#L%d' +DATA_PLANE_API_URL_FMT = 'https://github.com/envoyproxy/envoy/blob/{}/api/%s#L%d'.format( + os.environ['ENVOY_BLOB_SHA']) class ProtodocError(Exception): From 71b3ecdaa818885a50f24feda52b6c63d70c95ca Mon Sep 17 00:00:00 2001 From: Michael Rebello Date: Tue, 26 Mar 2019 20:16:18 -0700 Subject: [PATCH 019/165] docs: fix a few typos in intro (#6390) Signed-off-by: Michael Rebello --- docs/root/intro/arch_overview/grpc.rst | 4 ++-- .../http_connection_management.rst | 18 +++++++++--------- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/docs/root/intro/arch_overview/grpc.rst b/docs/root/intro/arch_overview/grpc.rst index 84226f444511d..1277ac6e983fd 100644 --- a/docs/root/intro/arch_overview/grpc.rst +++ b/docs/root/intro/arch_overview/grpc.rst @@ -40,9 +40,9 @@ Envoy supports two gRPC bridges: gRPC services ------------- -In addition to proxying gRPC on the data plane, Envoy make use of gRPC for its +In addition to proxying gRPC on the data plane, Envoy makes use of gRPC for its control plane, where it :ref:`fetches configuration from management server(s) -` and also in filters, for example for :ref:`rate limiting +` and in filters, such as for :ref:`rate limiting ` or authorization checks. We refer to these as *gRPC services*. diff --git a/docs/root/intro/arch_overview/http_connection_management.rst b/docs/root/intro/arch_overview/http_connection_management.rst index e21155b3f2b4c..68bdeacb33e20 100644 --- a/docs/root/intro/arch_overview/http_connection_management.rst +++ b/docs/root/intro/arch_overview/http_connection_management.rst @@ -48,31 +48,31 @@ table `. The route table can be specified in one of Retry plugin configuration -------------------------- -Normally during retries, hosts selection follows the same process as the original request. 
To modify -this behavior retry plugins can be used, which fall into two categories: +Normally during retries, host selection follows the same process as the original request. Retry plugins +can be used to modify this behavior, and they fall into two categories: * :ref:`Host Predicates `: - These predicates can be used to "reject" a host, which will cause host selection to be reattempted. - Any number of these predicates can be specified, and the host will be rejected if any of the predicates reject the host. + These predicates can be used to "reject" a host, which will cause host selection to be reattempted. + Any number of these predicates can be specified, and the host will be rejected if any of the predicates reject the host. Envoy supports the following built-in host predicates * *envoy.retry_host_predicates.previous_hosts*: This will keep track of previously attempted hosts, and rejects hosts that have already been attempted. - + * :ref:`Priority Predicates`: These predicates can be used to adjust the priority load used when selecting a priority for a retry attempt. Only one such predicate may be specified. Envoy supports the following built-in priority predicates - * *envoy.retry_priority.previous_priorities*: This will keep track of previously attempted priorities, + * *envoy.retry_priority.previous_priorities*: This will keep track of previously attempted priorities, and adjust the priority load such that other priorities will be targeted in subsequent retry attempts. Host selection will continue until either the configured predicates accept the host or a configurable -:ref:`max attempts ` has been reached. +:ref:`max attempts ` has been reached. -These plugins can be combined to affect both host selection and priority load. Envoy can also be extended +These plugins can be combined to affect both host selection and priority load. Envoy can also be extended with custom retry plugins similar to how custom filters can be added. 
@@ -152,7 +152,7 @@ upstream will be modified by: 2. Replacing the Authority/Host, Scheme, and Path headers with the values from the Location header. The altered request headers will then have a new route selected, be sent through a new filter chain, -and then shipped upstream with all of the normal Envoy request sanitization taking place. +and then shipped upstream with all of the normal Envoy request sanitization taking place. .. warning:: Note that HTTP connection manager sanitization such as clearing untrusted headers will only be From 2a2a886109134cfe104d96469f9fd5d2d40ccff4 Mon Sep 17 00:00:00 2001 From: Derek Argueta Date: Tue, 26 Mar 2019 20:17:36 -0700 Subject: [PATCH 020/165] test: convert router v1 JSON test configs to v2 YAML (#6332) Signed-off-by: Derek Argueta --- test/common/router/BUILD | 4 +- test/common/router/config_impl_test.cc | 2883 +++++++++++------------- 2 files changed, 1279 insertions(+), 1608 deletions(-) diff --git a/test/common/router/BUILD b/test/common/router/BUILD index 89e87cda4ea0e..b6d1ac73a979e 100644 --- a/test/common/router/BUILD +++ b/test/common/router/BUILD @@ -17,10 +17,8 @@ envoy_cc_test( deps = [ ":route_fuzz_proto_cc", "//source/common/config:metadata_lib", - "//source/common/config:rds_json_lib", "//source/common/http:header_map_lib", "//source/common/http:headers_lib", - "//source/common/json:json_loader_lib", "//source/common/router:config_lib", "//source/extensions/filters/http/common:empty_http_filter_config_lib", "//test/fuzz:utility_lib", @@ -28,6 +26,8 @@ envoy_cc_test( "//test/test_common:environment_lib", "//test/test_common:registry_lib", "//test/test_common:utility_lib", + "@envoy_api//envoy/api/v2:rds_cc", + "@envoy_api//envoy/config/filter/http/router/v2:router_cc", ], ) diff --git a/test/common/router/config_impl_test.cc b/test/common/router/config_impl_test.cc index eef650fca0b8a..8bf39550ff09b 100644 --- a/test/common/router/config_impl_test.cc +++ b/test/common/router/config_impl_test.cc @@ -5,14 +5,14 
@@ #include #include +#include "envoy/api/v2/rds.pb.validate.h" +#include "envoy/api/v2/route/route.pb.validate.h" #include "envoy/server/filter_config.h" #include "common/config/metadata.h" -#include "common/config/rds_json.h" #include "common/config/well_known_names.h" #include "common/http/header_map_impl.h" #include "common/http/headers.h" -#include "common/json/json_loader.h" #include "common/network/address_impl.h" #include "common/router/config_impl.h" @@ -85,18 +85,10 @@ Http::TestHeaderMapImpl genHeaders(const std::string& host, const std::string& p {"x-route-nope", "route"}}; } -envoy::api::v2::RouteConfiguration parseRouteConfigurationFromJson(const std::string& json_string) { - envoy::api::v2::RouteConfiguration route_config; - auto json_object_ptr = Json::Factory::loadFromString(json_string); - Stats::StatsOptionsImpl stats_options; - Envoy::Config::RdsJson::translateRouteConfiguration(*json_object_ptr, route_config, - stats_options); - return route_config; -} - envoy::api::v2::RouteConfiguration parseRouteConfigurationFromV2Yaml(const std::string& yaml) { envoy::api::v2::RouteConfiguration route_config; MessageUtil::loadFromYaml(yaml, route_config); + MessageUtil::validate(route_config); return route_config; } @@ -113,174 +105,177 @@ class ConfigImplTestBase { class RouteMatcherTest : public testing::Test, public ConfigImplTestBase {}; TEST_F(RouteMatcherTest, TestRoutes) { - const std::string json = R"EOF( -{ - "virtual_hosts": [ - { - "name": "www2", - "domains": ["lyft.com", "www.lyft.com", "w.lyft.com", "ww.lyft.com", "wwww.lyft.com"], - "routes": [ - { - "prefix": "/new_endpoint", - "prefix_rewrite": "/api/new_endpoint", - "cluster": "www2" - }, - { - "path": "/", - "cluster": "root_www2" - }, - { - "prefix": "/", - "cluster": "www2" - } - ] - }, - { - "name": "www2_staging", - "domains": ["www-staging.lyft.net", "www-staging-orca.lyft.com"], - "routes": [ - { - "prefix": "/", - "cluster": "www2_staging" - } - ] - }, - { - "name": "wildcard", - 
"domains": ["*.foo.com", "*-bar.baz.com"], - "routes": [ - { - "prefix": "/", - "cluster": "wildcard" - } - ] - }, - { - "name": "wildcard2", - "domains": ["*.baz.com"], - "routes": [ - { - "prefix": "/", - "cluster": "wildcard2" - } - ] - }, - { - "name": "regex", - "domains": ["bat.com"], - "routes": [ - { - "regex": "/t[io]c", - "cluster": "clock" - }, - { - "regex": "/baa+", - "cluster": "sheep" - }, - { - "regex": ".*/\\d{3}$", - "cluster": "three_numbers", - "prefix_rewrite": "/rewrote" - }, - { - "regex": ".*", - "cluster": "regex_default" - } - ] - }, - { - "name": "regex2", - "domains": ["bat2.com"], - "routes": [ - { - "regex": "", - "cluster": "nothingness" - }, - { - "regex": ".*", - "cluster": "regex_default" - } - ] - }, - { - "name": "default", - "domains": ["*"], - "routes": [ - { - "prefix": "/api/application_data", - "cluster": "ats" - }, - { - "path": "/api/locations", - "cluster": "locations", - "prefix_rewrite": "/rewrote", - "case_sensitive": false - }, - { - "prefix": "/api/leads/me", - "cluster": "ats" - }, - { - "prefix": "/host/rewrite/me", - "cluster": "ats", - "host_rewrite": "new_host" - }, - { - "prefix": "/oldhost/rewrite/me", - "cluster": "ats", - "host_rewrite": "new_oldhost" - }, - { - "path": "/foo", - "prefix_rewrite": "/bar", - "cluster": "instant-server", - "case_sensitive": true - }, - { - "path": "/tar", - "prefix_rewrite": "/car", - "cluster": "instant-server", - "case_sensitive": false - }, - { - "prefix": "/newhost/rewrite/me", - "cluster": "ats", - "host_rewrite": "new_host", - "case_sensitive": false - }, - { - "path": "/FOOD", - "prefix_rewrite": "/cAndy", - "cluster": "ats", - "case_sensitive":false - }, - { - "path": "/ApplEs", - "prefix_rewrite": "/oranGES", - "cluster": "instant-server", - "case_sensitive": true - }, - { - "prefix": "/", - "cluster": "instant-server", - "timeout_ms": 30000 - }], - "virtual_clusters": [ - {"pattern": "^/rides$", "method": "POST", "name": "ride_request"}, - {"pattern": 
"^/rides/\\d+$", "method": "PUT", "name": "update_ride"}, - {"pattern": "^/users/\\d+/chargeaccounts$", "method": "POST", "name": "cc_add"}, - {"pattern": "^/users/\\d+/chargeaccounts/(?!validate)\\w+$", "method": "PUT", - "name": "cc_add"}, - {"pattern": "^/users$", "method": "POST", "name": "create_user_login"}, - {"pattern": "^/users/\\d+$", "method": "PUT", "name": "update_user"}, - {"pattern": "^/users/\\d+/location$", "method": "POST", "name": "ulu"}] - } - ] -} + const std::string yaml = R"EOF( +virtual_hosts: +- name: www2 + domains: + - lyft.com + - www.lyft.com + - w.lyft.com + - ww.lyft.com + - wwww.lyft.com + routes: + - match: + prefix: "/new_endpoint" + route: + prefix_rewrite: "/api/new_endpoint" + cluster: www2 + - match: + path: "/" + route: + cluster: root_www2 + - match: + prefix: "/" + route: + cluster: www2 +- name: www2_staging + domains: + - www-staging.lyft.net + - www-staging-orca.lyft.com + routes: + - match: + prefix: "/" + route: + cluster: www2_staging +- name: wildcard + domains: + - "*.foo.com" + - "*-bar.baz.com" + routes: + - match: + prefix: "/" + route: + cluster: wildcard +- name: wildcard2 + domains: + - "*.baz.com" + routes: + - match: + prefix: "/" + route: + cluster: wildcard2 +- name: regex + domains: + - bat.com + routes: + - match: + regex: "/t[io]c" + route: + cluster: clock + - match: + regex: "/baa+" + route: + cluster: sheep + - match: + regex: ".*/\\d{3}$" + route: + cluster: three_numbers + prefix_rewrite: "/rewrote" + - match: + regex: ".*" + route: + cluster: regex_default +- name: regex2 + domains: + - bat2.com + routes: + - match: + regex: '' + route: + cluster: nothingness + - match: + regex: ".*" + route: + cluster: regex_default +- name: default + domains: + - "*" + routes: + - match: + prefix: "/api/application_data" + route: + cluster: ats + - match: + path: "/api/locations" + case_sensitive: false + route: + cluster: locations + prefix_rewrite: "/rewrote" + - match: + prefix: "/api/leads/me" + route: + 
cluster: ats + - match: + prefix: "/host/rewrite/me" + route: + cluster: ats + host_rewrite: new_host + - match: + prefix: "/oldhost/rewrite/me" + route: + cluster: ats + host_rewrite: new_oldhost + - match: + path: "/foo" + case_sensitive: true + route: + prefix_rewrite: "/bar" + cluster: instant-server + - match: + path: "/tar" + case_sensitive: false + route: + prefix_rewrite: "/car" + cluster: instant-server + - match: + prefix: "/newhost/rewrite/me" + case_sensitive: false + route: + cluster: ats + host_rewrite: new_host + - match: + path: "/FOOD" + case_sensitive: false + route: + prefix_rewrite: "/cAndy" + cluster: ats + - match: + path: "/ApplEs" + case_sensitive: true + route: + prefix_rewrite: "/oranGES" + cluster: instant-server + - match: + prefix: "/" + route: + cluster: instant-server + timeout: 30s + virtual_clusters: + - pattern: "^/rides$" + method: POST + name: ride_request + - pattern: "^/rides/\\d+$" + method: PUT + name: update_ride + - pattern: "^/users/\\d+/chargeaccounts$" + method: POST + name: cc_add + - pattern: "^/users/\\d+/chargeaccounts/(?!validate)\\w+$" + method: PUT + name: cc_add + - pattern: "^/users$" + method: POST + name: create_user_login + - pattern: "^/users/\\d+$" + method: PUT + name: update_user + - pattern: "^/users/\\d+/location$" + method: POST + name: ulu )EOF"; NiceMock stream_info; - TestConfigImpl config(parseRouteConfigurationFromJson(json), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); // Base routing testing. EXPECT_EQ("instant-server", @@ -609,91 +604,94 @@ TEST_F(RouteMatcherTest, TestRoutesWithInvalidRegex) { // Validates behavior of request_headers_to_add at router, vhost, and route action levels. 
TEST_F(RouteMatcherTest, TestAddRemoveRequestHeaders) { - const std::string json = R"EOF( -{ - "virtual_hosts": [ - { - "name": "www2", - "domains": ["lyft.com", "www.lyft.com", "w.lyft.com", "ww.lyft.com", "wwww.lyft.com"], - "request_headers_to_add": [ - {"key": "x-global-header1", "value": "vhost-override"}, - {"key": "x-vhost-header1", "value": "vhost1-www2"} - ], - "routes": [ - { - "prefix": "/new_endpoint", - "prefix_rewrite": "/api/new_endpoint", - "cluster": "www2", - "request_headers_to_add": [ - {"key": "x-global-header1", "value": "route-override"}, - {"key": "x-vhost-header1", "value": "route-override"}, - {"key": "x-route-action-header", "value": "route-new_endpoint"} - ] - }, - { - "path": "/", - "cluster": "root_www2", - "request_headers_to_add": [ - {"key": "x-route-action-header", "value": "route-allpath"} - ] - }, - { - "prefix": "/", - "cluster": "www2" - } - ] - }, - { - "name": "www2_staging", - "domains": ["www-staging.lyft.net", "www-staging-orca.lyft.com"], - "request_headers_to_add": [ - {"key": "x-vhost-header1", "value": "vhost1-www2_staging"} - ], - "routes": [ - { - "prefix": "/", - "cluster": "www2_staging", - "request_headers_to_add": [ - {"key": "x-route-action-header", "value": "route-allprefix"} - ] - } - ] - }, - { - "name": "default", - "domains": ["*"], - "routes": [ - { - "prefix": "/", - "cluster": "instant-server", - "timeout_ms": 30000 - } - ] - } - ], - - "internal_only_headers": [ - "x-lyft-user-id" - ], - - "response_headers_to_add": [ - {"key": "x-envoy-upstream-canary", "value": "true"} - ], - - "response_headers_to_remove": [ - "x-envoy-upstream-canary", - "x-envoy-virtual-cluster" - ], - - "request_headers_to_add": [ - {"key": "x-global-header1", "value": "global1"} - ] -} + const std::string yaml = R"EOF( +virtual_hosts: +- name: www2 + domains: + - lyft.com + - www.lyft.com + - w.lyft.com + - ww.lyft.com + - wwww.lyft.com + request_headers_to_add: + - header: + key: x-global-header1 + value: vhost-override + - 
header: + key: x-vhost-header1 + value: vhost1-www2 + routes: + - match: + prefix: "/new_endpoint" + route: + prefix_rewrite: "/api/new_endpoint" + cluster: www2 + request_headers_to_add: + - header: + key: x-global-header1 + value: route-override + - header: + key: x-vhost-header1 + value: route-override + - header: + key: x-route-action-header + value: route-new_endpoint + - match: + path: "/" + route: + cluster: root_www2 + request_headers_to_add: + - header: + key: x-route-action-header + value: route-allpath + - match: + prefix: "/" + route: + cluster: www2 +- name: www2_staging + domains: + - www-staging.lyft.net + - www-staging-orca.lyft.com + request_headers_to_add: + - header: + key: x-vhost-header1 + value: vhost1-www2_staging + routes: + - match: + prefix: "/" + route: + cluster: www2_staging + request_headers_to_add: + - header: + key: x-route-action-header + value: route-allprefix +- name: default + domains: + - "*" + routes: + - match: + prefix: "/" + route: + cluster: instant-server + timeout: 3s +internal_only_headers: +- x-lyft-user-id +response_headers_to_add: +- header: + key: x-envoy-upstream-canary + value: 'true' +response_headers_to_remove: +- x-envoy-upstream-canary +- x-envoy-virtual-cluster +request_headers_to_add: +- header: + key: x-global-header1 + value: global1 )EOF"; NiceMock stream_info; - TestConfigImpl config(parseRouteConfigurationFromJson(json), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); // Request header manipulation testing. 
{ @@ -1036,31 +1034,28 @@ name: foo } TEST_F(RouteMatcherTest, Priority) { - const std::string json = R"EOF( -{ - "virtual_hosts": [ - { - "name": "local_service", - "domains": ["*"], - "routes": [ - { - "prefix": "/foo", - "cluster": "local_service_grpc", - "priority": "high" - }, - { - "prefix": "/bar", - "cluster": "local_service_grpc" - } - ], - "virtual_clusters": [ - {"pattern": "^/bar$", "method": "POST", "name": "foo"}] - } - ] -} + const std::string yaml = R"EOF( +virtual_hosts: +- name: local_service + domains: + - "*" + routes: + - match: + prefix: "/foo" + route: + cluster: local_service_grpc + priority: high + - match: + prefix: "/bar" + route: + cluster: local_service_grpc + virtual_clusters: + - pattern: "^/bar$" + method: POST + name: foo )EOF"; - TestConfigImpl config(parseRouteConfigurationFromJson(json), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); EXPECT_EQ(Upstream::ResourcePriority::High, config.route(genHeaders("www.lyft.com", "/foo", "GET"), 0)->routeEntry()->priority()); @@ -1069,91 +1064,84 @@ TEST_F(RouteMatcherTest, Priority) { } TEST_F(RouteMatcherTest, NoHostRewriteAndAutoRewrite) { - const std::string json = R"EOF( -{ - "virtual_hosts": [ - { - "name": "local_service", - "domains": ["*"], - "routes": [ - { - "prefix": "/", - "cluster": "local_service", - "host_rewrite": "foo", - "auto_host_rewrite" : true - } - ] - } - ] -} + const std::string yaml = R"EOF( +virtual_hosts: +- name: local_service + domains: + - "*" + routes: + - match: + prefix: "/" + route: + cluster: local_service + host_rewrite: foo + auto_host_rewrite: true )EOF"; - EXPECT_THROW(TestConfigImpl(parseRouteConfigurationFromJson(json), factory_context_, true), + EXPECT_THROW(TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), EnvoyException); } TEST_F(RouteMatcherTest, HeaderMatchedRouting) { - const std::string json = R"EOF( -{ - "virtual_hosts": [ - { - "name": 
"local_service", - "domains": ["*"], - "routes": [ - { - "prefix": "/", - "cluster": "local_service_with_headers", - "headers" : [ - {"name": "test_header", "value": "test"} - ] - }, - { - "prefix": "/", - "cluster": "local_service_with_multiple_headers", - "headers" : [ - {"name": "test_header_multiple1", "value": "test1"}, - {"name": "test_header_multiple2", "value": "test2"} - ] - }, - { - "prefix": "/", - "cluster": "local_service_with_empty_headers", - "headers" : [ - {"name": "test_header_presence"} - ] - }, - { - "prefix": "/", - "cluster": "local_service_with_header_pattern_set_regex", - "headers" : [ - {"name": "test_header_pattern", "value": "^user=test-\\d+$", "regex": true} - ] - }, - { - "prefix": "/", - "cluster": "local_service_with_header_pattern_unset_regex", - "headers" : [ - {"name": "test_header_pattern", "value": "^customer=test-\\d+$"} - ] - }, - { - "prefix": "/", - "cluster": "local_service_with_header_range", - "headers" : [ - {"name": "test_header_range", "range_match": {"start" : 1, "end" : 10}} - ] - }, - { - "prefix": "/", - "cluster": "local_service_without_headers" - } - ] - } - ] -} + const std::string yaml = R"EOF( +virtual_hosts: +- name: local_service + domains: + - "*" + routes: + - match: + prefix: "/" + headers: + - name: test_header + exact_match: test + route: + cluster: local_service_with_headers + - match: + prefix: "/" + headers: + - name: test_header_multiple1 + exact_match: test1 + - name: test_header_multiple2 + exact_match: test2 + route: + cluster: local_service_with_multiple_headers + - match: + prefix: "/" + headers: + - name: test_header_presence + present_match: true + route: + cluster: local_service_with_empty_headers + - match: + prefix: "/" + headers: + - name: test_header_pattern + regex_match: "^user=test-\\d+$" + route: + cluster: local_service_with_header_pattern_set_regex + - match: + prefix: "/" + headers: + - name: test_header_pattern + exact_match: "^customer=test-\\d+$" + route: + cluster: 
local_service_with_header_pattern_unset_regex + - match: + prefix: "/" + headers: + - name: test_header_range + range_match: + start: 1 + end: 10 + route: + cluster: local_service_with_header_range + - match: + prefix: "/" + route: + cluster: local_service_without_headers )EOF"; - TestConfigImpl config(parseRouteConfigurationFromJson(json), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); { EXPECT_EQ("local_service_without_headers", @@ -1254,46 +1242,42 @@ TEST_F(RouteMatcherTest, InvalidHeaderMatchedRoutingConfig) { } TEST_F(RouteMatcherTest, QueryParamMatchedRouting) { - const std::string json = R"EOF( -{ - "virtual_hosts": [ - { - "name": "local_service", - "domains": ["*"], - "routes": [ - { - "prefix": "/", - "cluster": "local_service_with_multiple_query_parameters", - "query_parameters": [ - {"name": "id", "value": "\\d+[02468]", "regex": true}, - {"name": "debug"} - ] - }, - { - "prefix": "/", - "cluster": "local_service_with_query_parameter", - "query_parameters": [ - {"name": "param", "value": "test"} - ] - }, - { - "prefix": "/", - "cluster": "local_service_with_valueless_query_parameter", - "query_parameters": [ - {"name": "debug"} - ] - }, - { - "prefix": "/", - "cluster": "local_service_without_query_parameters" - } - ] - } - ] -} + const std::string yaml = R"EOF( +virtual_hosts: +- name: local_service + domains: + - "*" + routes: + - match: + prefix: "/" + query_parameters: + - name: id + value: "\\d+[02468]" + regex: true + - name: debug + route: + cluster: local_service_with_multiple_query_parameters + - match: + prefix: "/" + query_parameters: + - name: param + value: test + route: + cluster: local_service_with_query_parameter + - match: + prefix: "/" + query_parameters: + - name: debug + route: + cluster: local_service_with_valueless_query_parameter + - match: + prefix: "/" + route: + cluster: local_service_without_query_parameters + )EOF"; - TestConfigImpl 
config(parseRouteConfigurationFromJson(json), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); { Http::TestHeaderMapImpl headers = genHeaders("example.com", "/", "GET"); @@ -1386,27 +1370,22 @@ class RouterMatcherHashPolicyTest : public testing::Test, public ConfigImplTestB RouterMatcherHashPolicyTest() : add_cookie_nop_( [](const std::string&, const std::string&, std::chrono::seconds) { return ""; }) { - const std::string json = R"EOF( -{ - "virtual_hosts": [ - { - "name": "local_service", - "domains": ["*"], - "routes": [ - { - "prefix": "/foo", - "cluster": "foo" - }, - { - "prefix": "/bar", - "cluster": "bar" - } - ] - } - ] -} + const std::string yaml = R"EOF( +virtual_hosts: +- name: local_service + domains: + - "*" + routes: + - match: + prefix: "/foo" + route: + cluster: foo + - match: + prefix: "/bar" + route: + cluster: bar )EOF"; - route_config_ = parseRouteConfigurationFromJson(json); + route_config_ = parseRouteConfigurationFromV2Yaml(yaml); } envoy::api::v2::route::RouteAction_HashPolicy* firstRouteHashPolicy() { @@ -1836,30 +1815,25 @@ TEST_F(RouterMatcherHashPolicyTest, InvalidHashPolicies) { } TEST_F(RouteMatcherTest, ClusterHeader) { - const std::string json = R"EOF( -{ - "virtual_hosts": [ - { - "name": "local_service", - "domains": ["*"], - "routes": [ - { - "prefix": "/foo", - "cluster_header": ":authority" - }, - { - "prefix": "/bar", - "cluster_header": "some_header", - "timeout_ms": 0 - } - ] - } - ] -} + const std::string yaml = R"EOF( +virtual_hosts: +- name: local_service + domains: + - "*" + routes: + - match: + prefix: "/foo" + route: + cluster_header: ":authority" + - match: + prefix: "/bar" + route: + cluster_header: some_header + timeout: 0s )EOF"; NiceMock stream_info; - TestConfigImpl config(parseRouteConfigurationFromJson(json), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); EXPECT_EQ( 
"some_cluster", @@ -1889,31 +1863,26 @@ TEST_F(RouteMatcherTest, ClusterHeader) { } TEST_F(RouteMatcherTest, ContentType) { - const std::string json = R"EOF( -{ - "virtual_hosts": [ - { - "name": "local_service", - "domains": ["*"], - "routes": [ - { - "prefix": "/", - "cluster": "local_service_grpc", - "headers" : [ - {"name": "content-type", "value": "application/grpc"} - ] - }, - { - "prefix": "/", - "cluster": "local_service" - } - ] - } - ] -} + const std::string yaml = R"EOF( +virtual_hosts: +- name: local_service + domains: + - "*" + routes: + - match: + prefix: "/" + headers: + - name: content-type + exact_match: application/grpc + route: + cluster: local_service_grpc + - match: + prefix: "/" + route: + cluster: local_service )EOF"; - TestConfigImpl config(parseRouteConfigurationFromJson(json), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); { EXPECT_EQ("local_service", @@ -1975,24 +1944,18 @@ TEST_F(RouteMatcherTest, FractionalRuntime) { } TEST_F(RouteMatcherTest, ShadowClusterNotFound) { - const std::string json = R"EOF( -{ - "virtual_hosts": [ - { - "name": "www2", - "domains": ["www.lyft.com"], - "routes": [ - { - "prefix": "/foo", - "shadow": { - "cluster": "some_cluster" - }, - "cluster": "www2" - } - ] - } - ] -} + const std::string yaml = R"EOF( +virtual_hosts: +- name: www2 + domains: + - www.lyft.com + routes: + - match: + prefix: "/foo" + route: + request_mirror_policy: + cluster: some_cluster + cluster: www2 )EOF"; EXPECT_CALL(factory_context_.cluster_manager_, get("www2")) @@ -2000,79 +1963,64 @@ TEST_F(RouteMatcherTest, ShadowClusterNotFound) { EXPECT_CALL(factory_context_.cluster_manager_, get("some_cluster")) .WillRepeatedly(Return(nullptr)); - EXPECT_THROW(TestConfigImpl(parseRouteConfigurationFromJson(json), factory_context_, true), + EXPECT_THROW(TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), EnvoyException); } TEST_F(RouteMatcherTest, 
ClusterNotFound) { - const std::string json = R"EOF( -{ - "virtual_hosts": [ - { - "name": "www2", - "domains": ["www.lyft.com"], - "routes": [ - { - "prefix": "/foo", - "cluster": "www2" - } - ] - } - ] -} + const std::string yaml = R"EOF( +virtual_hosts: +- name: www2 + domains: + - www.lyft.com + routes: + - match: + prefix: "/foo" + route: + cluster: www2 )EOF"; EXPECT_CALL(factory_context_.cluster_manager_, get("www2")).WillRepeatedly(Return(nullptr)); - EXPECT_THROW(TestConfigImpl(parseRouteConfigurationFromJson(json), factory_context_, true), + EXPECT_THROW(TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), EnvoyException); } TEST_F(RouteMatcherTest, ClusterNotFoundNotChecking) { - const std::string json = R"EOF( -{ - "virtual_hosts": [ - { - "name": "www2", - "domains": ["www.lyft.com"], - "routes": [ - { - "prefix": "/foo", - "cluster": "www2" - } - ] - } - ] -} + const std::string yaml = R"EOF( +virtual_hosts: +- name: www2 + domains: + - www.lyft.com + routes: + - match: + prefix: "/foo" + route: + cluster: www2 )EOF"; EXPECT_CALL(factory_context_.cluster_manager_, get("www2")).WillRepeatedly(Return(nullptr)); - TestConfigImpl(parseRouteConfigurationFromJson(json), factory_context_, false); + TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, false); } TEST_F(RouteMatcherTest, ClusterNotFoundNotCheckingViaConfig) { - const std::string json = R"EOF( -{ - "validate_clusters": false, - "virtual_hosts": [ - { - "name": "www2", - "domains": ["www.lyft.com"], - "routes": [ - { - "prefix": "/foo", - "cluster": "www2" - } - ] - } - ] -} + const std::string yaml = R"EOF( +validate_clusters: false +virtual_hosts: +- name: www2 + domains: + - www.lyft.com + routes: + - match: + prefix: "/foo" + route: + cluster: www )EOF"; EXPECT_CALL(factory_context_.cluster_manager_, get("www2")).WillRepeatedly(Return(nullptr)); - TestConfigImpl(parseRouteConfigurationFromJson(json), factory_context_, true); + 
TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); } TEST_F(RouteMatcherTest, AttemptCountHeader) { @@ -2157,39 +2105,32 @@ TEST_F(RouteMatcherTest, ClusterNotFoundResponseCodeConfig404) { } TEST_F(RouteMatcherTest, Shadow) { - const std::string json = R"EOF( -{ - "virtual_hosts": [ - { - "name": "www2", - "domains": ["www.lyft.com"], - "routes": [ - { - "prefix": "/foo", - "shadow": { - "cluster": "some_cluster" - }, - "cluster": "www2" - }, - { - "prefix": "/bar", - "shadow": { - "cluster": "some_cluster2", - "runtime_key": "foo" - }, - "cluster": "www2" - }, - { - "prefix": "/baz", - "cluster": "www2" - } - ] - } - ] -} + const std::string yaml = R"EOF( +virtual_hosts: +- name: www2 + domains: + - www.lyft.com + routes: + - match: + prefix: "/foo" + route: + request_mirror_policy: + cluster: some_cluster + cluster: www2 + - match: + prefix: "/bar" + route: + request_mirror_policy: + cluster: some_cluster2 + runtime_key: foo + cluster: www2 + - match: + prefix: "/baz" + route: + cluster: www2 )EOF"; - TestConfigImpl config(parseRouteConfigurationFromJson(json), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); EXPECT_EQ("some_cluster", config.route(genHeaders("www.lyft.com", "/foo", "GET"), 0) ->routeEntry() @@ -2264,40 +2205,33 @@ name: foo } TEST_F(RouteMatcherTest, Retry) { - const std::string json = R"EOF( -{ - "virtual_hosts": [ - { - "name": "www2", - "domains": ["www.lyft.com"], - "routes": [ - { - "prefix": "/foo", - "cluster": "www2", - "retry_policy": { - "retry_on": "connect-failure" - } - }, - { - "prefix": "/bar", - "cluster": "www2" - }, - { - "prefix": "/", - "cluster": "www2", - "retry_policy": { - "per_try_timeout_ms" : 1000, - "num_retries": 3, - "retry_on": "5xx,gateway-error,connect-failure" - } - } - ] - } - ] -} + const std::string yaml = R"EOF( +virtual_hosts: +- name: www2 + domains: + - www.lyft.com + routes: + - match: + prefix: "/foo" + route: 
+ cluster: www2 + retry_policy: + retry_on: connect-failure + - match: + prefix: "/bar" + route: + cluster: www2 + - match: + prefix: "/" + route: + cluster: www2 + retry_policy: + per_try_timeout: 1s + num_retries: 3 + retry_on: 5xx,gateway-error,connect-failure )EOF"; - TestConfigImpl config(parseRouteConfigurationFromJson(json), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); EXPECT_EQ(std::chrono::milliseconds(0), config.route(genHeaders("www.lyft.com", "/foo", "GET"), 0) @@ -2415,40 +2349,33 @@ name: RetryVirtualHostLevel } TEST_F(RouteMatcherTest, GrpcRetry) { - const std::string json = R"EOF( -{ - "virtual_hosts": [ - { - "name": "www2", - "domains": ["www.lyft.com"], - "routes": [ - { - "prefix": "/foo", - "cluster": "www2", - "retry_policy": { - "retry_on": "connect-failure" - } - }, - { - "prefix": "/bar", - "cluster": "www2" - }, - { - "prefix": "/", - "cluster": "www2", - "retry_policy": { - "per_try_timeout_ms" : 1000, - "num_retries": 3, - "retry_on": "5xx,deadline-exceeded,resource-exhausted" - } - } - ] - } - ] -} + const std::string yaml = R"EOF( +virtual_hosts: +- name: www2 + domains: + - www.lyft.com + routes: + - match: + prefix: "/foo" + route: + cluster: www2 + retry_policy: + retry_on: connect-failure + - match: + prefix: "/bar" + route: + cluster: www2 + - match: + prefix: "/" + route: + cluster: www2 + retry_policy: + per_try_timeout: 1s + num_retries: 3 + retry_on: 5xx,deadline-exceeded,resource-exhausted )EOF"; - TestConfigImpl config(parseRouteConfigurationFromJson(json), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); EXPECT_EQ(std::chrono::milliseconds(0), config.route(genHeaders("www.lyft.com", "/foo", "GET"), 0) @@ -2642,71 +2569,57 @@ name: HedgeVirtualHostLevel } TEST_F(RouteMatcherTest, TestBadDefaultConfig) { - const std::string json = R"EOF( -{ - "virtual_hosts": [ - { - "name": "www2", - 
"domains": ["*"], - "routes": [ - { - "prefix": "/", - "cluster": "www2" - } - ] - }, - { - "name": "www2_staging", - "domains": ["*"], - "routes": [ - { - "prefix": "/", - "cluster": "www2_staging" - } - ] - } - ], - - "internal_only_headers": [ - "x-lyft-user-id" - ] -} + const std::string yaml = R"EOF( +virtual_hosts: +- name: www2 + domains: + - "*" + routes: + - match: + prefix: "/" + route: + cluster: www2 +- name: www2_staging + domains: + - "*" + routes: + - match: + prefix: "/" + route: + cluster: www2_staging +internal_only_headers: +- x-lyft-user-id )EOF"; - EXPECT_THROW(TestConfigImpl config(parseRouteConfigurationFromJson(json), factory_context_, true), - EnvoyException); + EXPECT_THROW( + TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), + EnvoyException); } TEST_F(RouteMatcherTest, TestDuplicateDomainConfig) { - const std::string json = R"EOF( -{ - "virtual_hosts": [ - { - "name": "www2", - "domains": ["www.lyft.com"], - "routes": [ - { - "prefix": "/", - "cluster": "www2" - } - ] - }, - { - "name": "www2_staging", - "domains": ["www.lyft.com"], - "routes": [ - { - "prefix": "/", - "cluster": "www2_staging" - } - ] - } - ] -} + const std::string yaml = R"EOF( +virtual_hosts: +- name: www2 + domains: + - www.lyft.com + routes: + - match: + prefix: "/" + route: + cluster: www2 +- name: www2_staging + domains: + - www.lyft.com + routes: + - match: + prefix: "/" + route: + cluster: www2_staging )EOF"; - EXPECT_THROW(TestConfigImpl config(parseRouteConfigurationFromJson(json), factory_context_, true), - EnvoyException); + EXPECT_THROW( + TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), + EnvoyException); } // Test to detect if hostname matches are case-insensitive @@ -2744,59 +2657,10 @@ static Http::TestHeaderMapImpl genRedirectHeaders(const std::string& host, const } TEST_F(RouteMatcherTest, DirectResponse) { - static const std::string v1_json = R"EOF( -{ - "virtual_hosts": [ - 
{ - "name": "www2", - "domains": ["www.lyft.com"], - "require_ssl": "all", - "routes": [ - { - "prefix": "/", - "cluster": "www2" - } - ] - }, - { - "name": "api", - "domains": ["api.lyft.com"], - "require_ssl": "external_only", - "routes": [ - { - "prefix": "/", - "cluster": "www2" - } - ] - }, - { - "name": "redirect", - "domains": ["redirect.lyft.com"], - "routes": [ - { - "path": "/host", - "host_redirect": "new.lyft.com" - }, - { - "path": "/path", - "path_redirect": "/new_path" - }, - { - "path": "/host_path", - "host_redirect": "new.lyft.com", - "path_redirect": "/new_path" - } - ] - } - ] -} - )EOF"; - const auto pathname = TestEnvironment::writeStringToFileForTest("direct_response_body", "Example text 3"); - // A superset of v1_json, with API v2 direct-response configuration added. - static const std::string v2_yaml = R"EOF( + static const std::string yaml = R"EOF( name: foo virtual_hosts: - name: www2 @@ -2939,316 +2803,298 @@ name: foo direct_response: status: 200 body: { filename: )EOF" + pathname + - R"EOF(} + R"EOF(} - match: { prefix: / } route: { cluster: www2 } )EOF"; - auto testConfig = [](const ConfigImpl& config, bool test_v2 = false) { - EXPECT_EQ(nullptr, config.route(genRedirectHeaders("www.foo.com", "/foo", true, true), 0)); - { - Http::TestHeaderMapImpl headers = genRedirectHeaders("www.lyft.com", "/foo", true, true); - EXPECT_EQ(nullptr, config.route(headers, 0)->directResponseEntry()); - } - { - Http::TestHeaderMapImpl headers = genRedirectHeaders("www.lyft.com", "/foo", false, false); - EXPECT_EQ("https://www.lyft.com/foo", - config.route(headers, 0)->directResponseEntry()->newPath(headers)); - EXPECT_EQ(nullptr, config.route(headers, 0)->decorator()); - } - { - Http::TestHeaderMapImpl headers = genRedirectHeaders("api.lyft.com", "/foo", false, true); - EXPECT_EQ(nullptr, config.route(headers, 0)->directResponseEntry()); - } - { - Http::TestHeaderMapImpl headers = genRedirectHeaders("api.lyft.com", "/foo", false, false); - 
EXPECT_EQ("https://api.lyft.com/foo", - config.route(headers, 0)->directResponseEntry()->newPath(headers)); - } - { - Http::TestHeaderMapImpl headers = - genRedirectHeaders("redirect.lyft.com", "/host", false, false); - EXPECT_EQ("http://new.lyft.com/host", - config.route(headers, 0)->directResponseEntry()->newPath(headers)); - } - { - Http::TestHeaderMapImpl headers = - genRedirectHeaders("redirect.lyft.com", "/path", true, false); - EXPECT_EQ("https://redirect.lyft.com/new_path", - config.route(headers, 0)->directResponseEntry()->newPath(headers)); - } - { - Http::TestHeaderMapImpl headers = - genRedirectHeaders("redirect.lyft.com", "/host_path", true, false); - EXPECT_EQ("https://new.lyft.com/new_path", - config.route(headers, 0)->directResponseEntry()->newPath(headers)); - } - if (!test_v2) { - return; - } - { - Http::TestHeaderMapImpl headers = - genRedirectHeaders("direct.example.com", "/gone", true, false); - EXPECT_EQ(Http::Code::Gone, config.route(headers, 0)->directResponseEntry()->responseCode()); - EXPECT_EQ("Example text 1", config.route(headers, 0)->directResponseEntry()->responseBody()); - } - { - Http::TestHeaderMapImpl headers = - genRedirectHeaders("direct.example.com", "/error", true, false); - EXPECT_EQ(Http::Code::InternalServerError, - config.route(headers, 0)->directResponseEntry()->responseCode()); - EXPECT_EQ("Example text 2", config.route(headers, 0)->directResponseEntry()->responseBody()); - } - { - Http::TestHeaderMapImpl headers = - genRedirectHeaders("direct.example.com", "/no_body", true, false); - EXPECT_EQ(Http::Code::OK, config.route(headers, 0)->directResponseEntry()->responseCode()); - EXPECT_TRUE(config.route(headers, 0)->directResponseEntry()->responseBody().empty()); - } - { - Http::TestHeaderMapImpl headers = - genRedirectHeaders("direct.example.com", "/static", true, false); - EXPECT_EQ(Http::Code::OK, config.route(headers, 0)->directResponseEntry()->responseCode()); - EXPECT_EQ("Example text 3", config.route(headers, 
0)->directResponseEntry()->responseBody()); - } - { - Http::TestHeaderMapImpl headers = - genRedirectHeaders("direct.example.com", "/other", true, false); - EXPECT_EQ(nullptr, config.route(headers, 0)->directResponseEntry()); - } - { - Http::TestHeaderMapImpl headers = - genRedirectHeaders("redirect.lyft.com", "/https", false, false); - EXPECT_EQ("https://redirect.lyft.com/https", - config.route(headers, 0)->directResponseEntry()->newPath(headers)); - } - { - Http::TestHeaderMapImpl headers = - genRedirectHeaders("redirect.lyft.com", "/host_https", false, false); - EXPECT_EQ("https://new.lyft.com/host_https", - config.route(headers, 0)->directResponseEntry()->newPath(headers)); - } - { - Http::TestHeaderMapImpl headers = - genRedirectHeaders("redirect.lyft.com", "/path_https", false, false); - EXPECT_EQ("https://redirect.lyft.com/new_path", - config.route(headers, 0)->directResponseEntry()->newPath(headers)); - } - { - Http::TestHeaderMapImpl headers = - genRedirectHeaders("redirect.lyft.com", "/host_path_https", false, false); - EXPECT_EQ("https://new.lyft.com/new_path", - config.route(headers, 0)->directResponseEntry()->newPath(headers)); - } - { - Http::TestHeaderMapImpl headers = - genRedirectHeaders("redirect.lyft.com", "/port", false, false); - EXPECT_EQ("http://redirect.lyft.com:8080/port", - config.route(headers, 0)->directResponseEntry()->newPath(headers)); - } - { - Http::TestHeaderMapImpl headers = - genRedirectHeaders("redirect.lyft.com:8080", "/port", false, false); - EXPECT_EQ("http://redirect.lyft.com:8181/port", - config.route(headers, 0)->directResponseEntry()->newPath(headers)); - } - { - Http::TestHeaderMapImpl headers = - genRedirectHeaders("redirect.lyft.com", "/host_port", false, false); - EXPECT_EQ("http://new.lyft.com:8080/host_port", - config.route(headers, 0)->directResponseEntry()->newPath(headers)); - } - { - Http::TestHeaderMapImpl headers = - genRedirectHeaders("redirect.lyft.com", "/scheme_host_port", false, false); - 
EXPECT_EQ("ws://new.lyft.com:8080/scheme_host_port", - config.route(headers, 0)->directResponseEntry()->newPath(headers)); - } - { - Http::TestHeaderMapImpl headers = - genRedirectHeaders("redirect.lyft.com:80", "/ws", true, false); - EXPECT_EQ("ws://redirect.lyft.com:80/ws", - config.route(headers, 0)->directResponseEntry()->newPath(headers)); - } - { - Http::TestHeaderMapImpl headers = - genRedirectHeaders("redirect.lyft.com:80", "/host_path_https", false, false); - EXPECT_EQ("https://new.lyft.com/new_path", - config.route(headers, 0)->directResponseEntry()->newPath(headers)); - } - { - Http::TestHeaderMapImpl headers = - genRedirectHeaders("redirect.lyft.com:80", "/scheme_host_port", false, false); - EXPECT_EQ("ws://new.lyft.com:8080/scheme_host_port", - config.route(headers, 0)->directResponseEntry()->newPath(headers)); - } - { - Http::TestHeaderMapImpl headers = - genRedirectHeaders("redirect.lyft.com:443", "/ws", false, false); - EXPECT_EQ("ws://redirect.lyft.com:443/ws", - config.route(headers, 0)->directResponseEntry()->newPath(headers)); - } - { - Http::TestHeaderMapImpl headers = - genRedirectHeaders("redirect.lyft.com:443", "/host_path_http", true, false); - EXPECT_EQ("http://new.lyft.com/new_path", - config.route(headers, 0)->directResponseEntry()->newPath(headers)); - } - { - Http::TestHeaderMapImpl headers = - genRedirectHeaders("redirect.lyft.com:443", "/scheme_host_port", true, false); - EXPECT_EQ("ws://new.lyft.com:8080/scheme_host_port", - config.route(headers, 0)->directResponseEntry()->newPath(headers)); - } - { - Http::TestHeaderMapImpl headers = genRedirectHeaders("10.0.0.1", "/port", false, false); - EXPECT_EQ("http://10.0.0.1:8080/port", - config.route(headers, 0)->directResponseEntry()->newPath(headers)); - } - { - Http::TestHeaderMapImpl headers = genRedirectHeaders("10.0.0.1:8080", "/port", false, false); - EXPECT_EQ("http://10.0.0.1:8181/port", - config.route(headers, 0)->directResponseEntry()->newPath(headers)); - } - { - 
Http::TestHeaderMapImpl headers = genRedirectHeaders("10.0.0.1", "/host_port", false, false); - EXPECT_EQ("http://20.0.0.2:8080/host_port", - config.route(headers, 0)->directResponseEntry()->newPath(headers)); - } - { - Http::TestHeaderMapImpl headers = - genRedirectHeaders("10.0.0.1", "/scheme_host_port", false, false); - EXPECT_EQ("ws://20.0.0.2:8080/scheme_host_port", - config.route(headers, 0)->directResponseEntry()->newPath(headers)); - } - { - Http::TestHeaderMapImpl headers = genRedirectHeaders("10.0.0.1:80", "/ws", true, false); - EXPECT_EQ("ws://10.0.0.1:80/ws", - config.route(headers, 0)->directResponseEntry()->newPath(headers)); - } - { - Http::TestHeaderMapImpl headers = - genRedirectHeaders("10.0.0.1:80", "/host_path_https", false, false); - EXPECT_EQ("https://20.0.0.2/new_path", - config.route(headers, 0)->directResponseEntry()->newPath(headers)); - } - { - Http::TestHeaderMapImpl headers = - genRedirectHeaders("10.0.0.1:80", "/scheme_host_port", false, false); - EXPECT_EQ("ws://20.0.0.2:8080/scheme_host_port", - config.route(headers, 0)->directResponseEntry()->newPath(headers)); - } - { - Http::TestHeaderMapImpl headers = genRedirectHeaders("10.0.0.1:443", "/ws", false, false); - EXPECT_EQ("ws://10.0.0.1:443/ws", - config.route(headers, 0)->directResponseEntry()->newPath(headers)); - } - { - Http::TestHeaderMapImpl headers = - genRedirectHeaders("10.0.0.1:443", "/host_path_http", true, false); - EXPECT_EQ("http://20.0.0.2/new_path", - config.route(headers, 0)->directResponseEntry()->newPath(headers)); - } - { - Http::TestHeaderMapImpl headers = - genRedirectHeaders("10.0.0.1:443", "/scheme_host_port", true, false); - EXPECT_EQ("ws://20.0.0.2:8080/scheme_host_port", - config.route(headers, 0)->directResponseEntry()->newPath(headers)); - } - { - Http::TestHeaderMapImpl headers = genRedirectHeaders("[fe80::1]", "/port", false, false); - - EXPECT_EQ("http://[fe80::1]:8080/port", - config.route(headers, 0)->directResponseEntry()->newPath(headers)); - } - 
{ - Http::TestHeaderMapImpl headers = genRedirectHeaders("[fe80::1]:8080", "/port", false, false); - EXPECT_EQ("http://[fe80::1]:8181/port", - config.route(headers, 0)->directResponseEntry()->newPath(headers)); - } - { - Http::TestHeaderMapImpl headers = genRedirectHeaders("[fe80::1]", "/host_port", false, false); - EXPECT_EQ("http://[fe80::2]:8080/host_port", - config.route(headers, 0)->directResponseEntry()->newPath(headers)); - } - { - Http::TestHeaderMapImpl headers = - genRedirectHeaders("[fe80::1]", "/scheme_host_port", false, false); - EXPECT_EQ("ws://[fe80::2]:8080/scheme_host_port", - config.route(headers, 0)->directResponseEntry()->newPath(headers)); - } - { - Http::TestHeaderMapImpl headers = genRedirectHeaders("[fe80::1]:80", "/ws", true, false); - EXPECT_EQ("ws://[fe80::1]:80/ws", - config.route(headers, 0)->directResponseEntry()->newPath(headers)); - } - { - Http::TestHeaderMapImpl headers = - genRedirectHeaders("[fe80::1]:80", "/host_path_https", false, false); - EXPECT_EQ("https://[fe80::2]/new_path", - config.route(headers, 0)->directResponseEntry()->newPath(headers)); - } - { - Http::TestHeaderMapImpl headers = - genRedirectHeaders("[fe80::1]:80", "/scheme_host_port", false, false); - EXPECT_EQ("ws://[fe80::2]:8080/scheme_host_port", - config.route(headers, 0)->directResponseEntry()->newPath(headers)); - } - { - Http::TestHeaderMapImpl headers = genRedirectHeaders("[fe80::1]:443", "/ws", false, false); - EXPECT_EQ("ws://[fe80::1]:443/ws", - config.route(headers, 0)->directResponseEntry()->newPath(headers)); - } - { - Http::TestHeaderMapImpl headers = - genRedirectHeaders("[fe80::1]:443", "/host_path_http", true, false); - EXPECT_EQ("http://[fe80::2]/new_path", - config.route(headers, 0)->directResponseEntry()->newPath(headers)); - } - { - Http::TestHeaderMapImpl headers = - genRedirectHeaders("[fe80::1]:443", "/scheme_host_port", true, false); - EXPECT_EQ("ws://[fe80::2]:8080/scheme_host_port", - config.route(headers, 
0)->directResponseEntry()->newPath(headers)); - } - }; - - TestConfigImpl v1_json_config(parseRouteConfigurationFromJson(v1_json), factory_context_, true); - testConfig(v1_json_config); + TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + EXPECT_EQ(nullptr, config.route(genRedirectHeaders("www.foo.com", "/foo", true, true), 0)); + { + Http::TestHeaderMapImpl headers = genRedirectHeaders("www.lyft.com", "/foo", true, true); + EXPECT_EQ(nullptr, config.route(headers, 0)->directResponseEntry()); + } + { + Http::TestHeaderMapImpl headers = genRedirectHeaders("www.lyft.com", "/foo", false, false); + EXPECT_EQ("https://www.lyft.com/foo", + config.route(headers, 0)->directResponseEntry()->newPath(headers)); + EXPECT_EQ(nullptr, config.route(headers, 0)->decorator()); + } + { + Http::TestHeaderMapImpl headers = genRedirectHeaders("api.lyft.com", "/foo", false, true); + EXPECT_EQ(nullptr, config.route(headers, 0)->directResponseEntry()); + } + { + Http::TestHeaderMapImpl headers = genRedirectHeaders("api.lyft.com", "/foo", false, false); + EXPECT_EQ("https://api.lyft.com/foo", + config.route(headers, 0)->directResponseEntry()->newPath(headers)); + } + { + Http::TestHeaderMapImpl headers = + genRedirectHeaders("redirect.lyft.com", "/host", false, false); + EXPECT_EQ("http://new.lyft.com/host", + config.route(headers, 0)->directResponseEntry()->newPath(headers)); + } + { + Http::TestHeaderMapImpl headers = genRedirectHeaders("redirect.lyft.com", "/path", true, false); + EXPECT_EQ("https://redirect.lyft.com/new_path", + config.route(headers, 0)->directResponseEntry()->newPath(headers)); + } + { + Http::TestHeaderMapImpl headers = + genRedirectHeaders("redirect.lyft.com", "/host_path", true, false); + EXPECT_EQ("https://new.lyft.com/new_path", + config.route(headers, 0)->directResponseEntry()->newPath(headers)); + } + { + Http::TestHeaderMapImpl headers = + genRedirectHeaders("direct.example.com", "/gone", true, false); + 
EXPECT_EQ(Http::Code::Gone, config.route(headers, 0)->directResponseEntry()->responseCode()); + EXPECT_EQ("Example text 1", config.route(headers, 0)->directResponseEntry()->responseBody()); + } + { + Http::TestHeaderMapImpl headers = + genRedirectHeaders("direct.example.com", "/error", true, false); + EXPECT_EQ(Http::Code::InternalServerError, + config.route(headers, 0)->directResponseEntry()->responseCode()); + EXPECT_EQ("Example text 2", config.route(headers, 0)->directResponseEntry()->responseBody()); + } + { + Http::TestHeaderMapImpl headers = + genRedirectHeaders("direct.example.com", "/no_body", true, false); + EXPECT_EQ(Http::Code::OK, config.route(headers, 0)->directResponseEntry()->responseCode()); + EXPECT_TRUE(config.route(headers, 0)->directResponseEntry()->responseBody().empty()); + } + { + Http::TestHeaderMapImpl headers = + genRedirectHeaders("direct.example.com", "/static", true, false); + EXPECT_EQ(Http::Code::OK, config.route(headers, 0)->directResponseEntry()->responseCode()); + EXPECT_EQ("Example text 3", config.route(headers, 0)->directResponseEntry()->responseBody()); + } + { + Http::TestHeaderMapImpl headers = + genRedirectHeaders("direct.example.com", "/other", true, false); + EXPECT_EQ(nullptr, config.route(headers, 0)->directResponseEntry()); + } + { + Http::TestHeaderMapImpl headers = + genRedirectHeaders("redirect.lyft.com", "/https", false, false); + EXPECT_EQ("https://redirect.lyft.com/https", + config.route(headers, 0)->directResponseEntry()->newPath(headers)); + } + { + Http::TestHeaderMapImpl headers = + genRedirectHeaders("redirect.lyft.com", "/host_https", false, false); + EXPECT_EQ("https://new.lyft.com/host_https", + config.route(headers, 0)->directResponseEntry()->newPath(headers)); + } + { + Http::TestHeaderMapImpl headers = + genRedirectHeaders("redirect.lyft.com", "/path_https", false, false); + EXPECT_EQ("https://redirect.lyft.com/new_path", + config.route(headers, 0)->directResponseEntry()->newPath(headers)); + } + { + 
Http::TestHeaderMapImpl headers = + genRedirectHeaders("redirect.lyft.com", "/host_path_https", false, false); + EXPECT_EQ("https://new.lyft.com/new_path", + config.route(headers, 0)->directResponseEntry()->newPath(headers)); + } + { + Http::TestHeaderMapImpl headers = + genRedirectHeaders("redirect.lyft.com", "/port", false, false); + EXPECT_EQ("http://redirect.lyft.com:8080/port", + config.route(headers, 0)->directResponseEntry()->newPath(headers)); + } + { + Http::TestHeaderMapImpl headers = + genRedirectHeaders("redirect.lyft.com:8080", "/port", false, false); + EXPECT_EQ("http://redirect.lyft.com:8181/port", + config.route(headers, 0)->directResponseEntry()->newPath(headers)); + } + { + Http::TestHeaderMapImpl headers = + genRedirectHeaders("redirect.lyft.com", "/host_port", false, false); + EXPECT_EQ("http://new.lyft.com:8080/host_port", + config.route(headers, 0)->directResponseEntry()->newPath(headers)); + } + { + Http::TestHeaderMapImpl headers = + genRedirectHeaders("redirect.lyft.com", "/scheme_host_port", false, false); + EXPECT_EQ("ws://new.lyft.com:8080/scheme_host_port", + config.route(headers, 0)->directResponseEntry()->newPath(headers)); + } + { + Http::TestHeaderMapImpl headers = + genRedirectHeaders("redirect.lyft.com:80", "/ws", true, false); + EXPECT_EQ("ws://redirect.lyft.com:80/ws", + config.route(headers, 0)->directResponseEntry()->newPath(headers)); + } + { + Http::TestHeaderMapImpl headers = + genRedirectHeaders("redirect.lyft.com:80", "/host_path_https", false, false); + EXPECT_EQ("https://new.lyft.com/new_path", + config.route(headers, 0)->directResponseEntry()->newPath(headers)); + } + { + Http::TestHeaderMapImpl headers = + genRedirectHeaders("redirect.lyft.com:80", "/scheme_host_port", false, false); + EXPECT_EQ("ws://new.lyft.com:8080/scheme_host_port", + config.route(headers, 0)->directResponseEntry()->newPath(headers)); + } + { + Http::TestHeaderMapImpl headers = + genRedirectHeaders("redirect.lyft.com:443", "/ws", false, false); + 
EXPECT_EQ("ws://redirect.lyft.com:443/ws", + config.route(headers, 0)->directResponseEntry()->newPath(headers)); + } + { + Http::TestHeaderMapImpl headers = + genRedirectHeaders("redirect.lyft.com:443", "/host_path_http", true, false); + EXPECT_EQ("http://new.lyft.com/new_path", + config.route(headers, 0)->directResponseEntry()->newPath(headers)); + } + { + Http::TestHeaderMapImpl headers = + genRedirectHeaders("redirect.lyft.com:443", "/scheme_host_port", true, false); + EXPECT_EQ("ws://new.lyft.com:8080/scheme_host_port", + config.route(headers, 0)->directResponseEntry()->newPath(headers)); + } + { + Http::TestHeaderMapImpl headers = genRedirectHeaders("10.0.0.1", "/port", false, false); + EXPECT_EQ("http://10.0.0.1:8080/port", + config.route(headers, 0)->directResponseEntry()->newPath(headers)); + } + { + Http::TestHeaderMapImpl headers = genRedirectHeaders("10.0.0.1:8080", "/port", false, false); + EXPECT_EQ("http://10.0.0.1:8181/port", + config.route(headers, 0)->directResponseEntry()->newPath(headers)); + } + { + Http::TestHeaderMapImpl headers = genRedirectHeaders("10.0.0.1", "/host_port", false, false); + EXPECT_EQ("http://20.0.0.2:8080/host_port", + config.route(headers, 0)->directResponseEntry()->newPath(headers)); + } + { + Http::TestHeaderMapImpl headers = + genRedirectHeaders("10.0.0.1", "/scheme_host_port", false, false); + EXPECT_EQ("ws://20.0.0.2:8080/scheme_host_port", + config.route(headers, 0)->directResponseEntry()->newPath(headers)); + } + { + Http::TestHeaderMapImpl headers = genRedirectHeaders("10.0.0.1:80", "/ws", true, false); + EXPECT_EQ("ws://10.0.0.1:80/ws", + config.route(headers, 0)->directResponseEntry()->newPath(headers)); + } + { + Http::TestHeaderMapImpl headers = + genRedirectHeaders("10.0.0.1:80", "/host_path_https", false, false); + EXPECT_EQ("https://20.0.0.2/new_path", + config.route(headers, 0)->directResponseEntry()->newPath(headers)); + } + { + Http::TestHeaderMapImpl headers = + genRedirectHeaders("10.0.0.1:80", 
"/scheme_host_port", false, false); + EXPECT_EQ("ws://20.0.0.2:8080/scheme_host_port", + config.route(headers, 0)->directResponseEntry()->newPath(headers)); + } + { + Http::TestHeaderMapImpl headers = genRedirectHeaders("10.0.0.1:443", "/ws", false, false); + EXPECT_EQ("ws://10.0.0.1:443/ws", + config.route(headers, 0)->directResponseEntry()->newPath(headers)); + } + { + Http::TestHeaderMapImpl headers = + genRedirectHeaders("10.0.0.1:443", "/host_path_http", true, false); + EXPECT_EQ("http://20.0.0.2/new_path", + config.route(headers, 0)->directResponseEntry()->newPath(headers)); + } + { + Http::TestHeaderMapImpl headers = + genRedirectHeaders("10.0.0.1:443", "/scheme_host_port", true, false); + EXPECT_EQ("ws://20.0.0.2:8080/scheme_host_port", + config.route(headers, 0)->directResponseEntry()->newPath(headers)); + } + { + Http::TestHeaderMapImpl headers = genRedirectHeaders("[fe80::1]", "/port", false, false); - TestConfigImpl v2_yaml_config(parseRouteConfigurationFromV2Yaml(v2_yaml), factory_context_, true); - testConfig(v2_yaml_config, true); + EXPECT_EQ("http://[fe80::1]:8080/port", + config.route(headers, 0)->directResponseEntry()->newPath(headers)); + } + { + Http::TestHeaderMapImpl headers = genRedirectHeaders("[fe80::1]:8080", "/port", false, false); + EXPECT_EQ("http://[fe80::1]:8181/port", + config.route(headers, 0)->directResponseEntry()->newPath(headers)); + } + { + Http::TestHeaderMapImpl headers = genRedirectHeaders("[fe80::1]", "/host_port", false, false); + EXPECT_EQ("http://[fe80::2]:8080/host_port", + config.route(headers, 0)->directResponseEntry()->newPath(headers)); + } + { + Http::TestHeaderMapImpl headers = + genRedirectHeaders("[fe80::1]", "/scheme_host_port", false, false); + EXPECT_EQ("ws://[fe80::2]:8080/scheme_host_port", + config.route(headers, 0)->directResponseEntry()->newPath(headers)); + } + { + Http::TestHeaderMapImpl headers = genRedirectHeaders("[fe80::1]:80", "/ws", true, false); + EXPECT_EQ("ws://[fe80::1]:80/ws", + 
config.route(headers, 0)->directResponseEntry()->newPath(headers)); + } + { + Http::TestHeaderMapImpl headers = + genRedirectHeaders("[fe80::1]:80", "/host_path_https", false, false); + EXPECT_EQ("https://[fe80::2]/new_path", + config.route(headers, 0)->directResponseEntry()->newPath(headers)); + } + { + Http::TestHeaderMapImpl headers = + genRedirectHeaders("[fe80::1]:80", "/scheme_host_port", false, false); + EXPECT_EQ("ws://[fe80::2]:8080/scheme_host_port", + config.route(headers, 0)->directResponseEntry()->newPath(headers)); + } + { + Http::TestHeaderMapImpl headers = genRedirectHeaders("[fe80::1]:443", "/ws", false, false); + EXPECT_EQ("ws://[fe80::1]:443/ws", + config.route(headers, 0)->directResponseEntry()->newPath(headers)); + } + { + Http::TestHeaderMapImpl headers = + genRedirectHeaders("[fe80::1]:443", "/host_path_http", true, false); + EXPECT_EQ("http://[fe80::2]/new_path", + config.route(headers, 0)->directResponseEntry()->newPath(headers)); + } + { + Http::TestHeaderMapImpl headers = + genRedirectHeaders("[fe80::1]:443", "/scheme_host_port", true, false); + EXPECT_EQ("ws://[fe80::2]:8080/scheme_host_port", + config.route(headers, 0)->directResponseEntry()->newPath(headers)); + } } TEST_F(RouteMatcherTest, ExclusiveRouteEntryOrDirectResponseEntry) { - const std::string json = R"EOF( -{ - "virtual_hosts": [ - { - "name": "www2", - "domains": ["www.lyft.com"], - "routes": [ - { - "prefix": "/", - "cluster": "www2" - } - ] - }, - { - "name": "redirect", - "domains": ["redirect.lyft.com"], - "routes": [ - { - "prefix": "/foo", - "host_redirect": "new.lyft.com" - } - ] - } - ] -} + const std::string yaml = R"EOF( +virtual_hosts: +- name: www2 + domains: + - www.lyft.com + routes: + - match: + prefix: "/" + route: + cluster: www2 +- name: redirect + domains: + - redirect.lyft.com + routes: + - match: + prefix: "/foo" + redirect: + host_redirect: new.lyft.com )EOF"; - TestConfigImpl config(parseRouteConfigurationFromJson(json), factory_context_, true); + 
TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); { Http::TestHeaderMapImpl headers = genRedirectHeaders("www.lyft.com", "/foo", true, true); @@ -3264,40 +3110,34 @@ TEST_F(RouteMatcherTest, ExclusiveRouteEntryOrDirectResponseEntry) { } TEST_F(RouteMatcherTest, ExclusiveWeightedClustersEntryOrDirectResponseEntry) { - const std::string json = R"EOF( -{ - "virtual_hosts": [ - { - "name": "www2", - "domains": ["www.lyft.com"], - "routes": [ - { - "prefix": "/", - "weighted_clusters": { - "clusters" : [{ "name" : "www2", "weight" : 100 }] - } - } - ] - }, - { - "name": "redirect", - "domains": ["redirect.lyft.com"], - "routes": [ - { - "prefix": "/foo", - "host_redirect": "new.lyft.com" - }, - { - "prefix": "/foo1", - "host_redirect": "[fe80::1]" - } - ] - } - ] -} + const std::string yaml = R"EOF( +virtual_hosts: +- name: www2 + domains: + - www.lyft.com + routes: + - match: + prefix: "/" + route: + weighted_clusters: + clusters: + - name: www2 + weight: 100 +- name: redirect + domains: + - redirect.lyft.com + routes: + - match: + prefix: "/foo" + redirect: + host_redirect: new.lyft.com + - match: + prefix: "/foo1" + redirect: + host_redirect: "[fe80::1]" )EOF"; - TestConfigImpl config(parseRouteConfigurationFromJson(json), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); { Http::TestHeaderMapImpl headers = genRedirectHeaders("www.lyft.com", "/foo", true, true); @@ -3514,80 +3354,64 @@ TEST_F(RouteMatcherTest, WeightedClusters) { } TEST_F(RouteMatcherTest, ExclusiveWeightedClustersOrClusterConfig) { - const std::string json = R"EOF( -{ - "virtual_hosts": [ - { - "name": "www2", - "domains": ["www.lyft.com"], - "routes": [ - { - "prefix": "/", - "weighted_clusters": { - "clusters" : [ - { "name" : "cluster1", "weight" : 30 }, - { "name" : "cluster2", "weight" : 30 }, - { "name" : "cluster3", "weight" : 40 } - ] - }, - "cluster" : "www2" - } - ] - } - ] -} + const 
std::string yaml = R"EOF( +virtual_hosts: +- name: www2 + domains: + - www.lyft.com + routes: + - match: + prefix: "/" + route: + weighted_clusters: + clusters: + - name: cluster1 + weight: 30 + - name: cluster2 + weight: 30 + - name: cluster3 + weight: 40 + cluster: www2 )EOF"; - EXPECT_THROW(TestConfigImpl(parseRouteConfigurationFromJson(json), factory_context_, true), + EXPECT_THROW(TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), EnvoyException); } TEST_F(RouteMatcherTest, WeightedClustersMissingClusterList) { - const std::string json = R"EOF( -{ - "virtual_hosts": [ - { - "name": "www2", - "domains": ["www.lyft.com"], - "routes": [ - { - "prefix": "/", - "weighted_clusters": { - "runtime_key_prefix" : "www2" - } - } - ] - } - ] -} + const std::string yaml = R"EOF( +virtual_hosts: +- name: www2 + domains: + - www.lyft.com + routes: + - match: + prefix: "/" + route: + weighted_clusters: + runtime_key_prefix: www2 )EOF"; - EXPECT_THROW(TestConfigImpl(parseRouteConfigurationFromJson(json), factory_context_, true), + EXPECT_THROW(TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), EnvoyException); } TEST_F(RouteMatcherTest, WeightedClustersEmptyClustersList) { - const std::string json = R"EOF( -{ - "virtual_hosts": [ - { - "name": "www2", - "domains": ["www.lyft.com"], - "routes": [ - { - "prefix": "/", - "weighted_clusters": { - "runtime_key_prefix" : "www2", - "clusters" : [] - } - } - ] - } - ] -} + const std::string yaml = R"EOF( +virtual_hosts: +- name: www2 + domains: + - www.lyft.com + routes: + - match: + prefix: "/" + route: + weighted_clusters: + runtime_key_prefix: www2 + clusters: [] )EOF"; - EXPECT_THROW(TestConfigImpl(parseRouteConfigurationFromJson(json), factory_context_, true), + EXPECT_THROW(TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), EnvoyException); } @@ -3637,55 +3461,46 @@ TEST_F(RouteMatcherTest, WeightedClustersSumOFWeightsNotEqualToMax) { } 
TEST_F(RouteMatcherTest, TestWeightedClusterWithMissingWeights) { - const std::string json = R"EOF( -{ - "virtual_hosts": [ - { - "name": "www2", - "domains": ["www.lyft.com"], - "routes": [ - { - "prefix": "/", - "weighted_clusters": { - "clusters" : [ - { "name" : "cluster1", "weight" : 50 }, - { "name" : "cluster2", "weight" : 50 }, - { "name" : "cluster3"} - ] - } - } - ] - } - ] -} + const std::string yaml = R"EOF( +virtual_hosts: +- name: www2 + domains: + - www.lyft.com + routes: + - match: + prefix: "/" + route: + weighted_clusters: + clusters: + - name: cluster1 + weight: 50 + - name: cluster2 + weight: 50 + - name: cluster3 )EOF"; - EXPECT_THROW(TestConfigImpl(parseRouteConfigurationFromJson(json), factory_context_, true), + EXPECT_THROW(TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), EnvoyException); } TEST_F(RouteMatcherTest, TestWeightedClusterInvalidClusterName) { - const std::string json = R"EOF( -{ - "virtual_hosts": [ - { - "name": "www2", - "domains": ["www.lyft.com"], - "routes": [ - { - "prefix": "/foo", - "weighted_clusters": { - "clusters" : [ - { "name" : "cluster1", "weight" : 33 }, - { "name" : "cluster2", "weight" : 33 }, - { "name" : "cluster3-invalid", "weight": 34} - ] - } - } - ] - } - ] -} + const std::string yaml = R"EOF( +virtual_hosts: +- name: www2 + domains: + - www.lyft.com + routes: + - match: + prefix: "/foo" + route: + weighted_clusters: + clusters: + - name: cluster1 + weight: 33 + - name: cluster2 + weight: 33 + - name: cluster3-invalid + weight: 34 )EOF"; EXPECT_CALL(factory_context_.cluster_manager_, get("cluster1")) @@ -3695,7 +3510,7 @@ TEST_F(RouteMatcherTest, TestWeightedClusterInvalidClusterName) { EXPECT_CALL(factory_context_.cluster_manager_, get("cluster3-invalid")) .WillRepeatedly(Return(nullptr)); - EXPECT_THROW(TestConfigImpl(parseRouteConfigurationFromJson(json), factory_context_, true), + EXPECT_THROW(TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, 
true), EnvoyException); } @@ -3776,241 +3591,194 @@ TEST(NullConfigImplTest, All) { class BadHttpRouteConfigurationsTest : public testing::Test, public ConfigImplTestBase {}; TEST_F(BadHttpRouteConfigurationsTest, BadRouteConfig) { - const std::string json = R"EOF( - { - "virtual_hosts": [ - { - "name": "www2", - "domains": ["*"], - "routes": [ - { - "prefix": "/", - "cluster": "www2" - } - ] - } - ], - "fake_entry" : "fake_type" - } + const std::string yaml = R"EOF( +virtual_hosts: +- name: www2 + domains: + - "*" + routes: + - match: + prefix: "/" + route: + cluster: www2 +fake_entry: fake_type )EOF"; - EXPECT_THROW(TestConfigImpl(parseRouteConfigurationFromJson(json), factory_context_, true), + EXPECT_THROW(TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), EnvoyException); } TEST_F(BadHttpRouteConfigurationsTest, BadVirtualHostConfig) { - const std::string json = R"EOF( - { - "virtual_hosts": [ - { - "name": "www2", - "domains": ["*"], - "router" : { - "cluster" : "my_cluster" - }, - "routes": [ - { - "prefix": "/", - "cluster": "www2" - } - ] - } - ] - } + const std::string yaml = R"EOF( +virtual_hosts: +- name: www2 + domains: + - "*" + router: + cluster: my_cluster + routes: + - match: + prefix: "/" + route: + cluster: www2 )EOF"; - EXPECT_THROW(TestConfigImpl(parseRouteConfigurationFromJson(json), factory_context_, true), + EXPECT_THROW(TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), EnvoyException); } TEST_F(BadHttpRouteConfigurationsTest, BadRouteEntryConfig) { - const std::string json = R"EOF( - { - "virtual_hosts": [ - { - "name": "www2", - "domains": ["*"], - "routes": [ - { - "prefix": "/", - "cluster": "www2", - "timeout_ms" : "1234" - } - ] - } - ] - } + const std::string yaml = R"EOF( +virtual_hosts: +- name: www2 + domains: + - "*" + routes: + - match: + prefix: "/" + route: + cluster: www2 + timeout: 1234s )EOF"; - EXPECT_THROW(TestConfigImpl(parseRouteConfigurationFromJson(json), 
factory_context_, true), + EXPECT_THROW(TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), EnvoyException); } TEST_F(BadHttpRouteConfigurationsTest, BadRouteEntryConfigPrefixAndPath) { - const std::string json = R"EOF( - { - "virtual_hosts": [ - { - "name": "www2", - "domains": ["*"], - "routes": [ - { - "prefix": "/", - "path": "/foo", - "cluster": "www2" - } - ] - } - ] - } + const std::string yaml = R"EOF( +virtual_hosts: +- name: www2 + domains: + - "*" + routes: + - match: + prefix: "/" + path: "/foo" + route: + cluster: www2 )EOF"; - EXPECT_THROW_WITH_MESSAGE( - TestConfigImpl(parseRouteConfigurationFromJson(json), factory_context_, true), EnvoyException, - "routes must specify one of prefix/path/regex"); + EXPECT_THROW_WITH_REGEX( + TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), + EnvoyException, + "invalid value oneof field 'path_specifier' is already set. Cannot set 'prefix' for type " + "oneof"); } -TEST_F(BadHttpRouteConfigurationsTest, BadRouteEntryConfigPrefixAndRegex) { - const std::string json = R"EOF( - { - "virtual_hosts": [ - { - "name": "www2", - "domains": ["*"], - "routes": [ - { - "prefix": "/", - "regex": "/[bc]at", - "cluster": "www2" - } - ] - } - ] - } +TEST_F(BadHttpRouteConfigurationsTest, BadRouteEntryConfigMissingPathSpecifier) { + const std::string yaml = R"EOF( +virtual_hosts: +- name: www2 + domains: + - "*" + routes: + - route: + cluster: www2 )EOF"; - EXPECT_THROW_WITH_MESSAGE( - TestConfigImpl(parseRouteConfigurationFromJson(json), factory_context_, true), EnvoyException, - "routes must specify one of prefix/path/regex"); + EXPECT_THROW_WITH_REGEX( + TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), + EnvoyException, "RouteValidationError.Match: \\[\"value is required\"\\]"); } -TEST_F(BadHttpRouteConfigurationsTest, BadRouteEntryConfigPathAndRegex) { - const std::string json = R"EOF( - { - "virtual_hosts": [ - { - "name": "www2", - 
"domains": ["*"], - "routes": [ - { - "path": "/foo", - "regex": "/[bc]at", - "cluster": "www2" - } - ] - } - ] - } +TEST_F(BadHttpRouteConfigurationsTest, BadRouteEntryConfigPrefixAndRegex) { + const std::string yaml = R"EOF( +virtual_hosts: +- name: www2 + domains: + - "*" + routes: + - match: + prefix: "/" + regex: "/[bc]at" + route: + cluster: www2 )EOF"; - EXPECT_THROW_WITH_MESSAGE( - TestConfigImpl(parseRouteConfigurationFromJson(json), factory_context_, true), EnvoyException, - "routes must specify one of prefix/path/regex"); - ; + EXPECT_THROW_WITH_REGEX( + TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), + EnvoyException, + "invalid value oneof field 'path_specifier' is already set. Cannot set 'prefix' for type " + "oneof"); } -TEST_F(BadHttpRouteConfigurationsTest, BadRouteEntryConfigPrefixAndPathAndRegex) { - const std::string json = R"EOF( - { - "virtual_hosts": [ - { - "name": "www2", - "domains": ["*"], - "routes": [ - { - "prefix": "/", - "path": "/foo", - "regex": "/[bc]at", - "cluster": "www2" - } - ] - } - ] - } +TEST_F(BadHttpRouteConfigurationsTest, BadRouteEntryConfigNoAction) { + const std::string yaml = R"EOF( +virtual_hosts: +- name: www2 + domains: + - "*" + routes: + - match: + prefix: "/api" )EOF"; - EXPECT_THROW_WITH_MESSAGE( - TestConfigImpl(parseRouteConfigurationFromJson(json), factory_context_, true), EnvoyException, - "routes must specify one of prefix/path/regex"); + EXPECT_THROW_WITH_REGEX( + TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), + EnvoyException, "caused by field: \"action\", reason: is required"); } -TEST_F(BadHttpRouteConfigurationsTest, BadRouteEntryConfigMissingPathSpecifier) { - const std::string json = R"EOF( - { - "virtual_hosts": [ - { - "name": "www2", - "domains": ["*"], - "routes": [ - { - "cluster": "www2" - } - ] - } - ] - } +TEST_F(BadHttpRouteConfigurationsTest, BadRouteEntryConfigPathAndRegex) { + const std::string yaml = R"EOF( 
+virtual_hosts: +- name: www2 + domains: + - "*" + routes: + - match: + path: "/foo" + regex: "/[bc]at" + route: + cluster: www2 )EOF"; - EXPECT_THROW_WITH_MESSAGE( - TestConfigImpl(parseRouteConfigurationFromJson(json), factory_context_, true), EnvoyException, - "routes must specify one of prefix/path/regex"); + EXPECT_THROW_WITH_REGEX( + TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), + EnvoyException, + "invalid value oneof field 'path_specifier' is already set. Cannot set 'path' for type " + "oneof"); } -TEST_F(BadHttpRouteConfigurationsTest, BadRouteEntryConfigNoRedirectNoClusters) { - const std::string json = R"EOF( - { - "virtual_hosts": [ - { - "name": "www2", - "domains": ["*"], - "routes": [ - { - "prefix": "/api" - } - ] - } - ] - } +TEST_F(BadHttpRouteConfigurationsTest, BadRouteEntryConfigPrefixAndPathAndRegex) { + const std::string yaml = R"EOF( +virtual_hosts: +- name: www2 + domains: + - "*" + routes: + - match: + prefix: "/" + path: "/foo" + regex: "/[bc]at" + route: + cluster: www2 )EOF"; - EXPECT_THROW_WITH_MESSAGE( - TestConfigImpl(parseRouteConfigurationFromJson(json), factory_context_, true), EnvoyException, - "routes must have redirect or one of cluster/cluster_header/weighted_clusters") + EXPECT_THROW_WITH_REGEX( + TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), + EnvoyException, "invalid value oneof field 'path_specifier' is already set."); } TEST_F(RouteMatcherTest, TestOpaqueConfig) { - const std::string json = R"EOF( -{ - "virtual_hosts": [ - { - "name": "default", - "domains": ["*"], - "routes": [ - { - "prefix": "/api", - "cluster": "ats", - "opaque_config" : { - "name1": "value1", - "name2": "value2" - } - } - ] - } - ] -} + const std::string yaml = R"EOF( +virtual_hosts: +- name: default + domains: + - "*" + routes: + - match: + prefix: "/api" + route: + cluster: ats + metadata: + filter_metadata: + envoy.router: + name1: value1 + name2: value2 )EOF"; - TestConfigImpl 
config(parseRouteConfigurationFromJson(json), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); const std::multimap& opaque_config = config.route(genHeaders("api.lyft.com", "/api", "GET"), 0)->routeEntry()->opaqueConfig(); @@ -4022,88 +3790,61 @@ TEST_F(RouteMatcherTest, TestOpaqueConfig) { class RoutePropertyTest : public testing::Test, public ConfigImplTestBase {}; TEST_F(RoutePropertyTest, excludeVHRateLimits) { - std::string json = R"EOF( - { - "virtual_hosts": [ - { - "name": "www2", - "domains": ["*"], - "routes": [ - { - "prefix": "/", - "cluster": "www2" - } - ] - } - ] - } + std::string yaml = R"EOF( +virtual_hosts: +- name: www2 + domains: + - "*" + routes: + - match: + prefix: "/" + route: + cluster: www2 )EOF"; Http::TestHeaderMapImpl headers = genHeaders("www.lyft.com", "/foo", "GET"); std::unique_ptr config_ptr; - config_ptr = std::make_unique(parseRouteConfigurationFromJson(json), + config_ptr = std::make_unique(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); EXPECT_TRUE(config_ptr->route(headers, 0)->routeEntry()->includeVirtualHostRateLimits()); - json = R"EOF( - { - "virtual_hosts": [ - { - "name": "www2", - "domains": ["*"], - "routes": [ - { - "prefix": "/", - "cluster": "www2", - "rate_limits": [ - { - "actions": [ - { - "type": "remote_address" - } - ] - } - ] - } - ] - } - ] - } + yaml = R"EOF( +virtual_hosts: +- name: www2 + domains: + - "*" + routes: + - match: + prefix: "/" + route: + cluster: www2 + rate_limits: + - actions: + - remote_address: {} )EOF"; - config_ptr = std::make_unique(parseRouteConfigurationFromJson(json), + config_ptr = std::make_unique(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); EXPECT_FALSE(config_ptr->route(headers, 0)->routeEntry()->includeVirtualHostRateLimits()); - json = R"EOF( - { - "virtual_hosts": [ - { - "name": "www2", - "domains": ["*"], - "routes": [ - { - "prefix": "/", - "cluster": "www2", - 
"include_vh_rate_limits": true, - "rate_limits": [ - { - "actions": [ - { - "type": "remote_address" - } - ] - } - ] - } - ] - } - ] - } + yaml = R"EOF( +virtual_hosts: +- name: www2 + domains: + - "*" + routes: + - match: + prefix: "/" + route: + cluster: www2 + include_vh_rate_limits: true + rate_limits: + - actions: + - remote_address: {} )EOF"; - config_ptr = std::make_unique(parseRouteConfigurationFromJson(json), + config_ptr = std::make_unique(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); EXPECT_TRUE(config_ptr->route(headers, 0)->routeEntry()->includeVirtualHostRateLimits()); } @@ -4218,32 +3959,27 @@ TEST_F(RoutePropertyTest, TestRouteCorsConfig) { } TEST_F(RoutePropertyTest, TestVHostCorsLegacyConfig) { - const std::string json = R"EOF( -{ - "virtual_hosts": [ - { - "name": "default", - "domains": ["*"], - "cors" : { - "allow_origin": ["test-origin"], - "allow_methods": "test-methods", - "allow_headers": "test-headers", - "expose_headers": "test-expose-headers", - "max_age": "test-max-age", - "allow_credentials": true - }, - "routes": [ - { - "prefix": "/api", - "cluster": "ats" - } - ] - } - ] -} + const std::string yaml = R"EOF( +virtual_hosts: +- name: default + domains: + - "*" + cors: + allow_origin: + - test-origin + allow_methods: test-methods + allow_headers: test-headers + expose_headers: test-expose-headers + max_age: test-max-age + allow_credentials: true + routes: + - match: + prefix: "/api" + route: + cluster: ats )EOF"; - TestConfigImpl config(parseRouteConfigurationFromJson(json), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); const Router::CorsPolicy* cors_policy = config.route(genHeaders("api.lyft.com", "/api", "GET"), 0) @@ -4262,32 +3998,27 @@ TEST_F(RoutePropertyTest, TestVHostCorsLegacyConfig) { } TEST_F(RoutePropertyTest, TestRouteCorsLegacyConfig) { - const std::string json = R"EOF( -{ - "virtual_hosts": [ - { - "name": "default", - "domains": 
["*"], - "routes": [ - { - "prefix": "/api", - "cluster": "ats", - "cors" : { - "allow_origin": ["test-origin"], - "allow_methods": "test-methods", - "allow_headers": "test-headers", - "expose_headers": "test-expose-headers", - "max_age": "test-max-age", - "allow_credentials": true - } - } - ] - } - ] -} + const std::string yaml = R"EOF( +virtual_hosts: +- name: default + domains: + - "*" + routes: + - match: + prefix: "/api" + route: + cluster: ats + cors: + allow_origin: + - test-origin + allow_methods: test-methods + allow_headers: test-headers + expose_headers: test-expose-headers + max_age: test-max-age + allow_credentials: true )EOF"; - TestConfigImpl config(parseRouteConfigurationFromJson(json), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); const Router::CorsPolicy* cors_policy = config.route(genHeaders("api.lyft.com", "/api", "GET"), 0)->routeEntry()->corsPolicy(); @@ -4302,58 +4033,26 @@ TEST_F(RoutePropertyTest, TestRouteCorsLegacyConfig) { EXPECT_EQ(cors_policy->allowCredentials(), true); } -TEST_F(RoutePropertyTest, TestBadCorsConfig) { - const std::string json = R"EOF( -{ - "virtual_hosts": [ - { - "name": "default", - "domains": ["*"], - "routes": [ - { - "prefix": "/api", - "cluster": "ats", - "cors" : { - "enabled": "true", - "allow_credentials": "true" - } - } - ] - } - ] -} -)EOF"; - - EXPECT_THROW(TestConfigImpl(parseRouteConfigurationFromJson(json), factory_context_, true), - EnvoyException); -} - TEST_F(RouteMatcherTest, Decorator) { - const std::string json = R"EOF( -{ - "virtual_hosts": [ - { - "name": "local_service", - "domains": ["*"], - "routes": [ - { - "prefix": "/foo", - "cluster": "foo", - "decorator": { - "operation": "myFoo" - } - }, - { - "prefix": "/bar", - "cluster": "bar" - } - ] - } - ] -} + const std::string yaml = R"EOF( +virtual_hosts: +- name: local_service + domains: + - "*" + routes: + - match: + prefix: "/foo" + route: + cluster: foo + decorator: + 
operation: myFoo + - match: + prefix: "/bar" + route: + cluster: bar )EOF"; - TestConfigImpl config(parseRouteConfigurationFromJson(json), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); { Http::TestHeaderMapImpl headers = genHeaders("www.lyft.com", "/foo", "GET"); @@ -4372,49 +4071,36 @@ TEST_F(RouteMatcherTest, Decorator) { class CustomRequestHeadersTest : public testing::Test, public ConfigImplTestBase {}; TEST_F(CustomRequestHeadersTest, AddNewHeader) { - const std::string json = R"EOF( - { - "virtual_hosts": [ - { - "name": "www2", - "domains": [ - "lyft.com", - "www.lyft.com", - "w.lyft.com", - "ww.lyft.com", - "wwww.lyft.com" - ], - "request_headers_to_add": [ - { - "key": "x-client-ip", - "value": "%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT%" - } - ], - "routes": [ - { - "prefix": "/new_endpoint", - "prefix_rewrite": "/api/new_endpoint", - "cluster": "www2", - "request_headers_to_add": [ - { - "key": "x-client-ip", - "value": "%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT%" - } - ] - } - ] - } - ], - "request_headers_to_add": [ - { - "key": "x-client-ip", - "value": "%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT%" - } - ] - } + const std::string yaml = R"EOF( +virtual_hosts: +- name: www2 + domains: + - lyft.com + - www.lyft.com + - w.lyft.com + - ww.lyft.com + - wwww.lyft.com + request_headers_to_add: + - header: + key: x-client-ip + value: "%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT%" + routes: + - match: + prefix: "/new_endpoint" + route: + prefix_rewrite: "/api/new_endpoint" + cluster: www2 + request_headers_to_add: + - header: + key: x-client-ip + value: "%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT%" +request_headers_to_add: +- header: + key: x-client-ip + value: "%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT%" )EOF"; NiceMock stream_info; - TestConfigImpl config(parseRouteConfigurationFromJson(json), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); 
Http::TestHeaderMapImpl headers = genHeaders("www.lyft.com", "/new_endpoint/foo", "GET"); const RouteEntry* route = config.route(headers, 0)->routeEntry(); route->finalizeRequestHeaders(headers, stream_info, true); @@ -4422,50 +4108,37 @@ TEST_F(CustomRequestHeadersTest, AddNewHeader) { } TEST_F(CustomRequestHeadersTest, CustomHeaderWrongFormat) { - const std::string json = R"EOF( - { - "virtual_hosts": [ - { - "name": "www2", - "domains": [ - "lyft.com", - "www.lyft.com", - "w.lyft.com", - "ww.lyft.com", - "wwww.lyft.com" - ], - "request_headers_to_add": [ - { - "key": "x-client-ip", - "value": "%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT%" - } - ], - "routes": [ - { - "prefix": "/new_endpoint", - "prefix_rewrite": "/api/new_endpoint", - "cluster": "www2", - "request_headers_to_add": [ - { - "key": "x-client-ip", - "value": "%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT" - } - ] - } - ] - } - ], - "request_headers_to_add": [ - { - "key": "x-client-ip", - "value": "%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT" - } - ] - } + const std::string yaml = R"EOF( +virtual_hosts: +- name: www2 + domains: + - lyft.com + - www.lyft.com + - w.lyft.com + - ww.lyft.com + - wwww.lyft.com + request_headers_to_add: + - header: + key: x-client-ip + value: "%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT%" + routes: + - match: + prefix: "/new_endpoint" + route: + prefix_rewrite: "/api/new_endpoint" + cluster: www2 + request_headers_to_add: + - header: + key: x-client-ip + value: "%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT" +request_headers_to_add: +- header: + key: x-client-ip + value: "%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT" )EOF"; NiceMock stream_info; EXPECT_THROW_WITH_MESSAGE( - TestConfigImpl config(parseRouteConfigurationFromJson(json), factory_context_, true), + TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), EnvoyException, "Invalid header configuration. 
Un-terminated variable expression " "'DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT'"); @@ -5259,10 +4932,8 @@ name: ZeroIdleTimeout idle_timeout: 0s )EOF"; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(ZeroIdleTimeout), factory_context_, true); - Http::TestHeaderMapImpl headers = genRedirectHeaders("idle.lyft.com", "/regex", true, false); - const RouteEntry* route_entry = config.route(headers, 0)->routeEntry(); - EXPECT_EQ(0, route_entry->idleTimeout().value().count()); + EXPECT_THROW_WITH_REGEX(parseRouteConfigurationFromV2Yaml(ZeroIdleTimeout), EnvoyException, + "value must be greater than \" \"0s"); } TEST_F(RouteConfigurationV2, ExplicitIdleTimeout) { From e1450a1ca006f7d8b118a63b0f7e30be2639b881 Mon Sep 17 00:00:00 2001 From: Dan Rosen Date: Tue, 26 Mar 2019 23:20:21 -0400 Subject: [PATCH 021/165] init: replace old init manager with with new "safe" init manager (#6360) Signed-off-by: Dan Rosen --- include/envoy/init/BUILD | 21 ++++- include/envoy/init/init.h | 66 ---------------- include/envoy/{safe_init => init}/manager.h | 14 ++-- include/envoy/{safe_init => init}/target.h | 10 +-- include/envoy/{safe_init => init}/watcher.h | 10 +-- include/envoy/router/BUILD | 1 - .../router/route_config_provider_manager.h | 1 - include/envoy/safe_init/BUILD | 31 -------- include/envoy/server/BUILD | 6 +- include/envoy/server/filter_config.h | 2 +- include/envoy/server/instance.h | 2 +- .../envoy/server/transport_socket_config.h | 2 +- source/common/config/BUILD | 3 +- source/common/config/config_provider_impl.cc | 9 +-- source/common/config/config_provider_impl.h | 36 +++------ source/common/{safe_init => init}/BUILD | 6 +- .../{safe_init => init}/manager_impl.cc | 6 +- .../common/{safe_init => init}/manager_impl.h | 14 ++-- .../common/{safe_init => init}/target_impl.cc | 6 +- .../common/{safe_init => init}/target_impl.h | 10 +-- .../{safe_init => init}/watcher_impl.cc | 6 +- .../common/{safe_init => init}/watcher_impl.h | 10 +-- source/common/router/BUILD | 2 +- 
source/common/router/rds_impl.cc | 24 ++---- source/common/router/rds_impl.h | 18 +---- source/common/secret/BUILD | 3 +- source/common/secret/sds_api.cc | 33 +++----- source/common/secret/sds_api.h | 14 ++-- source/common/upstream/BUILD | 3 +- source/common/upstream/cluster_factory_impl.h | 2 - source/common/upstream/upstream_impl.cc | 5 +- source/common/upstream/upstream_impl.h | 13 ++- source/server/BUILD | 18 +---- source/server/config_validation/server.cc | 3 +- source/server/config_validation/server.h | 3 +- source/server/http/admin.cc | 2 +- source/server/init_manager_impl.cc | 66 ---------------- source/server/init_manager_impl.h | 40 ---------- source/server/lds_api.cc | 21 ++--- source/server/lds_api.h | 11 +-- source/server/listener_manager_impl.cc | 14 ++-- source/server/listener_manager_impl.h | 12 ++- source/server/server.cc | 33 +++----- source/server/server.h | 10 +-- .../config/config_provider_impl_test.cc | 3 +- test/common/{safe_init => init}/BUILD | 8 +- .../{safe_init => init}/manager_impl_test.cc | 22 +++--- .../{safe_init => init}/target_impl_test.cc | 14 ++-- .../{safe_init => init}/watcher_impl_test.cc | 12 +-- test/common/router/BUILD | 1 - test/common/router/rds_impl_test.cc | 36 +++++---- test/common/secret/sds_api_test.cc | 11 ++- test/integration/ads_integration_test.cc | 79 +++++++++++++++++++ .../sds_dynamic_integration_test.cc | 1 - .../sds_static_integration_test.cc | 1 - test/mocks/init/BUILD | 5 +- test/mocks/init/mocks.cc | 38 ++++----- test/mocks/init/mocks.h | 69 ++++++++++------ test/mocks/router/BUILD | 1 - test/mocks/router/mocks.h | 1 - test/mocks/safe_init/BUILD | 20 ----- test/mocks/safe_init/mocks.cc | 25 ------ test/mocks/safe_init/mocks.h | 66 ---------------- test/server/BUILD | 10 --- test/server/http/admin_test.cc | 2 +- test/server/init_manager_impl_test.cc | 69 ---------------- test/server/lds_api_test.cc | 41 ++++++---- test/server/listener_manager_impl_test.cc | 24 +++--- test/server/server_test.cc | 10 +-- 69 
files changed, 424 insertions(+), 767 deletions(-) delete mode 100644 include/envoy/init/init.h rename include/envoy/{safe_init => init}/manager.h (89%) rename include/envoy/{safe_init => init}/target.h (82%) rename include/envoy/{safe_init => init}/watcher.h (80%) delete mode 100644 include/envoy/safe_init/BUILD rename source/common/{safe_init => init}/BUILD (80%) rename source/common/{safe_init => init}/manager_impl.cc (97%) rename source/common/{safe_init => init}/manager_impl.h (82%) rename source/common/{safe_init => init}/target_impl.cc (95%) rename source/common/{safe_init => init}/target_impl.h (95%) rename source/common/{safe_init => init}/watcher_impl.cc (93%) rename source/common/{safe_init => init}/watcher_impl.h (94%) delete mode 100644 source/server/init_manager_impl.cc delete mode 100644 source/server/init_manager_impl.h rename test/common/{safe_init => init}/BUILD (69%) rename test/common/{safe_init => init}/manager_impl_test.cc (86%) rename test/common/{safe_init => init}/target_impl_test.cc (82%) rename test/common/{safe_init => init}/watcher_impl_test.cc (72%) delete mode 100644 test/mocks/safe_init/BUILD delete mode 100644 test/mocks/safe_init/mocks.cc delete mode 100644 test/mocks/safe_init/mocks.h delete mode 100644 test/server/init_manager_impl_test.cc diff --git a/include/envoy/init/BUILD b/include/envoy/init/BUILD index cfa069239b96f..2229d7c7a12e4 100644 --- a/include/envoy/init/BUILD +++ b/include/envoy/init/BUILD @@ -9,6 +9,23 @@ load( envoy_package() envoy_cc_library( - name = "init_interface", - hdrs = ["init.h"], + name = "watcher_interface", + hdrs = ["watcher.h"], +) + +envoy_cc_library( + name = "target_interface", + hdrs = ["target.h"], + deps = [ + ":watcher_interface", + ], +) + +envoy_cc_library( + name = "manager_interface", + hdrs = ["manager.h"], + deps = [ + ":target_interface", + ":watcher_interface", + ], ) diff --git a/include/envoy/init/init.h b/include/envoy/init/init.h deleted file mode 100644 index 
338511c3545b8..0000000000000 --- a/include/envoy/init/init.h +++ /dev/null @@ -1,66 +0,0 @@ -#pragma once - -#include - -#include "envoy/common/pure.h" - -#include "absl/strings/string_view.h" - -namespace Envoy { -namespace Init { - -/** - * A single initialization target. Deprecated, use SafeInit::Target instead. - * TODO(mergeconflict): convert all Init::Target implementations to SafeInit::TargetImpl. - */ -class Target { -public: - virtual ~Target() {} - - /** - * Called when the target should begin its own initialization. - * @param callback supplies the callback to invoke when the target has completed its - * initialization. - */ - virtual void initialize(std::function callback) PURE; -}; - -/** - * A manager that initializes multiple targets. Deprecated, use SafeInit::Manager instead. - * TODO(mergeconflict): convert all Init::Manager uses to SafeInit::Manager. - */ -class Manager { -public: - virtual ~Manager() {} - - /** - * Register a target to be initialized in the future. The manager will call initialize() on each - * target at some point in the future. It is an error to register the same target more than once. - * @param target the Target to initialize. - * @param description a human-readable description of target used for logging and debugging. - */ - virtual void registerTarget(Target& target, absl::string_view description) PURE; - - enum class State { - /** - * Targets have not been initialized. - */ - NotInitialized, - /** - * Targets are currently being initialized. - */ - Initializing, - /** - * All targets have been initialized. - */ - Initialized - }; - - /** - * Returns the current state of the init manager. 
- */ - virtual State state() const PURE; -}; - -} // namespace Init -} // namespace Envoy diff --git a/include/envoy/safe_init/manager.h b/include/envoy/init/manager.h similarity index 89% rename from include/envoy/safe_init/manager.h rename to include/envoy/init/manager.h index a94718fbd2869..94cf0dbb25e1a 100644 --- a/include/envoy/safe_init/manager.h +++ b/include/envoy/init/manager.h @@ -1,14 +1,14 @@ #pragma once #include "envoy/common/pure.h" -#include "envoy/safe_init/target.h" -#include "envoy/safe_init/watcher.h" +#include "envoy/init/target.h" +#include "envoy/init/watcher.h" namespace Envoy { -namespace SafeInit { +namespace Init { /** - * SafeInit::Manager coordinates initialization of one or more "targets." A typical flow would be: + * Init::Manager coordinates initialization of one or more "targets." A typical flow would be: * * - One or more initialization targets are registered with a manager using `add`. * - The manager is told to `initialize` all its targets, given a Watcher to notify when all @@ -21,14 +21,14 @@ namespace SafeInit { * Since there are several entities involved in this flow -- the owner of the manager, the targets * registered with the manager, and the manager itself -- it may be difficult or impossible in some * cases to guarantee that their lifetimes line up correctly to avoid use-after-free errors. The - * interface design here in SafeInit allows implementations to avoid the issue: + * interface design here in Init allows implementations to avoid the issue: * * - A Target can only be initialized via a TargetHandle, which acts as a weak reference. * Attempting to initialize a destroyed Target via its handle has no ill effects. * - Likewise, a Watcher can only be notified that initialization was complete via a * WatcherHandle, which acts as a weak reference as well. * - * See target.h and watcher.h, as well as implementation in source/common/safe_init for details. 
+ * See target.h and watcher.h, as well as implementation in source/common/init for details. */ struct Manager { virtual ~Manager() = default; @@ -75,5 +75,5 @@ struct Manager { virtual void initialize(const Watcher& watcher) PURE; }; -} // namespace SafeInit +} // namespace Init } // namespace Envoy diff --git a/include/envoy/safe_init/target.h b/include/envoy/init/target.h similarity index 82% rename from include/envoy/safe_init/target.h rename to include/envoy/init/target.h index 25dd958d3a646..9ab46d38aff48 100644 --- a/include/envoy/safe_init/target.h +++ b/include/envoy/init/target.h @@ -3,17 +3,17 @@ #include #include "envoy/common/pure.h" -#include "envoy/safe_init/watcher.h" +#include "envoy/init/watcher.h" #include "absl/strings/string_view.h" namespace Envoy { -namespace SafeInit { +namespace Init { /** * A TargetHandle functions as a weak reference to a Target. It is how an implementation of - * SafeInit::Manager would safely tell a target to `initialize` with no guarantees about the - * target's lifetime. Typical usage (outside of SafeInit::ManagerImpl) does not require touching + * Init::Manager would safely tell a target to `initialize` with no guarantees about the + * target's lifetime. Typical usage (outside of Init::ManagerImpl) does not require touching * TargetHandles at all. */ struct TargetHandle { @@ -48,5 +48,5 @@ struct Target { virtual TargetHandlePtr createHandle(absl::string_view name) const PURE; }; -} // namespace SafeInit +} // namespace Init } // namespace Envoy diff --git a/include/envoy/safe_init/watcher.h b/include/envoy/init/watcher.h similarity index 80% rename from include/envoy/safe_init/watcher.h rename to include/envoy/init/watcher.h index b9eb0cf08959e..ccf17adfcbafc 100644 --- a/include/envoy/safe_init/watcher.h +++ b/include/envoy/init/watcher.h @@ -7,14 +7,14 @@ #include "absl/strings/string_view.h" namespace Envoy { -namespace SafeInit { +namespace Init { /** * A WatcherHandle functions as a weak reference to a Watcher. 
It is how an implementation of - * SafeInit::Target would safely notify a Manager that it has initialized, and likewise it's how - * an implementation of SafeInit::Manager would safely tell its client that all registered targets + * Init::Target would safely notify a Manager that it has initialized, and likewise it's how + * an implementation of Init::Manager would safely tell its client that all registered targets * have initialized, with no guarantees about the lifetimes of the manager or client. Typical usage - * (outside of SafeInit::TargetImpl and ManagerImpl) does not require touching WatcherHandles at + * (outside of Init::TargetImpl and ManagerImpl) does not require touching WatcherHandles at * all. */ struct WatcherHandle { @@ -49,5 +49,5 @@ struct Watcher { virtual WatcherHandlePtr createHandle(absl::string_view name) const PURE; }; -} // namespace SafeInit +} // namespace Init } // namespace Envoy diff --git a/include/envoy/router/BUILD b/include/envoy/router/BUILD index f47a6db92ec04..1952414e09d19 100644 --- a/include/envoy/router/BUILD +++ b/include/envoy/router/BUILD @@ -20,7 +20,6 @@ envoy_cc_library( deps = [ ":rds_interface", "//include/envoy/event:dispatcher_interface", - "//include/envoy/init:init_interface", "//include/envoy/json:json_object_interface", "//include/envoy/local_info:local_info_interface", "//include/envoy/runtime:runtime_interface", diff --git a/include/envoy/router/route_config_provider_manager.h b/include/envoy/router/route_config_provider_manager.h index 3070a8dc407d9..9bf02066a6009 100644 --- a/include/envoy/router/route_config_provider_manager.h +++ b/include/envoy/router/route_config_provider_manager.h @@ -4,7 +4,6 @@ #include "envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.pb.h" #include "envoy/event/dispatcher.h" -#include "envoy/init/init.h" #include "envoy/json/json_object.h" #include "envoy/local_info/local_info.h" #include "envoy/router/rds.h" diff --git a/include/envoy/safe_init/BUILD 
b/include/envoy/safe_init/BUILD deleted file mode 100644 index 2229d7c7a12e4..0000000000000 --- a/include/envoy/safe_init/BUILD +++ /dev/null @@ -1,31 +0,0 @@ -licenses(["notice"]) # Apache 2 - -load( - "//bazel:envoy_build_system.bzl", - "envoy_cc_library", - "envoy_package", -) - -envoy_package() - -envoy_cc_library( - name = "watcher_interface", - hdrs = ["watcher.h"], -) - -envoy_cc_library( - name = "target_interface", - hdrs = ["target.h"], - deps = [ - ":watcher_interface", - ], -) - -envoy_cc_library( - name = "manager_interface", - hdrs = ["manager.h"], - deps = [ - ":target_interface", - ":watcher_interface", - ], -) diff --git a/include/envoy/server/BUILD b/include/envoy/server/BUILD index 536710d325617..c81fcccfb656e 100644 --- a/include/envoy/server/BUILD +++ b/include/envoy/server/BUILD @@ -99,7 +99,7 @@ envoy_cc_library( "//include/envoy/event:timer_interface", "//include/envoy/http:context_interface", "//include/envoy/http:query_params_interface", - "//include/envoy/init:init_interface", + "//include/envoy/init:manager_interface", "//include/envoy/local_info:local_info_interface", "//include/envoy/runtime:runtime_interface", "//include/envoy/secret:secret_manager_interface", @@ -151,7 +151,7 @@ envoy_cc_library( "//include/envoy/http:codes_interface", "//include/envoy/http:context_interface", "//include/envoy/http:filter_interface", - "//include/envoy/init:init_interface", + "//include/envoy/init:manager_interface", "//include/envoy/json:json_object_interface", "//include/envoy/local_info:local_info_interface", "//include/envoy/network:drain_decision_interface", @@ -196,7 +196,7 @@ envoy_cc_library( hdrs = ["transport_socket_config.h"], deps = [ "//include/envoy/event:dispatcher_interface", - "//include/envoy/init:init_interface", + "//include/envoy/init:manager_interface", "//include/envoy/local_info:local_info_interface", "//include/envoy/network:transport_socket_interface", "//include/envoy/runtime:runtime_interface", diff --git 
a/include/envoy/server/filter_config.h b/include/envoy/server/filter_config.h index 0f9234addd7d8..2f433b1e6b656 100644 --- a/include/envoy/server/filter_config.h +++ b/include/envoy/server/filter_config.h @@ -7,7 +7,7 @@ #include "envoy/http/codes.h" #include "envoy/http/context.h" #include "envoy/http/filter.h" -#include "envoy/init/init.h" +#include "envoy/init/manager.h" #include "envoy/json/json_object.h" #include "envoy/network/drain_decision.h" #include "envoy/network/filter.h" diff --git a/include/envoy/server/instance.h b/include/envoy/server/instance.h index a9be0d78ec56d..324960b35ffa0 100644 --- a/include/envoy/server/instance.h +++ b/include/envoy/server/instance.h @@ -9,7 +9,7 @@ #include "envoy/common/mutex_tracer.h" #include "envoy/event/timer.h" #include "envoy/http/context.h" -#include "envoy/init/init.h" +#include "envoy/init/manager.h" #include "envoy/local_info/local_info.h" #include "envoy/network/listen_socket.h" #include "envoy/runtime/runtime.h" diff --git a/include/envoy/server/transport_socket_config.h b/include/envoy/server/transport_socket_config.h index 15236bd5700ec..939846e08b23a 100644 --- a/include/envoy/server/transport_socket_config.h +++ b/include/envoy/server/transport_socket_config.h @@ -3,7 +3,7 @@ #include #include "envoy/event/dispatcher.h" -#include "envoy/init/init.h" +#include "envoy/init/manager.h" #include "envoy/local_info/local_info.h" #include "envoy/network/transport_socket.h" #include "envoy/runtime/runtime.h" diff --git a/source/common/config/BUILD b/source/common/config/BUILD index 8071c1855276b..0d41c3e672e7a 100644 --- a/source/common/config/BUILD +++ b/source/common/config/BUILD @@ -391,11 +391,12 @@ envoy_cc_library( ":utility_lib", "//include/envoy/config:config_provider_interface", "//include/envoy/config:config_provider_manager_interface", - "//include/envoy/init:init_interface", + "//include/envoy/init:manager_interface", "//include/envoy/server:admin_interface", 
"//include/envoy/server:config_tracker_interface", "//include/envoy/singleton:instance_interface", "//include/envoy/thread_local:thread_local_interface", + "//source/common/init:target_lib", "//source/common/protobuf", ], ) diff --git a/source/common/config/config_provider_impl.cc b/source/common/config/config_provider_impl.cc index 541c767412aac..da3d65043a96d 100644 --- a/source/common/config/config_provider_impl.cc +++ b/source/common/config/config_provider_impl.cc @@ -16,17 +16,10 @@ ImmutableConfigProviderImplBase::~ImmutableConfigProviderImplBase() { } ConfigSubscriptionInstanceBase::~ConfigSubscriptionInstanceBase() { - runInitializeCallbackIfAny(); + init_target_.ready(); config_provider_manager_.unbindSubscription(manager_identifier_); } -void ConfigSubscriptionInstanceBase::runInitializeCallbackIfAny() { - if (initialize_callback_) { - initialize_callback_(); - initialize_callback_ = nullptr; - } -} - bool ConfigSubscriptionInstanceBase::checkAndApplyConfig(const Protobuf::Message& config_proto, const std::string& config_name, const std::string& version_info) { diff --git a/source/common/config/config_provider_impl.h b/source/common/config/config_provider_impl.h index b865165cd4f44..50e916d1d0348 100644 --- a/source/common/config/config_provider_impl.h +++ b/source/common/config/config_provider_impl.h @@ -4,7 +4,7 @@ #include "envoy/config/config_provider.h" #include "envoy/config/config_provider_manager.h" -#include "envoy/init/init.h" +#include "envoy/init/manager.h" #include "envoy/server/admin.h" #include "envoy/server/config_tracker.h" #include "envoy/singleton/instance.h" @@ -13,6 +13,7 @@ #include "common/common/thread.h" #include "common/common/utility.h" #include "common/config/utility.h" +#include "common/init/target_impl.h" #include "common/protobuf/protobuf.h" namespace Envoy { @@ -133,21 +134,14 @@ class MutableConfigProviderImplBase; * This class can not be instantiated directly; instead, it provides the foundation for * config subscription 
implementations which derive from it. */ -class ConfigSubscriptionInstanceBase : public Init::Target, - protected Logger::Loggable { +class ConfigSubscriptionInstanceBase : protected Logger::Loggable { public: struct LastConfigInfo { uint64_t last_config_hash_; std::string last_config_version_; }; - ~ConfigSubscriptionInstanceBase() override; - - // Init::Target - void initialize(std::function callback) override { - initialize_callback_ = callback; - start(); - } + virtual ~ConfigSubscriptionInstanceBase(); /** * Starts the subscription corresponding to a config source. @@ -166,14 +160,14 @@ class ConfigSubscriptionInstanceBase : public Init::Target, */ void onConfigUpdate() { setLastUpdated(); - runInitializeCallbackIfAny(); + init_target_.ready(); } /** * Must be called by derived classes when the onConfigUpdateFailed() callback associated with the * underlying subscription is issued. */ - void onConfigUpdateFailed() { runInitializeCallbackIfAny(); } + void onConfigUpdateFailed() { init_target_.ready(); } /** * Determines whether a configuration proto is a new update, and if so, propagates it to all @@ -200,21 +194,16 @@ class ConfigSubscriptionInstanceBase : public Init::Target, ConfigProviderManagerImplBase& config_provider_manager, TimeSource& time_source, const SystemTime& last_updated, const LocalInfo::LocalInfo& local_info) - : name_(name), manager_identifier_(manager_identifier), - config_provider_manager_(config_provider_manager), time_source_(time_source), - last_updated_(last_updated) { + : name_(name), init_target_(fmt::format("ConfigSubscriptionInstanceBase {}", name_), + [this]() { start(); }), + manager_identifier_(manager_identifier), config_provider_manager_(config_provider_manager), + time_source_(time_source), last_updated_(last_updated) { Envoy::Config::Utility::checkLocalInfo(name, local_info); } void setLastUpdated() { last_updated_ = time_source_.systemTime(); } - void runInitializeCallbackIfAny(); - private: - void 
registerInitTarget(Init::Manager& init_manager) { - init_manager.registerTarget(*this, fmt::format("ConfigSubscriptionInstanceBase {}", name_)); - } - void bindConfigProvider(MutableConfigProviderImplBase* provider); void unbindConfigProvider(MutableConfigProviderImplBase* provider) { @@ -222,7 +211,7 @@ class ConfigSubscriptionInstanceBase : public Init::Target, } const std::string name_; - std::function initialize_callback_; + Init::TargetImpl init_target_; std::unordered_set mutable_config_providers_; const uint64_t manager_identifier_; ConfigProviderManagerImplBase& config_provider_manager_; @@ -387,8 +376,7 @@ class ConfigProviderManagerImplBase : public ConfigProviderManager, public Singl // around it. However, since this is not a performance critical path we err on the side // of simplicity. subscription = subscription_factory_fn(manager_identifier, *this); - - subscription->registerInitTarget(init_manager); + init_manager.add(subscription->init_target_); bindSubscription(manager_identifier, subscription); } else { diff --git a/source/common/safe_init/BUILD b/source/common/init/BUILD similarity index 80% rename from source/common/safe_init/BUILD rename to source/common/init/BUILD index 269cd9fbaace6..6fef3006865be 100644 --- a/source/common/safe_init/BUILD +++ b/source/common/init/BUILD @@ -13,7 +13,7 @@ envoy_cc_library( srcs = ["watcher_impl.cc"], hdrs = ["watcher_impl.h"], deps = [ - "//include/envoy/safe_init:watcher_interface", + "//include/envoy/init:watcher_interface", "//source/common/common:logger_lib", ], ) @@ -23,7 +23,7 @@ envoy_cc_library( srcs = ["target_impl.cc"], hdrs = ["target_impl.h"], deps = [ - "//include/envoy/safe_init:target_interface", + "//include/envoy/init:target_interface", "//source/common/common:logger_lib", ], ) @@ -34,7 +34,7 @@ envoy_cc_library( hdrs = ["manager_impl.h"], deps = [ ":watcher_lib", - "//include/envoy/safe_init:manager_interface", + "//include/envoy/init:manager_interface", "//source/common/common:logger_lib", 
], ) diff --git a/source/common/safe_init/manager_impl.cc b/source/common/init/manager_impl.cc similarity index 97% rename from source/common/safe_init/manager_impl.cc rename to source/common/init/manager_impl.cc index a21827c67f2f0..f60ddc64a9e90 100644 --- a/source/common/safe_init/manager_impl.cc +++ b/source/common/init/manager_impl.cc @@ -1,9 +1,9 @@ -#include "common/safe_init/manager_impl.h" +#include "common/init/manager_impl.h" #include "common/common/assert.h" namespace Envoy { -namespace SafeInit { +namespace Init { ManagerImpl::ManagerImpl(absl::string_view name) : name_(fmt::format("init manager {}", name)), state_(State::Uninitialized), count_(0), @@ -75,5 +75,5 @@ void ManagerImpl::ready() { watcher_handle_->ready(); } -} // namespace SafeInit +} // namespace Init } // namespace Envoy diff --git a/source/common/safe_init/manager_impl.h b/source/common/init/manager_impl.h similarity index 82% rename from source/common/safe_init/manager_impl.h rename to source/common/init/manager_impl.h index 7a88572422ad7..b92ac102fd729 100644 --- a/source/common/safe_init/manager_impl.h +++ b/source/common/init/manager_impl.h @@ -2,17 +2,17 @@ #include -#include "envoy/safe_init/manager.h" +#include "envoy/init/manager.h" #include "common/common/logger.h" -#include "common/safe_init/watcher_impl.h" +#include "common/init/watcher_impl.h" namespace Envoy { -namespace SafeInit { +namespace Init { /** - * SafeInit::ManagerImpl coordinates initialization of one or more "targets." See comments in - * include/envoy/safe_init/manager.h for an overview. + * Init::ManagerImpl coordinates initialization of one or more "targets." See comments in + * include/envoy/init/manager.h for an overview. 
* * When the logging level is set to "debug" or "trace," the log will contain entries for all * significant events in the initialization flow: @@ -30,7 +30,7 @@ class ManagerImpl : public Manager, Logger::Loggable { */ ManagerImpl(absl::string_view name); - // SafeInit::Manager + // Init::Manager State state() const override; void add(const Target& target) override; void initialize(const Watcher& watcher) override; @@ -58,5 +58,5 @@ class ManagerImpl : public Manager, Logger::Loggable { std::list target_handles_; }; -} // namespace SafeInit +} // namespace Init } // namespace Envoy diff --git a/source/common/safe_init/target_impl.cc b/source/common/init/target_impl.cc similarity index 95% rename from source/common/safe_init/target_impl.cc rename to source/common/init/target_impl.cc index bdc839018e38e..5bf0288b82980 100644 --- a/source/common/safe_init/target_impl.cc +++ b/source/common/init/target_impl.cc @@ -1,7 +1,7 @@ -#include "common/safe_init/target_impl.h" +#include "common/init/target_impl.h" namespace Envoy { -namespace SafeInit { +namespace Init { TargetHandleImpl::TargetHandleImpl(absl::string_view handle_name, absl::string_view name, std::weak_ptr fn) @@ -50,5 +50,5 @@ bool TargetImpl::ready() { return false; } -} // namespace SafeInit +} // namespace Init } // namespace Envoy diff --git a/source/common/safe_init/target_impl.h b/source/common/init/target_impl.h similarity index 95% rename from source/common/safe_init/target_impl.h rename to source/common/init/target_impl.h index 675cfceb91eec..ad2757433f0de 100644 --- a/source/common/safe_init/target_impl.h +++ b/source/common/init/target_impl.h @@ -2,12 +2,12 @@ #include -#include "envoy/safe_init/target.h" +#include "envoy/init/target.h" #include "common/common/logger.h" namespace Envoy { -namespace SafeInit { +namespace Init { /** * A target is just a glorified callback function, called by the manager it was registered with. 
@@ -33,7 +33,7 @@ class TargetHandleImpl : public TargetHandle, Logger::Loggable std::weak_ptr fn); public: - // SafeInit::TargetHandle + // Init::TargetHandle bool initialize(const Watcher& watcher) const override; private: @@ -62,7 +62,7 @@ class TargetImpl : public Target, Logger::Loggable { TargetImpl(absl::string_view name, InitializeFn fn); ~TargetImpl() override; - // SafeInit::Target + // Init::Target absl::string_view name() const override; TargetHandlePtr createHandle(absl::string_view handle_name) const override; @@ -85,5 +85,5 @@ class TargetImpl : public Target, Logger::Loggable { const std::shared_ptr fn_; }; -} // namespace SafeInit +} // namespace Init } // namespace Envoy diff --git a/source/common/safe_init/watcher_impl.cc b/source/common/init/watcher_impl.cc similarity index 93% rename from source/common/safe_init/watcher_impl.cc rename to source/common/init/watcher_impl.cc index ee7899f55637f..b69fe3e7cf846 100644 --- a/source/common/safe_init/watcher_impl.cc +++ b/source/common/init/watcher_impl.cc @@ -1,7 +1,7 @@ -#include "common/safe_init/watcher_impl.h" +#include "common/init/watcher_impl.h" namespace Envoy { -namespace SafeInit { +namespace Init { WatcherHandleImpl::WatcherHandleImpl(absl::string_view handle_name, absl::string_view name, std::weak_ptr fn) @@ -34,5 +34,5 @@ WatcherHandlePtr WatcherImpl::createHandle(absl::string_view handle_name) const new WatcherHandleImpl(handle_name, name_, std::weak_ptr(fn_))); } -} // namespace SafeInit +} // namespace Init } // namespace Envoy diff --git a/source/common/safe_init/watcher_impl.h b/source/common/init/watcher_impl.h similarity index 94% rename from source/common/safe_init/watcher_impl.h rename to source/common/init/watcher_impl.h index 582fd64910816..816a37c860eb2 100644 --- a/source/common/safe_init/watcher_impl.h +++ b/source/common/init/watcher_impl.h @@ -2,12 +2,12 @@ #include -#include "envoy/safe_init/watcher.h" +#include "envoy/init/watcher.h" #include "common/common/logger.h" 
namespace Envoy { -namespace SafeInit { +namespace Init { /** * A watcher is just a glorified callback function, called by a target or a manager when @@ -28,7 +28,7 @@ class WatcherHandleImpl : public WatcherHandle, Logger::Loggable fn); public: - // SafeInit::WatcherHandle + // Init::WatcherHandle bool ready() const override; private: @@ -57,7 +57,7 @@ class WatcherImpl : public Watcher, Logger::Loggable { WatcherImpl(absl::string_view name, ReadyFn fn); ~WatcherImpl() override; - // SafeInit::Watcher + // Init::Watcher absl::string_view name() const override; WatcherHandlePtr createHandle(absl::string_view handle_name) const override; @@ -69,5 +69,5 @@ class WatcherImpl : public Watcher, Logger::Loggable { const std::shared_ptr fn_; }; -} // namespace SafeInit +} // namespace Init } // namespace Envoy diff --git a/source/common/router/BUILD b/source/common/router/BUILD index b861e6eeb3037..498babb17239e 100644 --- a/source/common/router/BUILD +++ b/source/common/router/BUILD @@ -74,7 +74,6 @@ envoy_cc_library( ":config_lib", "//include/envoy/config:subscription_interface", "//include/envoy/http:codes_interface", - "//include/envoy/init:init_interface", "//include/envoy/local_info:local_info_interface", "//include/envoy/router:rds_interface", "//include/envoy/router:route_config_provider_manager_interface", @@ -86,6 +85,7 @@ envoy_cc_library( "//source/common/config:rds_json_lib", "//source/common/config:subscription_factory_lib", "//source/common/config:utility_lib", + "//source/common/init:target_lib", "//source/common/protobuf:utility_lib", "@envoy_api//envoy/admin/v2alpha:config_dump_cc", "@envoy_api//envoy/api/v2:rds_cc", diff --git a/source/common/router/rds_impl.cc b/source/common/router/rds_impl.cc index 3908fa799cd96..9393b7190f77e 100644 --- a/source/common/router/rds_impl.cc +++ b/source/common/router/rds_impl.cc @@ -60,6 +60,8 @@ RdsRouteConfigSubscription::RdsRouteConfigSubscription( const std::string& stat_prefix, 
Envoy::Router::RouteConfigProviderManagerImpl& route_config_provider_manager) : route_config_name_(rds.route_config_name()), + init_target_(fmt::format("RdsRouteConfigSubscription {}", route_config_name_), + [this]() { subscription_->start({route_config_name_}, *this); }), scope_(factory_context.scope().createScope(stat_prefix + "rds." + route_config_name_ + ".")), stats_({ALL_RDS_STATS(POOL_COUNTER(*scope_))}), route_config_provider_manager_(route_config_provider_manager), @@ -77,7 +79,7 @@ RdsRouteConfigSubscription::RdsRouteConfigSubscription( RdsRouteConfigSubscription::~RdsRouteConfigSubscription() { // If we get destroyed during initialization, make sure we signal that we "initialized". - runInitializeCallbackIfAny(); + init_target_.ready(); // The ownership of RdsRouteConfigProviderImpl is shared among all HttpConnectionManagers that // hold a shared_ptr to it. The RouteConfigProviderManager holds weak_ptrs to the @@ -93,7 +95,7 @@ void RdsRouteConfigSubscription::onConfigUpdate(const ResourceVector& resources, if (resources.empty()) { ENVOY_LOG(debug, "Missing RouteConfiguration for {} in onConfigUpdate()", route_config_name_); stats_.update_empty_.inc(); - runInitializeCallbackIfAny(); + init_target_.ready(); return; } if (resources.size() != 1) { @@ -119,25 +121,13 @@ void RdsRouteConfigSubscription::onConfigUpdate(const ResourceVector& resources, } } - runInitializeCallbackIfAny(); + init_target_.ready(); } void RdsRouteConfigSubscription::onConfigUpdateFailed(const EnvoyException*) { // We need to allow server startup to continue, even if we have a bad // config. 
- runInitializeCallbackIfAny(); -} - -void RdsRouteConfigSubscription::registerInitTarget(Init::Manager& init_manager) { - init_manager.registerTarget(*this, - fmt::format("RdsRouteConfigSubscription {}", route_config_name_)); -} - -void RdsRouteConfigSubscription::runInitializeCallbackIfAny() { - if (initialize_callback_) { - initialize_callback_(); - initialize_callback_ = nullptr; - } + init_target_.ready(); } RdsRouteConfigProviderImpl::RdsRouteConfigProviderImpl( @@ -207,7 +197,7 @@ Router::RouteConfigProviderPtr RouteConfigProviderManagerImpl::createRdsRouteCon subscription.reset(new RdsRouteConfigSubscription(rds, manager_identifier, factory_context, stat_prefix, *this)); - subscription->registerInitTarget(factory_context.initManager()); + factory_context.initManager().add(subscription->init_target_); route_config_subscriptions_.insert({manager_identifier, subscription}); } else { diff --git a/source/common/router/rds_impl.h b/source/common/router/rds_impl.h index 67e086258e21b..d665a3aed8303 100644 --- a/source/common/router/rds_impl.h +++ b/source/common/router/rds_impl.h @@ -12,7 +12,6 @@ #include "envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.pb.h" #include "envoy/config/subscription.h" #include "envoy/http/codes.h" -#include "envoy/init/init.h" #include "envoy/local_info/local_info.h" #include "envoy/router/rds.h" #include "envoy/router/route_config_provider_manager.h" @@ -23,6 +22,7 @@ #include "envoy/thread_local/thread_local.h" #include "common/common/logger.h" +#include "common/init/target_impl.h" #include "common/protobuf/utility.h" namespace Envoy { @@ -94,18 +94,11 @@ class RdsRouteConfigProviderImpl; * RDS config providers. 
*/ class RdsRouteConfigSubscription - : public Init::Target, - Envoy::Config::SubscriptionCallbacks, + : Envoy::Config::SubscriptionCallbacks, Logger::Loggable { public: ~RdsRouteConfigSubscription(); - // Init::Target - void initialize(std::function callback) override { - initialize_callback_ = callback; - subscription_->start({route_config_name_}, *this); - } - // Config::SubscriptionCallbacks // TODO(fredlas) deduplicate void onConfigUpdate(const ResourceVector& resources, const std::string& version_info) override; @@ -130,12 +123,9 @@ class RdsRouteConfigSubscription const std::string& stat_prefix, RouteConfigProviderManagerImpl& route_config_provider_manager); - void registerInitTarget(Init::Manager& init_manager); - void runInitializeCallbackIfAny(); - - std::unique_ptr> subscription_; - std::function initialize_callback_; const std::string route_config_name_; + Init::TargetImpl init_target_; + std::unique_ptr> subscription_; Stats::ScopePtr scope_; RdsStats stats_; RouteConfigProviderManagerImpl& route_config_provider_manager_; diff --git a/source/common/secret/BUILD b/source/common/secret/BUILD index 5a9f0f94ec7ce..3248ffb331ab8 100644 --- a/source/common/secret/BUILD +++ b/source/common/secret/BUILD @@ -42,7 +42,7 @@ envoy_cc_library( deps = [ "//include/envoy/config:subscription_interface", "//include/envoy/event:dispatcher_interface", - "//include/envoy/init:init_interface", + "//include/envoy/init:manager_interface", "//include/envoy/local_info:local_info_interface", "//include/envoy/runtime:runtime_interface", "//include/envoy/secret:secret_provider_interface", @@ -52,6 +52,7 @@ envoy_cc_library( "//source/common/common:cleanup_lib", "//source/common/config:resources_lib", "//source/common/config:subscription_factory_lib", + "//source/common/init:target_lib", "//source/common/protobuf:utility_lib", "//source/common/ssl:certificate_validation_context_config_impl_lib", "//source/common/ssl:tls_certificate_config_impl_lib", diff --git 
a/source/common/secret/sds_api.cc b/source/common/secret/sds_api.cc index 2471ad98657f6..10f04d5ef6281 100644 --- a/source/common/secret/sds_api.cc +++ b/source/common/secret/sds_api.cc @@ -17,7 +17,8 @@ SdsApi::SdsApi(const LocalInfo::LocalInfo& local_info, Event::Dispatcher& dispat const envoy::api::v2::core::ConfigSource& sds_config, const std::string& sds_config_name, std::function destructor_cb, Api::Api& api) - : local_info_(local_info), dispatcher_(dispatcher), random_(random), stats_(stats), + : init_target_(fmt::format("SdsApi {}", sds_config_name), [this] { initialize(); }), + local_info_(local_info), dispatcher_(dispatcher), random_(random), stats_(stats), cluster_manager_(cluster_manager), sds_config_(sds_config), sds_config_name_(sds_config_name), secret_hash_(0), clean_up_(destructor_cb), api_(api) { Config::Utility::checkLocalInfo("sds", local_info_); @@ -25,19 +26,7 @@ SdsApi::SdsApi(const LocalInfo::LocalInfo& local_info, Event::Dispatcher& dispat // can be chained together to behave as one init_manager. In that way, we let // two listeners which share same SdsApi to register at separate init managers, and // each init manager has a chance to initialize its targets. 
- init_manager.registerTarget(*this, fmt::format("SdsApi {}", sds_config_name)); -} - -void SdsApi::initialize(std::function callback) { - initialize_callback_ = callback; - - subscription_ = Envoy::Config::SubscriptionFactory::subscriptionFromConfigSource< - envoy::api::v2::auth::Secret>( - sds_config_, local_info_, dispatcher_, cluster_manager_, random_, stats_, - "envoy.service.discovery.v2.SecretDiscoveryService.FetchSecrets", - "envoy.service.discovery.v2.SecretDiscoveryService.StreamSecrets", api_); - - subscription_->start({sds_config_name_}, *this); + init_manager.add(init_target_); } void SdsApi::onConfigUpdate(const ResourceVector& resources, const std::string&) { @@ -66,19 +55,21 @@ void SdsApi::onConfigUpdate(const ResourceVector& resources, const std::string&) update_callback_manager_.runCallbacks(); } - runInitializeCallbackIfAny(); + init_target_.ready(); } void SdsApi::onConfigUpdateFailed(const EnvoyException*) { // We need to allow server startup to continue, even if we have a bad config. 
- runInitializeCallbackIfAny(); + init_target_.ready(); } -void SdsApi::runInitializeCallbackIfAny() { - if (initialize_callback_) { - initialize_callback_(); - initialize_callback_ = nullptr; - } +void SdsApi::initialize() { + subscription_ = Envoy::Config::SubscriptionFactory::subscriptionFromConfigSource< + envoy::api::v2::auth::Secret>( + sds_config_, local_info_, dispatcher_, cluster_manager_, random_, stats_, + "envoy.service.discovery.v2.SecretDiscoveryService.FetchSecrets", + "envoy.service.discovery.v2.SecretDiscoveryService.StreamSecrets", api_); + subscription_->start({sds_config_name_}, *this); } } // namespace Secret diff --git a/source/common/secret/sds_api.h b/source/common/secret/sds_api.h index 6123159b372fe..bb6132febd181 100644 --- a/source/common/secret/sds_api.h +++ b/source/common/secret/sds_api.h @@ -7,7 +7,7 @@ #include "envoy/api/v2/core/config_source.pb.h" #include "envoy/config/subscription.h" #include "envoy/event/dispatcher.h" -#include "envoy/init/init.h" +#include "envoy/init/manager.h" #include "envoy/local_info/local_info.h" #include "envoy/runtime/runtime.h" #include "envoy/secret/secret_callbacks.h" @@ -18,6 +18,7 @@ #include "common/common/callback_impl.h" #include "common/common/cleanup.h" +#include "common/init/target_impl.h" #include "common/ssl/certificate_validation_context_config_impl.h" #include "common/ssl/tls_certificate_config_impl.h" @@ -27,8 +28,7 @@ namespace Secret { /** * SDS API implementation that fetches secrets from SDS server via Subscription. 
*/ -class SdsApi : public Init::Target, - public Config::SubscriptionCallbacks { +class SdsApi : public Config::SubscriptionCallbacks { public: SdsApi(const LocalInfo::LocalInfo& local_info, Event::Dispatcher& dispatcher, Runtime::RandomGenerator& random, Stats::Store& stats, @@ -36,9 +36,6 @@ class SdsApi : public Init::Target, const envoy::api::v2::core::ConfigSource& sds_config, const std::string& sds_config_name, std::function destructor_cb, Api::Api& api); - // Init::Target - void initialize(std::function callback) override; - // Config::SubscriptionCallbacks // TODO(fredlas) deduplicate void onConfigUpdate(const ResourceVector& resources, const std::string& version_info) override; @@ -58,8 +55,8 @@ class SdsApi : public Init::Target, Common::CallbackManager<> update_callback_manager_; private: - void runInitializeCallbackIfAny(); - + void initialize(); + Init::TargetImpl init_target_; const LocalInfo::LocalInfo& local_info_; Event::Dispatcher& dispatcher_; Runtime::RandomGenerator& random_; @@ -68,7 +65,6 @@ class SdsApi : public Init::Target, const envoy::api::v2::core::ConfigSource sds_config_; std::unique_ptr> subscription_; - std::function initialize_callback_; const std::string sds_config_name_; uint64_t secret_hash_; diff --git a/source/common/upstream/BUILD b/source/common/upstream/BUILD index 0629e568dd77b..5197ae515a09e 100644 --- a/source/common/upstream/BUILD +++ b/source/common/upstream/BUILD @@ -406,9 +406,9 @@ envoy_cc_library( "//source/common/common:minimal_logger_lib", "//source/common/config:metadata_lib", "//source/common/config:well_known_names", + "//source/common/init:manager_lib", "//source/common/stats:isolated_store_lib", "//source/common/stats:stats_lib", - "//source/server:init_manager_lib", "//source/server:transport_socket_config_lib", "@envoy_api//envoy/api/v2/core:base_cc", "@envoy_api//envoy/api/v2/endpoint:endpoint_cc", @@ -476,7 +476,6 @@ envoy_cc_library( "//source/common/stats:isolated_store_lib", 
"//source/common/stats:stats_lib", "//source/extensions/clusters:well_known_names", - "//source/server:init_manager_lib", "//source/server:transport_socket_config_lib", "@envoy_api//envoy/api/v2/core:base_cc", "@envoy_api//envoy/api/v2/endpoint:endpoint_cc", diff --git a/source/common/upstream/cluster_factory_impl.h b/source/common/upstream/cluster_factory_impl.h index 6d4c8b95c62e5..ea8b1cdf7ac23 100644 --- a/source/common/upstream/cluster_factory_impl.h +++ b/source/common/upstream/cluster_factory_impl.h @@ -44,8 +44,6 @@ #include "common/upstream/resource_manager_impl.h" #include "common/upstream/upstream_impl.h" -#include "server/init_manager_impl.h" - #include "extensions/clusters/well_known_names.h" namespace Envoy { diff --git a/source/common/upstream/upstream_impl.cc b/source/common/upstream/upstream_impl.cc index 301fb23536213..a50b650f4be87 100644 --- a/source/common/upstream/upstream_impl.cc +++ b/source/common/upstream/upstream_impl.cc @@ -645,7 +645,8 @@ ClusterImplBase::ClusterImplBase( const envoy::api::v2::Cluster& cluster, Runtime::Loader& runtime, Server::Configuration::TransportSocketFactoryContext& factory_context, Stats::ScopePtr&& stats_scope, bool added_via_api) - : runtime_(runtime), init_manager_(fmt::format("Cluster {}", cluster.name())) { + : init_manager_(fmt::format("Cluster {}", cluster.name())), + init_watcher_("ClusterImplBase", [this]() { onInitDone(); }), runtime_(runtime) { factory_context.setInitManager(init_manager_); auto socket_factory = createTransportSocketFactory(cluster, factory_context); info_ = std::make_unique(cluster, factory_context.clusterManager().bindConfig(), @@ -715,7 +716,7 @@ void ClusterImplBase::onPreInitComplete() { initialization_started_ = true; ENVOY_LOG(debug, "initializing secondary cluster {} completed", info()->name()); - init_manager_.initialize([this]() { onInitDone(); }); + init_manager_.initialize(init_watcher_); } void ClusterImplBase::onInitDone() { diff --git 
a/source/common/upstream/upstream_impl.h b/source/common/upstream/upstream_impl.h index 8a9096a2e89d6..f2ce1a7e4c3f9 100644 --- a/source/common/upstream/upstream_impl.h +++ b/source/common/upstream/upstream_impl.h @@ -34,14 +34,13 @@ #include "common/common/logger.h" #include "common/config/metadata.h" #include "common/config/well_known_names.h" +#include "common/init/manager_impl.h" #include "common/network/utility.h" #include "common/stats/isolated_store_impl.h" #include "common/upstream/load_balancer_impl.h" #include "common/upstream/outlier_detection_impl.h" #include "common/upstream/resource_manager_impl.h" -#include "server/init_manager_impl.h" - #include "absl/synchronization/mutex.h" namespace Envoy { @@ -673,8 +672,16 @@ class ClusterImplBase : public Cluster, protected Logger::Loggable void { init_manager_.initialize([]() -> void {}); }); + clusterManager().setInitializedCb([this]() -> void { init_manager_.initialize(init_watcher_); }); } void ValidationInstance::shutdown() { diff --git a/source/server/config_validation/server.h b/source/server/config_validation/server.h index 3e5d039d9b930..3d6493302faf4 100644 --- a/source/server/config_validation/server.h +++ b/source/server/config_validation/server.h @@ -145,7 +145,8 @@ class ValidationInstance : Logger::Loggable, // init_manager_ must come before any member that participates in initialization, and destructed // only after referencing members are gone, since initialization continuation can potentially // occur at any point during member lifetime. - InitManagerImpl init_manager_{"Validation server"}; + Init::ManagerImpl init_manager_{"Validation server"}; + Init::WatcherImpl init_watcher_{"(no-op)", []() {}}; // secret_manager_ must come before listener_manager_, config_ and dispatcher_, and destructed // only after these members can no longer reference it, since: // - There may be active filter chains referencing it in listener_manager_. 
diff --git a/source/server/http/admin.cc b/source/server/http/admin.cc index 9c7da83e0e358..09b2b646fbed1 100644 --- a/source/server/http/admin.cc +++ b/source/server/http/admin.cc @@ -639,7 +639,7 @@ Http::Code AdminImpl::handlerServerInfo(absl::string_view, Http::HeaderMap& head server_info.set_version(VersionInfo::version()); switch (server_.initManager().state()) { - case Init::Manager::State::NotInitialized: + case Init::Manager::State::Uninitialized: server_info.set_state(envoy::admin::v2alpha::ServerInfo::PRE_INITIALIZING); break; case Init::Manager::State::Initializing: diff --git a/source/server/init_manager_impl.cc b/source/server/init_manager_impl.cc deleted file mode 100644 index 650d284217c1c..0000000000000 --- a/source/server/init_manager_impl.cc +++ /dev/null @@ -1,66 +0,0 @@ -#include "server/init_manager_impl.h" - -#include - -#include "common/common/assert.h" - -#define TRACE_INIT_MANAGER(fmt, ...) \ - ENVOY_LOG(debug, "InitManagerImpl({}): " fmt, description_, ##__VA_ARGS__) - -namespace Envoy { -namespace Server { - -InitManagerImpl::InitManagerImpl(absl::string_view description) : description_(description) { - TRACE_INIT_MANAGER("constructor"); -} - -InitManagerImpl::~InitManagerImpl() { TRACE_INIT_MANAGER("destructor"); } - -void InitManagerImpl::initialize(std::function callback) { - ASSERT(state_ == State::NotInitialized); - if (targets_.empty()) { - TRACE_INIT_MANAGER("empty targets, initialized"); - callback(); - state_ = State::Initialized; - } else { - TRACE_INIT_MANAGER("initializing"); - callback_ = callback; - state_ = State::Initializing; - // Target::initialize(...) method can modify the list to remove the item currently - // being initialized, so we increment the iterator before calling initialize. 
- for (auto iter = targets_.begin(); iter != targets_.end();) { - TargetWithDescription& target = *iter; - ++iter; - initializeTarget(target); - } - } -} - -void InitManagerImpl::initializeTarget(TargetWithDescription& target) { - TRACE_INIT_MANAGER("invoking initializeTarget {}", target.second); - target.first->initialize([this, &target]() -> void { - TRACE_INIT_MANAGER("completed initializeTarget {}", target.second); - ASSERT(std::find(targets_.begin(), targets_.end(), target) != targets_.end()); - targets_.remove(target); - if (targets_.empty()) { - TRACE_INIT_MANAGER("initialized"); - state_ = State::Initialized; - callback_(); - } - }); -} - -void InitManagerImpl::registerTarget(Init::Target& target, absl::string_view description) { - TRACE_INIT_MANAGER("registerTarget {}", description); - ASSERT(state_ != State::Initialized); - ASSERT(std::find(targets_.begin(), targets_.end(), - TargetWithDescription{&target, std::string(description)}) == targets_.end(), - "Registered duplicate Init::Target"); - targets_.emplace_back(&target, std::string(description)); - if (state_ == State::Initializing) { - initializeTarget(targets_.back()); - } -} - -} // namespace Server -} // namespace Envoy diff --git a/source/server/init_manager_impl.h b/source/server/init_manager_impl.h deleted file mode 100644 index e84ec4fbd32d7..0000000000000 --- a/source/server/init_manager_impl.h +++ /dev/null @@ -1,40 +0,0 @@ -#pragma once - -#include - -#include "envoy/init/init.h" - -#include "common/common/logger.h" - -namespace Envoy { -namespace Server { - -/** - * Implementation of Init::Manager for use during post cluster manager init / pre listening. - * Deprecated, use SafeInit::ManagerImpl instead. - * TODO(mergeconflict): convert all Init::ManagerImpl uses to SafeInit::ManagerImpl. 
- */ -class InitManagerImpl : public Init::Manager, Logger::Loggable { -public: - InitManagerImpl(absl::string_view description); - ~InitManagerImpl() override; - - void initialize(std::function callback); - - // Init::Manager - void registerTarget(Init::Target& target, absl::string_view description) override; - State state() const override { return state_; } - -private: - using TargetWithDescription = std::pair; - - void initializeTarget(TargetWithDescription& target); - - std::list targets_; - State state_{State::NotInitialized}; - std::function callback_; - std::string description_; // For debug tracing. -}; - -} // namespace Server -} // namespace Envoy diff --git a/source/server/lds_api.cc b/source/server/lds_api.cc index 2b5c18629b92d..10aa4b611d121 100644 --- a/source/server/lds_api.cc +++ b/source/server/lds_api.cc @@ -20,19 +20,15 @@ LdsApiImpl::LdsApiImpl(const envoy::api::v2::core::ConfigSource& lds_config, Runtime::RandomGenerator& random, Init::Manager& init_manager, const LocalInfo::LocalInfo& local_info, Stats::Scope& scope, ListenerManager& lm, Api::Api& api) - : listener_manager_(lm), scope_(scope.createScope("listener_manager.lds.")), cm_(cm) { + : listener_manager_(lm), scope_(scope.createScope("listener_manager.lds.")), cm_(cm), + init_target_("LDS", [this]() { subscription_->start({}, *this); }) { subscription_ = Envoy::Config::SubscriptionFactory::subscriptionFromConfigSource( lds_config, local_info, dispatcher, cm, random, *scope_, "envoy.api.v2.ListenerDiscoveryService.FetchListeners", "envoy.api.v2.ListenerDiscoveryService.StreamListeners", api); Config::Utility::checkLocalInfo("lds", local_info); - init_manager.registerTarget(*this, "LDS"); -} - -void LdsApiImpl::initialize(std::function callback) { - initialize_callback_ = callback; - subscription_->start({}, *this); + init_manager.add(init_target_); } void LdsApiImpl::onConfigUpdate(const ResourceVector& resources, const std::string& version_info) { @@ -81,7 +77,7 @@ void 
LdsApiImpl::onConfigUpdate(const ResourceVector& resources, const std::stri } version_info_ = version_info; - runInitializeCallbackIfAny(); + init_target_.ready(); if (!exception_msgs.empty()) { throw EnvoyException(fmt::format("Error adding/updating listener(s) {}", StringUtil::join(exception_msgs, ", "))); @@ -91,14 +87,7 @@ void LdsApiImpl::onConfigUpdate(const ResourceVector& resources, const std::stri void LdsApiImpl::onConfigUpdateFailed(const EnvoyException*) { // We need to allow server startup to continue, even if we have a bad // config. - runInitializeCallbackIfAny(); -} - -void LdsApiImpl::runInitializeCallbackIfAny() { - if (initialize_callback_) { - initialize_callback_(); - initialize_callback_ = nullptr; - } + init_target_.ready(); } } // namespace Server diff --git a/source/server/lds_api.h b/source/server/lds_api.h index 713ead3f118a6..647e5664ea240 100644 --- a/source/server/lds_api.h +++ b/source/server/lds_api.h @@ -5,11 +5,12 @@ #include "envoy/api/api.h" #include "envoy/api/v2/lds.pb.h" #include "envoy/config/subscription.h" -#include "envoy/init/init.h" +#include "envoy/init/manager.h" #include "envoy/server/listener_manager.h" #include "envoy/stats/scope.h" #include "common/common/logger.h" +#include "common/init/target_impl.h" namespace Envoy { namespace Server { @@ -18,7 +19,6 @@ namespace Server { * LDS API implementation that fetches via Subscription. 
*/ class LdsApiImpl : public LdsApi, - public Init::Target, Config::SubscriptionCallbacks, Logger::Loggable { public: @@ -30,9 +30,6 @@ class LdsApiImpl : public LdsApi, // Server::LdsApi std::string versionInfo() const override { return version_info_; } - // Init::Target - void initialize(std::function callback) override; - // Config::SubscriptionCallbacks // TODO(fredlas) deduplicate void onConfigUpdate(const ResourceVector& resources, const std::string& version_info) override; @@ -46,14 +43,12 @@ class LdsApiImpl : public LdsApi, } private: - void runInitializeCallbackIfAny(); - std::unique_ptr> subscription_; std::string version_info_; ListenerManager& listener_manager_; Stats::ScopePtr scope_; Upstream::ClusterManager& cm_; - std::function initialize_callback_; + Init::TargetImpl init_target_; }; } // namespace Server diff --git a/source/server/listener_manager_impl.cc b/source/server/listener_manager_impl.cc index ac8bd1a6c2f21..8598f2779d3dd 100644 --- a/source/server/listener_manager_impl.cc +++ b/source/server/listener_manager_impl.cc @@ -168,6 +168,8 @@ ListenerImpl::ListenerImpl(const envoy::api::v2::Listener& config, const std::st listener_tag_(parent_.factory_.nextListenerTag()), name_(name), modifiable_(modifiable), workers_started_(workers_started), hash_(hash), dynamic_init_manager_(fmt::format("Listener {}", name)), + init_watcher_(std::make_unique( + "ListenerImpl", [this] { parent_.onListenerWarmed(*this); })), local_drain_manager_(parent.factory_.createDrainManager(config.drain_type())), config_(config), version_info_(version_info), listener_filters_timeout_( @@ -317,9 +319,9 @@ ListenerImpl::~ListenerImpl() { // The filter factories may have pending initialize actions (like in the case of RDS). Those // actions will fire in the destructor to avoid blocking initial server startup. If we are using // a local init manager we should block the notification from trying to move us from warming to - // active. 
This is done here explicitly by setting a boolean and then clearing the factory + // active. This is done here explicitly by resetting the watcher and then clearing the factory // vector for clarity. - initialize_canceled_ = true; + init_watcher_.reset(); destination_ports_map_.clear(); } @@ -629,13 +631,9 @@ void ListenerImpl::initialize() { last_updated_ = timeSource().systemTime(); // If workers have already started, we shift from using the global init manager to using a local // per listener init manager. See ~ListenerImpl() for why we gate the onListenerWarmed() call - // with initialize_canceled_. + // by resetting the watcher. if (workers_started_) { - dynamic_init_manager_.initialize([this]() -> void { - if (!initialize_canceled_) { - parent_.onListenerWarmed(*this); - } - }); + dynamic_init_manager_.initialize(*init_watcher_); } } diff --git a/source/server/listener_manager_impl.h b/source/server/listener_manager_impl.h index 647edfa5a89c9..41da1135d4443 100644 --- a/source/server/listener_manager_impl.h +++ b/source/server/listener_manager_impl.h @@ -12,10 +12,10 @@ #include "envoy/stats/scope.h" #include "common/common/logger.h" +#include "common/init/manager_impl.h" #include "common/network/cidr_range.h" #include "common/network/lc_trie.h" -#include "server/init_manager_impl.h" #include "server/lds_api.h" namespace Envoy { @@ -403,8 +403,14 @@ class ListenerImpl : public Network::ListenerConfig, const bool modifiable_; const bool workers_started_; const uint64_t hash_; - InitManagerImpl dynamic_init_manager_; - bool initialize_canceled_{}; + + // This init manager is populated with targets from the filter chain factories, namely + // RdsRouteConfigSubscription::init_target_, so the listener can wait for route configs. + Init::ManagerImpl dynamic_init_manager_; + + // This init watcher, if available, notifies the "parent" listener manager when listener + // initialization is complete. It may be reset to cancel interest. 
+ std::unique_ptr init_watcher_; std::vector listener_filter_factories_; DrainManagerPtr local_drain_manager_; bool saw_listener_create_failure_{}; diff --git a/source/server/server.cc b/source/server/server.cc index c15c4c29ccd57..a75d0d291ba47 100644 --- a/source/server/server.cc +++ b/source/server/server.cc @@ -412,9 +412,13 @@ uint64_t InstanceImpl::numConnections() { return listener_manager_->numConnectio RunHelper::RunHelper(Instance& instance, const Options& options, Event::Dispatcher& dispatcher, Upstream::ClusterManager& cm, AccessLog::AccessLogManager& access_log_manager, - InitManagerImpl& init_manager, OverloadManager& overload_manager, - std::function workers_start_cb) { - + Init::Manager& init_manager, OverloadManager& overload_manager, + std::function workers_start_cb) + : init_watcher_("RunHelper", [&instance, workers_start_cb]() { + if (!instance.isShutdown()) { + workers_start_cb(); + } + }) { // Setup signals. if (options.signalHandlingEnabled()) { sigterm_ = dispatcher.listenForSignal(SIGTERM, [&instance]() { @@ -445,7 +449,7 @@ RunHelper::RunHelper(Instance& instance, const Options& options, Event::Dispatch // this can fire immediately if all clusters have already initialized. Also note that we need // to guard against shutdown at two different levels since SIGTERM can come in once the run loop // starts. - cm.setInitializedCb([&instance, &init_manager, &cm, workers_start_cb]() { + cm.setInitializedCb([&instance, &init_manager, &cm, this]() { if (instance.isShutdown()) { return; } @@ -456,16 +460,7 @@ RunHelper::RunHelper(Instance& instance, const Options& options, Event::Dispatch cm.adsMux().pause(Config::TypeUrl::get().RouteConfiguration); ENVOY_LOG(info, "all clusters initialized. initializing init manager"); - - // Note: the lambda below should not capture "this" since the RunHelper object may - // have been destructed by the time it gets executed. 
- init_manager.initialize([&instance, workers_start_cb]() { - if (instance.isShutdown()) { - return; - } - - workers_start_cb(); - }); + init_manager.initialize(init_watcher_); // Now that we're execute all the init callbacks we can resume RDS // as we've subscribed to all the statically defined RDS resources. @@ -474,11 +469,10 @@ RunHelper::RunHelper(Instance& instance, const Options& options, Event::Dispatch } void InstanceImpl::run() { - // We need the RunHelper to be available to call from InstanceImpl::shutdown() below, so - // we save it as a member variable. - run_helper_ = std::make_unique(*this, options_, *dispatcher_, clusterManager(), - access_log_manager_, init_manager_, overloadManager(), - [this]() -> void { startWorkers(); }); + // RunHelper exists primarily to facilitate testing of how we respond to early shutdown during + // startup (see RunHelperTest in server_test.cc). + auto run_helper = RunHelper(*this, options_, *dispatcher_, clusterManager(), access_log_manager_, + init_manager_, overloadManager(), [this] { startWorkers(); }); // Run the main dispatch loop waiting to exit. 
ENVOY_LOG(info, "starting main dispatch loop"); @@ -491,7 +485,6 @@ void InstanceImpl::run() { watchdog.reset(); terminate(); - run_helper_.reset(); } void InstanceImpl::terminate() { diff --git a/source/server/server.h b/source/server/server.h index b897918cf8d7f..c09479dfb610d 100644 --- a/source/server/server.h +++ b/source/server/server.h @@ -21,6 +21,7 @@ #include "common/common/logger_delegates.h" #include "common/grpc/async_client_manager_impl.h" #include "common/http/context_impl.h" +#include "common/init/manager_impl.h" #include "common/memory/heap_shrinker.h" #include "common/runtime/runtime_impl.h" #include "common/secret/secret_manager_impl.h" @@ -28,7 +29,6 @@ #include "server/configuration_impl.h" #include "server/http/admin.h" -#include "server/init_manager_impl.h" #include "server/listener_manager_impl.h" #include "server/overload_manager_impl.h" #include "server/test_hooks.h" @@ -122,10 +122,11 @@ class RunHelper : Logger::Loggable { public: RunHelper(Instance& instance, const Options& options, Event::Dispatcher& dispatcher, Upstream::ClusterManager& cm, AccessLog::AccessLogManager& access_log_manager, - InitManagerImpl& init_manager, OverloadManager& overload_manager, + Init::Manager& init_manager, OverloadManager& overload_manager, std::function workers_start_cb); private: + Init::WatcherImpl init_watcher_; Event::SignalEventPtr sigterm_; Event::SignalEventPtr sigint_; Event::SignalEventPtr sig_usr_1_; @@ -209,8 +210,8 @@ class InstanceImpl : Logger::Loggable, // init_manager_ must come before any member that participates in initialization, and destructed // only after referencing members are gone, since initialization continuation can potentially - // occur at any point during member lifetime. - InitManagerImpl init_manager_{"Server"}; + // occur at any point during member lifetime. This init manager is populated with LdsApi targets. 
+ Init::ManagerImpl init_manager_{"Server"}; // secret_manager_ must come before listener_manager_, config_ and dispatcher_, and destructed // only after these members can no longer reference it, since: // - There may be active filter chains referencing it in listener_manager_. @@ -255,7 +256,6 @@ class InstanceImpl : Logger::Loggable, Upstream::ProdClusterInfoFactory info_factory_; Upstream::HdsDelegatePtr hds_delegate_; std::unique_ptr overload_manager_; - std::unique_ptr run_helper_; Envoy::MutexTracer* mutex_tracer_; Http::ContextImpl http_context_; std::unique_ptr heap_shrinker_; diff --git a/test/common/config/config_provider_impl_test.cc b/test/common/config/config_provider_impl_test.cc index f25f77a0b1dd8..56d6ab008304f 100644 --- a/test/common/config/config_provider_impl_test.cc +++ b/test/common/config/config_provider_impl_test.cc @@ -235,7 +235,8 @@ test::common::config::DummyConfig parseDummyConfigFromYaml(const std::string& ya // subscriptions, config protos and data structures generated as a result of the // configurations (i.e., the ConfigProvider::Config). 
TEST_F(ConfigProviderImplTest, SharedOwnership) { - factory_context_.init_manager_.initialize(); + Init::ExpectableWatcherImpl watcher; + factory_context_.init_manager_.initialize(watcher); envoy::api::v2::core::ApiConfigSource config_source_proto; config_source_proto.set_api_type(envoy::api::v2::core::ApiConfigSource::GRPC); diff --git a/test/common/safe_init/BUILD b/test/common/init/BUILD similarity index 69% rename from test/common/safe_init/BUILD rename to test/common/init/BUILD index 35dd33cd09b7c..894e7493aa722 100644 --- a/test/common/safe_init/BUILD +++ b/test/common/init/BUILD @@ -12,7 +12,7 @@ envoy_cc_test( name = "watcher_impl_test", srcs = ["watcher_impl_test.cc"], deps = [ - "//test/mocks/safe_init:safe_init_mocks", + "//test/mocks/init:init_mocks", ], ) @@ -20,7 +20,7 @@ envoy_cc_test( name = "target_impl_test", srcs = ["target_impl_test.cc"], deps = [ - "//test/mocks/safe_init:safe_init_mocks", + "//test/mocks/init:init_mocks", ], ) @@ -28,7 +28,7 @@ envoy_cc_test( name = "manager_impl_test", srcs = ["manager_impl_test.cc"], deps = [ - "//source/common/safe_init:manager_lib", - "//test/mocks/safe_init:safe_init_mocks", + "//source/common/init:manager_lib", + "//test/mocks/init:init_mocks", ], ) diff --git a/test/common/safe_init/manager_impl_test.cc b/test/common/init/manager_impl_test.cc similarity index 86% rename from test/common/safe_init/manager_impl_test.cc rename to test/common/init/manager_impl_test.cc index 88e6cc97c7d34..8a479b0c1977a 100644 --- a/test/common/safe_init/manager_impl_test.cc +++ b/test/common/init/manager_impl_test.cc @@ -1,6 +1,6 @@ -#include "common/safe_init/manager_impl.h" +#include "common/init/manager_impl.h" -#include "test/mocks/safe_init/mocks.h" +#include "test/mocks/init/mocks.h" #include "gtest/gtest.h" @@ -8,14 +8,14 @@ using ::testing::InSequence; using ::testing::InvokeWithoutArgs; namespace Envoy { -namespace SafeInit { +namespace Init { namespace { void expectUninitialized(const Manager& m) { 
EXPECT_EQ(Manager::State::Uninitialized, m.state()); } void expectInitializing(const Manager& m) { EXPECT_EQ(Manager::State::Initializing, m.state()); } void expectInitialized(const Manager& m) { EXPECT_EQ(Manager::State::Initialized, m.state()); } -TEST(SafeInitManagerImplTest, AddImmediateTargetsWhenUninitialized) { +TEST(InitManagerImplTest, AddImmediateTargetsWhenUninitialized) { InSequence s; ManagerImpl m("test"); @@ -37,7 +37,7 @@ TEST(SafeInitManagerImplTest, AddImmediateTargetsWhenUninitialized) { expectInitialized(m); } -TEST(SafeInitManagerImplTest, AddAsyncTargetsWhenUninitialized) { +TEST(InitManagerImplTest, AddAsyncTargetsWhenUninitialized) { InSequence s; ManagerImpl m("test"); @@ -67,7 +67,7 @@ TEST(SafeInitManagerImplTest, AddAsyncTargetsWhenUninitialized) { expectInitialized(m); } -TEST(SafeInitManagerImplTest, AddMixedTargetsWhenUninitialized) { +TEST(InitManagerImplTest, AddMixedTargetsWhenUninitialized) { InSequence s; ManagerImpl m("test"); @@ -93,7 +93,7 @@ TEST(SafeInitManagerImplTest, AddMixedTargetsWhenUninitialized) { expectInitialized(m); } -TEST(SafeInitManagerImplTest, AddImmediateTargetWhenInitializing) { +TEST(InitManagerImplTest, AddImmediateTargetWhenInitializing) { InSequence s; ManagerImpl m("test"); @@ -121,7 +121,7 @@ TEST(SafeInitManagerImplTest, AddImmediateTargetWhenInitializing) { expectInitialized(m); } -TEST(SafeInitManagerImplTest, UnavailableTarget) { +TEST(InitManagerImplTest, UnavailableTarget) { InSequence s; ManagerImpl m("test"); @@ -142,7 +142,7 @@ TEST(SafeInitManagerImplTest, UnavailableTarget) { expectInitialized(m); } -TEST(SafeInitManagerImplTest, UnavailableManager) { +TEST(InitManagerImplTest, UnavailableManager) { InSequence s; ExpectableTargetImpl t("t"); @@ -165,7 +165,7 @@ TEST(SafeInitManagerImplTest, UnavailableManager) { t.ready(); } -TEST(SafeInitManagerImplTest, UnavailableWatcher) { +TEST(InitManagerImplTest, UnavailableWatcher) { InSequence s; ManagerImpl m("test"); @@ -190,5 +190,5 @@ 
TEST(SafeInitManagerImplTest, UnavailableWatcher) { } } // namespace -} // namespace SafeInit +} // namespace Init } // namespace Envoy diff --git a/test/common/safe_init/target_impl_test.cc b/test/common/init/target_impl_test.cc similarity index 82% rename from test/common/safe_init/target_impl_test.cc rename to test/common/init/target_impl_test.cc index df0c41fad2f1e..7cebbb371d9e9 100644 --- a/test/common/safe_init/target_impl_test.cc +++ b/test/common/init/target_impl_test.cc @@ -1,19 +1,19 @@ -#include "test/mocks/safe_init/mocks.h" +#include "test/mocks/init/mocks.h" #include "gtest/gtest.h" using ::testing::InSequence; namespace Envoy { -namespace SafeInit { +namespace Init { namespace { -TEST(SafeInitTargetImplTest, Name) { +TEST(InitTargetImplTest, Name) { ExpectableTargetImpl target; EXPECT_EQ("target test", target.name()); } -TEST(SafeInitTargetImplTest, InitializeWhenAvailable) { +TEST(InitTargetImplTest, InitializeWhenAvailable) { InSequence s; ExpectableTargetImpl target; @@ -32,7 +32,7 @@ TEST(SafeInitTargetImplTest, InitializeWhenAvailable) { EXPECT_FALSE(target.ready()); } -TEST(SafeInitTargetImplTest, InitializeWhenUnavailable) { +TEST(InitTargetImplTest, InitializeWhenUnavailable) { ExpectableWatcherImpl watcher; TargetHandlePtr handle; { @@ -45,7 +45,7 @@ TEST(SafeInitTargetImplTest, InitializeWhenUnavailable) { EXPECT_FALSE(handle->initialize(watcher)); } -TEST(SafeInitTargetImplTest, ReadyWhenWatcherUnavailable) { +TEST(InitTargetImplTest, ReadyWhenWatcherUnavailable) { ExpectableTargetImpl target; { ExpectableWatcherImpl watcher; @@ -61,5 +61,5 @@ TEST(SafeInitTargetImplTest, ReadyWhenWatcherUnavailable) { } } // namespace -} // namespace SafeInit +} // namespace Init } // namespace Envoy diff --git a/test/common/safe_init/watcher_impl_test.cc b/test/common/init/watcher_impl_test.cc similarity index 72% rename from test/common/safe_init/watcher_impl_test.cc rename to test/common/init/watcher_impl_test.cc index 39abccbf40f93..020c5467bbc74 
100644 --- a/test/common/safe_init/watcher_impl_test.cc +++ b/test/common/init/watcher_impl_test.cc @@ -1,17 +1,17 @@ -#include "test/mocks/safe_init/mocks.h" +#include "test/mocks/init/mocks.h" #include "gtest/gtest.h" namespace Envoy { -namespace SafeInit { +namespace Init { namespace { -TEST(SafeInitWatcherImplTest, Name) { +TEST(InitWatcherImplTest, Name) { ExpectableWatcherImpl watcher; EXPECT_EQ("test", watcher.name()); } -TEST(SafeInitWatcherImplTest, ReadyWhenAvailable) { +TEST(InitWatcherImplTest, ReadyWhenAvailable) { ExpectableWatcherImpl watcher; // notifying the watcher through its handle should invoke ready(). @@ -19,7 +19,7 @@ TEST(SafeInitWatcherImplTest, ReadyWhenAvailable) { EXPECT_TRUE(watcher.createHandle("test")->ready()); } -TEST(SafeInitWatcherImplTest, ReadyWhenUnavailable) { +TEST(InitWatcherImplTest, ReadyWhenUnavailable) { WatcherHandlePtr handle; { ExpectableWatcherImpl watcher; @@ -32,5 +32,5 @@ TEST(SafeInitWatcherImplTest, ReadyWhenUnavailable) { } } // namespace -} // namespace SafeInit +} // namespace Init } // namespace Envoy diff --git a/test/common/router/BUILD b/test/common/router/BUILD index b6d1ac73a979e..8a3de57a8909e 100644 --- a/test/common/router/BUILD +++ b/test/common/router/BUILD @@ -62,7 +62,6 @@ envoy_cc_test( "//source/common/json:json_loader_lib", "//source/common/router:rds_lib", "//source/server/http:admin_lib", - "//test/mocks/init:init_mocks", "//test/mocks/local_info:local_info_mocks", "//test/mocks/server:server_mocks", "//test/mocks/thread_local:thread_local_mocks", diff --git a/test/common/router/rds_impl_test.cc b/test/common/router/rds_impl_test.cc index 266793ce209ad..5aa1ac1c96e69 100644 --- a/test/common/router/rds_impl_test.cc +++ b/test/common/router/rds_impl_test.cc @@ -49,7 +49,15 @@ parseHttpConnectionManagerFromJson(const std::string& json_string, const Stats:: class RdsTestBase : public testing::Test { public: - RdsTestBase() : request_(&factory_context_.cluster_manager_.async_client_) {} + 
RdsTestBase() : request_(&factory_context_.cluster_manager_.async_client_) { + ON_CALL(factory_context_.init_manager_, add(_)) + .WillByDefault(Invoke([this](const Init::Target& target) { + init_target_handle_ = target.createHandle("test"); + })); + ON_CALL(factory_context_.init_manager_, initialize(_)) + .WillByDefault(Invoke( + [this](const Init::Watcher& watcher) { init_target_handle_->initialize(watcher); })); + } void expectRequest() { EXPECT_CALL(factory_context_.cluster_manager_, httpAsyncClientForCluster("foo_cluster")); @@ -72,6 +80,8 @@ class RdsTestBase : public testing::Test { Event::SimulatedTimeSystem time_system_; NiceMock factory_context_; + Init::ExpectableWatcherImpl init_watcher_; + Init::TargetHandlePtr init_target_handle_; Http::MockAsyncClientRequest request_; Http::AsyncClient::Callbacks* callbacks_{}; Event::MockTimer* interval_timer_{}; @@ -112,12 +122,12 @@ class RdsImplTest : public RdsTestBase { EXPECT_CALL(cluster, info()); EXPECT_CALL(*cluster.info_, type()); interval_timer_ = new Event::MockTimer(&factory_context_.dispatcher_); - EXPECT_CALL(factory_context_.init_manager_, registerTarget(_, _)); + EXPECT_CALL(factory_context_.init_manager_, add(_)); rds_ = RouteConfigProviderUtil::create(parseHttpConnectionManagerFromJson(config_json, scope_), factory_context_, "foo.", *route_config_provider_manager_); expectRequest(); - factory_context_.init_manager_.initialize(); + factory_context_.init_manager_.initialize(init_watcher_); } NiceMock scope_; @@ -198,7 +208,7 @@ TEST_F(RdsImplTest, DestroyDuringInitialize) { InSequence s; setup(); - EXPECT_CALL(factory_context_.init_manager_.initialized_, ready()); + EXPECT_CALL(init_watcher_, ready()); EXPECT_CALL(request_, cancel()); rds_.reset(); } @@ -232,7 +242,7 @@ TEST_F(RdsImplTest, Basic) { Http::HeaderMapPtr{new Http::TestHeaderMapImpl{{":status", "200"}}})); message->body() = std::make_unique(response1_json); - EXPECT_CALL(factory_context_.init_manager_.initialized_, ready()); + 
EXPECT_CALL(init_watcher_, ready()); EXPECT_CALL(*interval_timer_, enableTimer(_)); callbacks_->onSuccess(std::move(message)); EXPECT_EQ(nullptr, rds_->config()->route(Http::TestHeaderMapImpl{{":authority", "foo"}}, 0)); @@ -342,7 +352,7 @@ TEST_F(RdsImplTest, Failure) { Http::HeaderMapPtr{new Http::TestHeaderMapImpl{{":status", "200"}}})); message->body() = std::make_unique(response_json); - EXPECT_CALL(factory_context_.init_manager_.initialized_, ready()); + EXPECT_CALL(init_watcher_, ready()); EXPECT_CALL(*interval_timer_, enableTimer(_)); callbacks_->onSuccess(std::move(message)); @@ -465,7 +475,7 @@ name: foo // Static + dynamic. setup(); expectRequest(); - factory_context_.init_manager_.initialize(); + factory_context_.init_manager_.initialize(init_watcher_); const std::string response1_json = R"EOF( { @@ -483,7 +493,7 @@ name: foo Http::MessagePtr message(new Http::ResponseMessageImpl( Http::HeaderMapPtr{new Http::TestHeaderMapImpl{{":status", "200"}}})); message->body() = std::make_unique(response1_json); - EXPECT_CALL(factory_context_.init_manager_.initialized_, ready()); + EXPECT_CALL(init_watcher_, ready()); EXPECT_CALL(*interval_timer_, enableTimer(_)); callbacks_->onSuccess(std::move(message)); message_ptr = factory_context_.admin_.config_tracker_.config_tracker_callbacks_["routes"](); @@ -519,8 +529,6 @@ name: foo TEST_F(RouteConfigProviderManagerImplTest, Basic) { Buffer::OwnedImpl data; - factory_context_.init_manager_.initialize(); - // Get a RouteConfigProvider. This one should create an entry in the RouteConfigProviderManager. 
setup(); @@ -616,9 +624,9 @@ TEST_F(RouteConfigProviderManagerImplTest, ValidateFail) { TEST_F(RouteConfigProviderManagerImplTest, onConfigUpdateEmpty) { setup(); - factory_context_.init_manager_.initialize(); + factory_context_.init_manager_.initialize(init_watcher_); auto& provider_impl = dynamic_cast(*provider_.get()); - EXPECT_CALL(factory_context_.init_manager_.initialized_, ready()); + EXPECT_CALL(init_watcher_, ready()); provider_impl.subscription().onConfigUpdate({}, ""); EXPECT_EQ( 1UL, factory_context_.scope_.counter("foo_prefix.rds.foo_route_config.update_empty").value()); @@ -626,12 +634,12 @@ TEST_F(RouteConfigProviderManagerImplTest, onConfigUpdateEmpty) { TEST_F(RouteConfigProviderManagerImplTest, onConfigUpdateWrongSize) { setup(); - factory_context_.init_manager_.initialize(); + factory_context_.init_manager_.initialize(init_watcher_); auto& provider_impl = dynamic_cast(*provider_.get()); Protobuf::RepeatedPtrField route_configs; route_configs.Add(); route_configs.Add(); - EXPECT_CALL(factory_context_.init_manager_.initialized_, ready()); + EXPECT_CALL(init_watcher_, ready()); EXPECT_THROW_WITH_MESSAGE(provider_impl.subscription().onConfigUpdate(route_configs, ""), EnvoyException, "Unexpected RDS resource length: 2"); } diff --git a/test/common/secret/sds_api_test.cc b/test/common/secret/sds_api_test.cc index 87f6aacb16819..91108c6c07c0f 100644 --- a/test/common/secret/sds_api_test.cc +++ b/test/common/secret/sds_api_test.cc @@ -39,7 +39,12 @@ TEST_F(SdsApiTest, BasicTest) { const envoy::service::discovery::v2::SdsDummy dummy; NiceMock server; NiceMock init_manager; - EXPECT_CALL(init_manager, registerTarget(_, _)); + NiceMock init_watcher; + Init::TargetHandlePtr init_target_handle; + EXPECT_CALL(init_manager, add(_)) + .WillOnce(Invoke([&init_target_handle](const Init::Target& target) { + init_target_handle = target.createHandle("test"); + })); envoy::api::v2::core::ConfigSource config_source; 
config_source.mutable_api_config_source()->set_api_type( @@ -61,8 +66,8 @@ TEST_F(SdsApiTest, BasicTest) { EXPECT_CALL(*factory, create()).WillOnce(Invoke([grpc_client] { return Grpc::AsyncClientPtr{grpc_client}; })); - EXPECT_CALL(init_manager.initialized_, ready()); - init_manager.initialize(); + EXPECT_CALL(init_watcher, ready()); + init_target_handle->initialize(init_watcher); } // Validate that TlsCertificateSdsApi updates secrets successfully if a good secret diff --git a/test/integration/ads_integration_test.cc b/test/integration/ads_integration_test.cc index 736a3dd783811..1e1e67bf565ee 100644 --- a/test/integration/ads_integration_test.cc +++ b/test/integration/ads_integration_test.cc @@ -675,6 +675,85 @@ TEST_P(AdsIntegrationTest, RdsAfterLdsWithRdsChange) { makeSingleRequest(); } +// Regression test for the use-after-free crash when a listener awaiting an RDS update is destroyed +// (#6116). +TEST_P(AdsIntegrationTest, RdsAfterLdsInvalidated) { + + initialize(); + + // STEP 1: Initial setup + // --------------------- + + // Initial request for any cluster, respond with cluster_0 version 1 + EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, "", {})); + sendDiscoveryResponse(Config::TypeUrl::get().Cluster, + {buildCluster("cluster_0")}, "1"); + + // Initial request for load assignment for cluster_0, respond with version 1 + EXPECT_TRUE( + compareDiscoveryRequest(Config::TypeUrl::get().ClusterLoadAssignment, "", {"cluster_0"})); + sendDiscoveryResponse( + Config::TypeUrl::get().ClusterLoadAssignment, {buildClusterLoadAssignment("cluster_0")}, "1"); + + // Request for updates to cluster_0 version 1, no response + EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, "1", {})); + + // Initial request for any listener, respond with listener_0 version 1 + EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Listener, "", {})); + sendDiscoveryResponse( + Config::TypeUrl::get().Listener, {buildListener("listener_0", 
"route_config_0")}, "1"); + + // Request for updates to load assignment version 1, no response + EXPECT_TRUE( + compareDiscoveryRequest(Config::TypeUrl::get().ClusterLoadAssignment, "1", {"cluster_0"})); + + // Initial request for route_config_0 (referenced by listener_0), respond with version 1 + EXPECT_TRUE( + compareDiscoveryRequest(Config::TypeUrl::get().RouteConfiguration, "", {"route_config_0"})); + sendDiscoveryResponse( + Config::TypeUrl::get().RouteConfiguration, {buildRouteConfig("route_config_0", "cluster_0")}, + "1"); + + // Wait for initial listener to be created successfully. Any subsequent listeners will then use + // the dynamic InitManager (see ListenerImpl::initManager). + test_server_->waitForCounterGe("listener_manager.listener_create_success", 1); + + // STEP 2: Listener with dynamic InitManager + // ----------------------------------------- + + // Request for updates to listener_0 version 1, respond with version 2. Under the hood, this + // registers RdsRouteConfigSubscription's init target with the new ListenerImpl instance. + EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Listener, "1", {})); + sendDiscoveryResponse( + Config::TypeUrl::get().Listener, {buildListener("listener_0", "route_config_1")}, "2"); + + // Request for updates to route_config_0 version 1, and initial request for route_config_1 + // (referenced by listener_0), don't respond yet! + EXPECT_TRUE( + compareDiscoveryRequest(Config::TypeUrl::get().RouteConfiguration, "1", {"route_config_0"})); + EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().RouteConfiguration, "1", + {"route_config_1", "route_config_0"})); + + // STEP 3: "New listener, who dis?" + // -------------------------------- + + // Request for updates to listener_0 version 2, respond with version 3 (updated stats prefix). + // This should blow away the previous ListenerImpl instance, which is still waiting for + // route_config_1... 
+ EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Listener, "2", {})); + sendDiscoveryResponse( + Config::TypeUrl::get().Listener, {buildListener("listener_0", "route_config_1", "omg")}, "3"); + + // Respond to prior request for route_config_1. Under the hood, this invokes + // RdsRouteConfigSubscription::runInitializeCallbackIfAny, which references the defunct + // ListenerImpl instance. We should not crash in this event! + sendDiscoveryResponse( + Config::TypeUrl::get().RouteConfiguration, {buildRouteConfig("route_config_1", "cluster_0")}, + "1"); + + test_server_->waitForCounterGe("listener_manager.listener_create_success", 2); +} + class AdsFailIntegrationTest : public Grpc::GrpcClientIntegrationParamTest, public HttpIntegrationTest { public: diff --git a/test/integration/sds_dynamic_integration_test.cc b/test/integration/sds_dynamic_integration_test.cc index d7508b3339477..a8cc23cafaa08 100644 --- a/test/integration/sds_dynamic_integration_test.cc +++ b/test/integration/sds_dynamic_integration_test.cc @@ -16,7 +16,6 @@ #include "test/integration/http_integration.h" #include "test/integration/server.h" #include "test/integration/ssl_utility.h" -#include "test/mocks/init/mocks.h" #include "test/mocks/secret/mocks.h" #include "test/mocks/server/mocks.h" #include "test/test_common/network_utility.h" diff --git a/test/integration/sds_static_integration_test.cc b/test/integration/sds_static_integration_test.cc index dcd1d0814d1ce..453942faf0c06 100644 --- a/test/integration/sds_static_integration_test.cc +++ b/test/integration/sds_static_integration_test.cc @@ -14,7 +14,6 @@ #include "test/integration/http_integration.h" #include "test/integration/server.h" #include "test/integration/ssl_utility.h" -#include "test/mocks/init/mocks.h" #include "test/mocks/secret/mocks.h" #include "test/mocks/server/mocks.h" #include "test/test_common/network_utility.h" diff --git a/test/mocks/init/BUILD b/test/mocks/init/BUILD index 682c862e1a4e7..5aa9f74bacd3c 100644 --- 
a/test/mocks/init/BUILD +++ b/test/mocks/init/BUILD @@ -13,7 +13,8 @@ envoy_cc_mock( srcs = ["mocks.cc"], hdrs = ["mocks.h"], deps = [ - "//include/envoy/init:init_interface", - "//test/mocks:common_lib", + "//include/envoy/init:manager_interface", + "//source/common/init:target_lib", + "//source/common/init:watcher_lib", ], ) diff --git a/test/mocks/init/mocks.cc b/test/mocks/init/mocks.cc index f968ad7c290bc..9e28923af3c0c 100644 --- a/test/mocks/init/mocks.cc +++ b/test/mocks/init/mocks.cc @@ -1,33 +1,25 @@ -#include "mocks.h" - -#include - -#include "gmock/gmock.h" -#include "gtest/gtest.h" - -using testing::_; -using testing::Invoke; +#include "test/mocks/init/mocks.h" namespace Envoy { namespace Init { -MockTarget::MockTarget() { - ON_CALL(*this, initialize(_)) - .WillByDefault(Invoke([this](std::function callback) -> void { - EXPECT_EQ(nullptr, callback_); - callback_ = callback; - })); -} +using ::testing::Invoke; -MockTarget::~MockTarget() {} - -MockManager::MockManager() { - ON_CALL(*this, registerTarget(_, _)) - .WillByDefault(Invoke( - [this](Target& target, absl::string_view) -> void { targets_.push_back(&target); })); +ExpectableWatcherImpl::ExpectableWatcherImpl(absl::string_view name) + : WatcherImpl(name, {[this]() { ready(); }}) {} +::testing::internal::TypedExpectation& ExpectableWatcherImpl::expectReady() const { + return EXPECT_CALL(*this, ready()); } -MockManager::~MockManager() {} +ExpectableTargetImpl::ExpectableTargetImpl(absl::string_view name) + : TargetImpl(name, {[this]() { initialize(); }}) {} +::testing::internal::TypedExpectation& ExpectableTargetImpl::expectInitialize() { + return EXPECT_CALL(*this, initialize()); +} +::testing::internal::TypedExpectation& +ExpectableTargetImpl::expectInitializeWillCallReady() { + return expectInitialize().WillOnce(Invoke([this]() { ready(); })); +} } // namespace Init } // namespace Envoy diff --git a/test/mocks/init/mocks.h b/test/mocks/init/mocks.h index e8f6d093a8270..44189ac091447 100644 --- 
a/test/mocks/init/mocks.h +++ b/test/mocks/init/mocks.h @@ -1,44 +1,65 @@ #pragma once -#include -#include +#include "envoy/init/manager.h" -#include "envoy/init/init.h" - -#include "test/mocks/common.h" +#include "common/init/target_impl.h" +#include "common/init/watcher_impl.h" #include "gmock/gmock.h" namespace Envoy { namespace Init { -class MockTarget : public Target { +/** + * ExpectableWatcherImpl is a real WatcherImpl, subclassed to add a mock `ready` method that you can + * set expectations on in tests. Tests should never want a watcher with different behavior than the + * real implementation. + */ +class ExpectableWatcherImpl : public WatcherImpl { public: - MockTarget(); - ~MockTarget(); - - MOCK_METHOD1(initialize, void(std::function callback)); + ExpectableWatcherImpl(absl::string_view name = "test"); + MOCK_CONST_METHOD0(ready, void()); - std::function callback_; + /** + * Convenience method to provide a shorthand for EXPECT_CALL(watcher, ready()). Can be chained, + * for example: watcher.expectReady().Times(0); + */ + ::testing::internal::TypedExpectation& expectReady() const; }; -class MockManager : public Manager { +/** + * ExpectableTargetImpl is a real TargetImpl, subclassed to add a mock `initialize` method that you + * can set expectations on in tests. Tests should never want a target with a different behavior than + * the real implementation. + */ +class ExpectableTargetImpl : public TargetImpl { public: - MockManager(); - ~MockManager(); + ExpectableTargetImpl(absl::string_view name = "test"); + MOCK_METHOD0(initialize, void()); - void initialize() { - for (auto target : targets_) { - target->initialize([this]() -> void { initialized_.ready(); }); - } - } + /** + * Convenience method to provide a shorthand for EXPECT_CALL(target, initialize()). 
Can be + * chained, for example: target.expectInitialize().Times(0); + */ + ::testing::internal::TypedExpectation& expectInitialize(); - // Init::Manager - MOCK_METHOD2(registerTarget, void(Target& target, absl::string_view description)); - MOCK_CONST_METHOD0(state, State()); + /** + * Convenience method to provide a shorthand for expectInitialize() with mocked behavior of + * calling `ready` immediately. + */ + ::testing::internal::TypedExpectation& expectInitializeWillCallReady(); +}; - std::list targets_; - ReadyWatcher initialized_; +/** + * MockManager is a typical mock. In many cases, it won't be necessary to mock any of its methods. + * In cases where its `add` and `initialize` methods are actually called in a test, it's usually + * sufficient to mock `add` by saving the target argument locally, and to mock `initialize` by + * invoking the saved target with the watcher argument. + */ +struct MockManager : Manager { + MOCK_CONST_METHOD0(state, Manager::State()); + MOCK_METHOD1(add, void(const Target&)); + MOCK_METHOD1(initialize, void(const Watcher&)); }; } // namespace Init diff --git a/test/mocks/router/BUILD b/test/mocks/router/BUILD index 75e4a07c6f393..76e2c77be171b 100644 --- a/test/mocks/router/BUILD +++ b/test/mocks/router/BUILD @@ -14,7 +14,6 @@ envoy_cc_mock( hdrs = ["mocks.h"], deps = [ "//include/envoy/event:dispatcher_interface", - "//include/envoy/init:init_interface", "//include/envoy/json:json_object_interface", "//include/envoy/local_info:local_info_interface", "//include/envoy/router:route_config_provider_manager_interface", diff --git a/test/mocks/router/mocks.h b/test/mocks/router/mocks.h index 8a1e7d30653d8..ba40815aaab49 100644 --- a/test/mocks/router/mocks.h +++ b/test/mocks/router/mocks.h @@ -10,7 +10,6 @@ #include "envoy/config/typed_metadata.h" #include "envoy/event/dispatcher.h" -#include "envoy/init/init.h" #include "envoy/json/json_object.h" #include "envoy/local_info/local_info.h" #include "envoy/router/rds.h" diff --git 
a/test/mocks/safe_init/BUILD b/test/mocks/safe_init/BUILD deleted file mode 100644 index fbb24c52d1861..0000000000000 --- a/test/mocks/safe_init/BUILD +++ /dev/null @@ -1,20 +0,0 @@ -licenses(["notice"]) # Apache 2 - -load( - "//bazel:envoy_build_system.bzl", - "envoy_cc_mock", - "envoy_package", -) - -envoy_package() - -envoy_cc_mock( - name = "safe_init_mocks", - srcs = ["mocks.cc"], - hdrs = ["mocks.h"], - deps = [ - "//include/envoy/safe_init:manager_interface", - "//source/common/safe_init:target_lib", - "//source/common/safe_init:watcher_lib", - ], -) diff --git a/test/mocks/safe_init/mocks.cc b/test/mocks/safe_init/mocks.cc deleted file mode 100644 index 1ef93da2f3074..0000000000000 --- a/test/mocks/safe_init/mocks.cc +++ /dev/null @@ -1,25 +0,0 @@ -#include "test/mocks/safe_init/mocks.h" - -namespace Envoy { -namespace SafeInit { - -using ::testing::Invoke; - -ExpectableWatcherImpl::ExpectableWatcherImpl(absl::string_view name) - : WatcherImpl(name, {[this]() { ready(); }}) {} -::testing::internal::TypedExpectation& ExpectableWatcherImpl::expectReady() const { - return EXPECT_CALL(*this, ready()); -} - -ExpectableTargetImpl::ExpectableTargetImpl(absl::string_view name) - : TargetImpl(name, {[this]() { initialize(); }}) {} -::testing::internal::TypedExpectation& ExpectableTargetImpl::expectInitialize() { - return EXPECT_CALL(*this, initialize()); -} -::testing::internal::TypedExpectation& -ExpectableTargetImpl::expectInitializeWillCallReady() { - return expectInitialize().WillOnce(Invoke([this]() { ready(); })); -} - -} // namespace SafeInit -} // namespace Envoy diff --git a/test/mocks/safe_init/mocks.h b/test/mocks/safe_init/mocks.h deleted file mode 100644 index 92a41cf4d7389..0000000000000 --- a/test/mocks/safe_init/mocks.h +++ /dev/null @@ -1,66 +0,0 @@ -#pragma once - -#include "envoy/safe_init/manager.h" - -#include "common/safe_init/target_impl.h" -#include "common/safe_init/watcher_impl.h" - -#include "gmock/gmock.h" - -namespace Envoy { -namespace 
SafeInit { - -/** - * ExpectableWatcherImpl is a real WatcherImpl, subclassed to add a mock `ready` method that you can - * set expectations on in tests. Tests should never want a watcher with different behavior than the - * real implementation. - */ -class ExpectableWatcherImpl : public WatcherImpl { -public: - ExpectableWatcherImpl(absl::string_view name = "test"); - MOCK_CONST_METHOD0(ready, void()); - - /** - * Convenience method to provide a shorthand for EXPECT_CALL(watcher, ready()). Can be chained, - * for example: watcher.expectReady().Times(0); - */ - ::testing::internal::TypedExpectation& expectReady() const; -}; - -/** - * ExpectableTargetImpl is a real TargetImpl, subclassed to add a mock `initialize` method that you - * can set expectations on in tests. Tests should never want a target with a different behavior than - * the real implementation. - */ -class ExpectableTargetImpl : public TargetImpl { -public: - ExpectableTargetImpl(absl::string_view name = "test"); - MOCK_METHOD0(initialize, void()); - - /** - * Convenience method to provide a shorthand for EXPECT_CALL(target, initialize()). Can be - * chained, for example: target.expectInitialize().Times(0); - */ - ::testing::internal::TypedExpectation& expectInitialize(); - - /** - * Convenience method to provide a shorthand for expectInitialize() with mocked behavior of - * calling `ready` immediately. - */ - ::testing::internal::TypedExpectation& expectInitializeWillCallReady(); -}; - -/** - * MockManager is a typical mock. In many cases, it won't be necessary to mock any of its methods. - * In cases where its `add` and `initialize` methods are actually called in a test, it's usually - * sufficient to mock `add` by saving the target argument locally, and to mock `initialize` by - * invoking the saved target with the watcher argument. 
- */ -struct MockManager : Manager { - MOCK_CONST_METHOD0(state, Manager::State()); - MOCK_METHOD1(add, void(const Target&)); - MOCK_METHOD1(initialize, void(const Watcher&)); -}; - -} // namespace SafeInit -} // namespace Envoy diff --git a/test/server/BUILD b/test/server/BUILD index 1c64006680419..8cae70848f762 100644 --- a/test/server/BUILD +++ b/test/server/BUILD @@ -80,16 +80,6 @@ envoy_cc_test( ], ) -envoy_cc_test( - name = "init_manager_impl_test", - srcs = ["init_manager_impl_test.cc"], - deps = [ - "//source/server:init_manager_lib", - "//test/mocks:common_lib", - "//test/mocks/init:init_mocks", - ], -) - envoy_cc_test( name = "guarddog_impl_test", srcs = ["guarddog_impl_test.cc"], diff --git a/test/server/http/admin_test.cc b/test/server/http/admin_test.cc index bae835f4f65af..bffaf0c9380f6 100644 --- a/test/server/http/admin_test.cc +++ b/test/server/http/admin_test.cc @@ -1191,7 +1191,7 @@ TEST_P(AdminInstanceTest, GetRequest) { Http::HeaderMapImpl response_headers; std::string body; - ON_CALL(initManager, state()).WillByDefault(Return(Init::Manager::State::NotInitialized)); + ON_CALL(initManager, state()).WillByDefault(Return(Init::Manager::State::Uninitialized)); EXPECT_EQ(Http::Code::OK, admin_.request("/server_info", "GET", response_headers, body)); envoy::admin::v2alpha::ServerInfo server_info_proto; EXPECT_THAT(std::string(response_headers.ContentType()->value().getStringView()), diff --git a/test/server/init_manager_impl_test.cc b/test/server/init_manager_impl_test.cc deleted file mode 100644 index 964db18551670..0000000000000 --- a/test/server/init_manager_impl_test.cc +++ /dev/null @@ -1,69 +0,0 @@ -#include "server/init_manager_impl.h" - -#include "test/mocks/common.h" -#include "test/mocks/init/mocks.h" - -#include "gmock/gmock.h" - -using testing::_; -using testing::InSequence; -using testing::Invoke; - -namespace Envoy { -namespace Server { -namespace { - -class InitManagerImplTest : public testing::Test { -public: - InitManagerImpl 
manager_{"test"}; - ReadyWatcher initialized_; -}; - -TEST_F(InitManagerImplTest, NoTargets) { - EXPECT_CALL(initialized_, ready()); - manager_.initialize([&]() -> void { initialized_.ready(); }); -} - -TEST_F(InitManagerImplTest, Targets) { - InSequence s; - Init::MockTarget target; - - manager_.registerTarget(target, ""); - EXPECT_CALL(target, initialize(_)); - manager_.initialize([&]() -> void { initialized_.ready(); }); - EXPECT_CALL(initialized_, ready()); - target.callback_(); -} - -TEST_F(InitManagerImplTest, TargetRemoveWhileInitializing) { - InSequence s; - Init::MockTarget target; - - manager_.registerTarget(target, ""); - EXPECT_CALL(target, initialize(_)).WillOnce(Invoke([](std::function callback) -> void { - callback(); - })); - EXPECT_CALL(initialized_, ready()); - manager_.initialize([&]() -> void { initialized_.ready(); }); -} - -TEST_F(InitManagerImplTest, TargetAfterInitializing) { - InSequence s; - Init::MockTarget target1; - Init::MockTarget target2; - - manager_.registerTarget(target1, ""); - EXPECT_CALL(target1, initialize(_)); - manager_.initialize([&]() -> void { initialized_.ready(); }); - - EXPECT_CALL(target2, initialize(_)); - manager_.registerTarget(target2, ""); - - target2.callback_(); - EXPECT_CALL(initialized_, ready()); - target1.callback_(); -} - -} // namespace -} // namespace Server -} // namespace Envoy diff --git a/test/server/lds_api_test.cc b/test/server/lds_api_test.cc index 7c73374a1d986..87e5ff11c921f 100644 --- a/test/server/lds_api_test.cc +++ b/test/server/lds_api_test.cc @@ -25,7 +25,11 @@ namespace { class LdsApiTest : public testing::Test { public: - LdsApiTest() : request_(&cluster_manager_.async_client_), api_(Api::createApiForTest(store_)) {} + LdsApiTest() : request_(&cluster_manager_.async_client_), api_(Api::createApiForTest(store_)) { + ON_CALL(init_manager_, add(_)).WillByDefault(Invoke([this](const Init::Target& target) { + init_target_handle_ = target.createHandle("test"); + })); + } void setup() { const 
std::string config_json = R"EOF( @@ -50,12 +54,13 @@ class LdsApiTest : public testing::Test { EXPECT_CALL(cluster, info()); EXPECT_CALL(*cluster.info_, type()); interval_timer_ = new Event::MockTimer(&dispatcher_); - EXPECT_CALL(init_, registerTarget(_, _)); - lds_ = std::make_unique(lds_config, cluster_manager_, dispatcher_, random_, init_, - local_info_, store_, listener_manager_, *api_); + EXPECT_CALL(init_manager_, add(_)); + lds_ = + std::make_unique(lds_config, cluster_manager_, dispatcher_, random_, + init_manager_, local_info_, store_, listener_manager_, *api_); expectRequest(); - init_.initialize(); + init_target_handle_->initialize(init_watcher_); } void expectAdd(const std::string& listener_name, absl::optional version, @@ -121,7 +126,9 @@ class LdsApiTest : public testing::Test { NiceMock cluster_manager_; Event::MockDispatcher dispatcher_; NiceMock random_; - Init::MockManager init_; + Init::MockManager init_manager_; + Init::ExpectableWatcherImpl init_watcher_; + Init::TargetHandlePtr init_target_handle_; NiceMock local_info_; Stats::IsolatedStoreImpl store_; MockListenerManager listener_manager_; @@ -163,8 +170,8 @@ TEST_F(LdsApiTest, UnknownCluster) { Upstream::ClusterManager::ClusterInfoMap cluster_map; EXPECT_CALL(cluster_manager_, clusters()).WillOnce(Return(cluster_map)); EXPECT_THROW_WITH_MESSAGE( - LdsApiImpl(lds_config, cluster_manager_, dispatcher_, random_, init_, local_info_, store_, - listener_manager_, *api_), + LdsApiImpl(lds_config, cluster_manager_, dispatcher_, random_, init_manager_, local_info_, + store_, listener_manager_, *api_), EnvoyException, "envoy::api::v2::core::ConfigSource must have a statically defined non-EDS " "cluster: 'foo_cluster' does not exist, was added via api, or is an " @@ -191,7 +198,7 @@ TEST_F(LdsApiTest, MisconfiguredListenerNameIsPresentInException) { EXPECT_CALL(listener_manager_, addOrUpdateListener(_, _, true)) .WillOnce(Throw(EnvoyException("something is wrong"))); - EXPECT_CALL(init_.initialized_, 
ready()); + EXPECT_CALL(init_watcher_, ready()); EXPECT_THROW_WITH_MESSAGE( lds_->onConfigUpdate(listeners, ""), EnvoyException, @@ -209,7 +216,7 @@ TEST_F(LdsApiTest, EmptyListenersUpdate) { EXPECT_CALL(listener_manager_, listeners()).WillOnce(Return(existing_listeners)); - EXPECT_CALL(init_.initialized_, ready()); + EXPECT_CALL(init_watcher_, ready()); EXPECT_CALL(request_, cancel()); lds_->onConfigUpdate(listeners, ""); @@ -237,7 +244,7 @@ TEST_F(LdsApiTest, ListenerCreationContinuesEvenAfterException) { .WillOnce(Return(true)) .WillOnce(Throw(EnvoyException("something else is wrong"))); - EXPECT_CALL(init_.initialized_, ready()); + EXPECT_CALL(init_watcher_, ready()); EXPECT_THROW_WITH_MESSAGE(lds_->onConfigUpdate(listeners, ""), EnvoyException, "Error adding/updating listener(s) invalid-listener-1: something is " @@ -285,8 +292,8 @@ TEST_F(LdsApiTest, BadLocalInfo) { EXPECT_CALL(*cluster.info_, type()); ON_CALL(local_info_, clusterName()).WillByDefault(Return(std::string())); EXPECT_THROW_WITH_MESSAGE( - LdsApiImpl(lds_config, cluster_manager_, dispatcher_, random_, init_, local_info_, store_, - listener_manager_, *api_), + LdsApiImpl(lds_config, cluster_manager_, dispatcher_, random_, init_manager_, local_info_, + store_, listener_manager_, *api_), EnvoyException, "lds: node 'id' and 'cluster' are required. 
Set it either in 'node' config or via " "--service-node and --service-cluster options."); @@ -324,7 +331,7 @@ TEST_F(LdsApiTest, Basic) { makeListenersAndExpectCall({}); expectAdd("listener1", "0", true); expectAdd("listener2", "0", true); - EXPECT_CALL(init_.initialized_, ready()); + EXPECT_CALL(init_watcher_, ready()); EXPECT_CALL(*interval_timer_, enableTimer(_)); callbacks_->onSuccess(std::move(message)); @@ -397,7 +404,7 @@ TEST_F(LdsApiTest, TlsConfigWithoutCaCert) { makeListenersAndExpectCall({"listener0"}); expectAdd("listener0", {}, true); - EXPECT_CALL(init_.initialized_, ready()); + EXPECT_CALL(init_watcher_, ready()); EXPECT_CALL(*interval_timer_, enableTimer(_)); callbacks_->onSuccess(std::move(message)); @@ -474,7 +481,7 @@ TEST_F(LdsApiTest, Failure) { Http::HeaderMapPtr{new Http::TestHeaderMapImpl{{":status", "200"}}})); message->body() = std::make_unique(response_json); - EXPECT_CALL(init_.initialized_, ready()); + EXPECT_CALL(init_watcher_, ready()); EXPECT_CALL(*interval_timer_, enableTimer(_)); callbacks_->onSuccess(std::move(message)); @@ -524,7 +531,7 @@ TEST_F(LdsApiTest, ReplacingListenerWithSameAddress) { makeListenersAndExpectCall({}); expectAdd("listener1", "0", true); expectAdd("listener2", "0", true); - EXPECT_CALL(init_.initialized_, ready()); + EXPECT_CALL(init_watcher_, ready()); EXPECT_CALL(*interval_timer_, enableTimer(_)); callbacks_->onSuccess(std::move(message)); diff --git a/test/server/listener_manager_impl_test.cc b/test/server/listener_manager_impl_test.cc index 5c0cf8bc1ee6e..53f9e086ba7ee 100644 --- a/test/server/listener_manager_impl_test.cc +++ b/test/server/listener_manager_impl_test.cc @@ -51,7 +51,7 @@ class ListenerHandle { MOCK_METHOD0(onDestroy, void()); - Init::MockTarget target_; + Init::ExpectableTargetImpl target_; MockDrainManager* drain_manager_ = new MockDrainManager(); Configuration::FactoryContext* context_{}; }; @@ -85,7 +85,7 @@ class ListenerManagerImplTest : public testing::Test { std::shared_ptr 
notifier(raw_listener); raw_listener->context_ = &context; if (need_init) { - context.initManager().registerTarget(notifier->target_, ""); + context.initManager().add(notifier->target_); } return {[notifier](Network::FilterManager&) -> void {}}; })); @@ -866,7 +866,7 @@ filter_chains: {} ListenerHandle* listener_baz = expectListenerCreate(true); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, true)); - EXPECT_CALL(listener_baz->target_, initialize(_)); + EXPECT_CALL(listener_baz->target_, initialize()); EXPECT_TRUE( manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_baz_yaml), "version5", true)); EXPECT_EQ(2UL, manager_->listeners().size()); @@ -931,9 +931,9 @@ version_info: version5 ListenerHandle* listener_baz_update1 = expectListenerCreate(true); EXPECT_CALL(*listener_baz, onDestroy()).WillOnce(Invoke([listener_baz]() -> void { // Call the initialize callback during destruction like RDS will. - listener_baz->target_.callback_(); + listener_baz->target_.ready(); })); - EXPECT_CALL(listener_baz_update1->target_, initialize(_)); + EXPECT_CALL(listener_baz_update1->target_, initialize()); EXPECT_TRUE( manager_->addOrUpdateListener(parseListenerFromJson(listener_baz_update1_json), "", true)); EXPECT_EQ(2UL, manager_->listeners().size()); @@ -941,7 +941,7 @@ version_info: version5 // Finish initialization for baz which should make it active. 
EXPECT_CALL(*worker_, addListener(_, _)); - listener_baz_update1->target_.callback_(); + listener_baz_update1->target_.ready(); EXPECT_EQ(3UL, manager_->listeners().size()); worker_->callAddCompletion(true); checkStats(3, 3, 0, 0, 3, 0); @@ -1090,7 +1090,7 @@ TEST_F(ListenerManagerImplTest, RemoveListener) { ListenerHandle* listener_foo = expectListenerCreate(true); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, true)); - EXPECT_CALL(listener_foo->target_, initialize(_)); + EXPECT_CALL(listener_foo->target_, initialize()); EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromJson(listener_foo_json), "", true)); EXPECT_EQ(0UL, manager_->listeners().size()); checkStats(1, 0, 0, 1, 0, 0); @@ -1104,11 +1104,11 @@ TEST_F(ListenerManagerImplTest, RemoveListener) { // Add foo again and initialize it. listener_foo = expectListenerCreate(true); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, true)); - EXPECT_CALL(listener_foo->target_, initialize(_)); + EXPECT_CALL(listener_foo->target_, initialize()); EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromJson(listener_foo_json), "", true)); checkStats(2, 0, 1, 1, 0, 0); EXPECT_CALL(*worker_, addListener(_, _)); - listener_foo->target_.callback_(); + listener_foo->target_.ready(); worker_->callAddCompletion(true); EXPECT_EQ(1UL, manager_->listeners().size()); checkStats(2, 0, 1, 0, 1, 0); @@ -1125,7 +1125,7 @@ TEST_F(ListenerManagerImplTest, RemoveListener) { )EOF"; ListenerHandle* listener_foo_update1 = expectListenerCreate(true); - EXPECT_CALL(listener_foo_update1->target_, initialize(_)); + EXPECT_CALL(listener_foo_update1->target_, initialize()); EXPECT_TRUE( manager_->addOrUpdateListener(parseListenerFromJson(listener_foo_update1_json), "", true)); EXPECT_EQ(1UL, manager_->listeners().size()); @@ -1212,7 +1212,7 @@ TEST_F(ListenerManagerImplTest, DuplicateAddressDontBind) { ListenerHandle* listener_foo = expectListenerCreate(true); EXPECT_CALL(listener_factory_, createListenSocket(_, 
_, _, false)); - EXPECT_CALL(listener_foo->target_, initialize(_)); + EXPECT_CALL(listener_foo->target_, initialize()); EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromJson(listener_foo_json), "", true)); // Add bar with same non-binding address. Should fail. @@ -1234,7 +1234,7 @@ TEST_F(ListenerManagerImplTest, DuplicateAddressDontBind) { // Move foo to active and then try to add again. This should still fail. EXPECT_CALL(*worker_, addListener(_, _)); - listener_foo->target_.callback_(); + listener_foo->target_.ready(); worker_->callAddCompletion(true); listener_bar = expectListenerCreate(true); diff --git a/test/server/server_test.cc b/test/server/server_test.cc index 6206b8282e4b6..c961b393e2f09 100644 --- a/test/server/server_test.cc +++ b/test/server/server_test.cc @@ -80,7 +80,7 @@ class RunHelperTest : public testing::Test { NiceMock cm_; NiceMock access_log_manager_; NiceMock overload_manager_; - InitManagerImpl init_manager_{""}; + Init::ManagerImpl init_manager_{""}; ReadyWatcher start_workers_; std::unique_ptr helper_; std::function cm_init_callback_; @@ -105,13 +105,13 @@ TEST_F(RunHelperTest, ShutdownBeforeCmInitialize) { TEST_F(RunHelperTest, ShutdownBeforeInitManagerInit) { EXPECT_CALL(start_workers_, ready()).Times(0); - Init::MockTarget target; - init_manager_.registerTarget(target, ""); - EXPECT_CALL(target, initialize(_)); + Init::ExpectableTargetImpl target; + init_manager_.add(target); + EXPECT_CALL(target, initialize()); cm_init_callback_(); sigterm_->callback_(); EXPECT_CALL(server_, isShutdown()).WillOnce(Return(shutdown_)); - target.callback_(); + target.ready(); } // Class creates minimally viable server instance for testing. From fcb7af6b49bea65e3a47dc6028e4baf0d9c90a3b Mon Sep 17 00:00:00 2001 From: Joshua Marantz Date: Wed, 27 Mar 2019 08:14:42 -0400 Subject: [PATCH 022/165] time: sim-time thread safety and move guard-dog fully into abstract time. (#6369) * Rework guarddog_impl.cc using timers rather than condvar timed waits. 
Signed-off-by: Joshua Marantz --- include/envoy/event/dispatcher.h | 9 +- source/common/event/BUILD | 1 + source/common/event/dispatcher_impl.cc | 7 +- source/common/event/libevent_scheduler.cc | 29 ++- source/common/event/libevent_scheduler.h | 12 +- source/server/guarddog_impl.cc | 73 ++++---- source/server/guarddog_impl.h | 57 ++++-- test/server/BUILD | 1 + test/server/guarddog_impl_test.cc | 173 +++++++++++------- test/test_common/simulated_time_system.cc | 105 +++++++---- test/test_common/simulated_time_system.h | 9 +- .../test_common/simulated_time_system_test.cc | 6 +- tools/check_format.py | 3 +- 13 files changed, 307 insertions(+), 178 deletions(-) diff --git a/include/envoy/event/dispatcher.h b/include/envoy/event/dispatcher.h index a23872326c271..1e0b52a10f270 100644 --- a/include/envoy/event/dispatcher.h +++ b/include/envoy/event/dispatcher.h @@ -157,7 +157,14 @@ class Dispatcher { * called) or non-blocking mode where only active events will be executed and then * run() will return. */ - enum class RunType { Block, NonBlock }; + enum class RunType { + Block, // Executes any events that have been activated, then exit. + NonBlock, // Waits for any pending events to activate, executes them, + // then exits. Exits immediately if there are no pending or + // active events. + RunUntilExit // Runs the event-loop until loopExit() is called, blocking + // until there are pending or active events. 
+ }; virtual void run(RunType type) PURE; /** diff --git a/source/common/event/BUILD b/source/common/event/BUILD index a07cf826d7349..478fc28eb4c87 100644 --- a/source/common/event/BUILD +++ b/source/common/event/BUILD @@ -98,6 +98,7 @@ envoy_cc_library( deps = [ ":libevent_lib", ":timer_lib", + "//include/envoy/event:dispatcher_interface", "//include/envoy/event:timer_interface", "//source/common/common:assert_lib", ], diff --git a/source/common/event/dispatcher_impl.cc b/source/common/event/dispatcher_impl.cc index 995e551b83519..8e737de4de17f 100644 --- a/source/common/event/dispatcher_impl.cc +++ b/source/common/event/dispatcher_impl.cc @@ -164,12 +164,7 @@ void DispatcherImpl::run(RunType type) { // not guarantee that events are run in any particular order. So even if we post() and call // event_base_once() before some other event, the other event might get called first. runPostCallbacks(); - - if (type == RunType::NonBlock) { - base_scheduler_.nonBlockingLoop(); - } else { - base_scheduler_.blockingLoop(); - } + base_scheduler_.run(type); } void DispatcherImpl::runPostCallbacks() { diff --git a/source/common/event/libevent_scheduler.cc b/source/common/event/libevent_scheduler.cc index ab28dc2a6f246..5b35ffd18447e 100644 --- a/source/common/event/libevent_scheduler.cc +++ b/source/common/event/libevent_scheduler.cc @@ -15,21 +15,30 @@ TimerPtr LibeventScheduler::createTimer(const TimerCb& cb) { return std::make_unique(libevent_, cb); }; -void LibeventScheduler::nonBlockingLoop() { +void LibeventScheduler::run(Dispatcher::RunType mode) { + int flag = 0; + switch (mode) { + case Dispatcher::RunType::NonBlock: + flag = EVLOOP_NONBLOCK; #ifdef WIN32 - // On Windows, EVLOOP_NONBLOCK will cause the libevent event_base_loop to run forever. - // This is because libevent only supports level triggering on Windows, and so the write - // event callbacks will trigger every time through the loop. 
Adding EVLOOP_ONCE ensures the - // loop will run at most once - const int flag = EVLOOP_NONBLOCK | EVLOOP_ONCE; -#else - const int flag = EVLOOP_NONBLOCK; + // On Windows, EVLOOP_NONBLOCK will cause the libevent event_base_loop to run forever. + // This is because libevent only supports level triggering on Windows, and so the write + // event callbacks will trigger every time through the loop. Adding EVLOOP_ONCE ensures the + // loop will run at most once + flag |= EVLOOP_NONBLOCK | EVLOOP_ONCE; #endif + break; + case Dispatcher::RunType::Block: + // The default flags have 'block' behavior. See + // http://www.wangafu.net/~nickm/libevent-book/Ref3_eventloop.html + break; + case Dispatcher::RunType::RunUntilExit: + flag = EVLOOP_NO_EXIT_ON_EMPTY; + break; + } event_base_loop(libevent_.get(), flag); } -void LibeventScheduler::blockingLoop() { event_base_loop(libevent_.get(), 0); } - void LibeventScheduler::loopExit() { event_base_loopexit(libevent_.get(), nullptr); } } // namespace Event diff --git a/source/common/event/libevent_scheduler.h b/source/common/event/libevent_scheduler.h index b15dc26ef0551..5a41e1ccf6c4f 100644 --- a/source/common/event/libevent_scheduler.h +++ b/source/common/event/libevent_scheduler.h @@ -1,5 +1,6 @@ #pragma once +#include "envoy/event/dispatcher.h" #include "envoy/event/timer.h" #include "common/event/libevent.h" @@ -18,14 +19,11 @@ class LibeventScheduler : public Scheduler { TimerPtr createTimer(const TimerCb& cb) override; /** - * Runs the libevent loop once, without blocking. - */ - void nonBlockingLoop(); - - /** - * Runs the libevent loop once, with block. + * Runs the event loop. + * + * @param mode The mode in which to run the event loop. */ - void blockingLoop(); + void run(Dispatcher::RunType mode); /** * Exits the libevent loop. 
diff --git a/source/server/guarddog_impl.cc b/source/server/guarddog_impl.cc index ae75f884afd0d..67b55ca008401 100644 --- a/source/server/guarddog_impl.cc +++ b/source/server/guarddog_impl.cc @@ -17,10 +17,10 @@ namespace Envoy { namespace Server { GuardDogImpl::GuardDogImpl(Stats::Scope& stats_scope, const Server::Configuration::Main& config, - Api::Api& api) - : time_source_(api.timeSource()), miss_timeout_(config.wdMissTimeout()), - megamiss_timeout_(config.wdMegaMissTimeout()), kill_timeout_(config.wdKillTimeout()), - multi_kill_timeout_(config.wdMultiKillTimeout()), + Api::Api& api, std::unique_ptr&& test_interlock) + : test_interlock_hook_(std::move(test_interlock)), time_source_(api.timeSource()), + miss_timeout_(config.wdMissTimeout()), megamiss_timeout_(config.wdMegaMissTimeout()), + kill_timeout_(config.wdKillTimeout()), multi_kill_timeout_(config.wdMultiKillTimeout()), loop_interval_([&]() -> std::chrono::milliseconds { // The loop interval is simply the minimum of all specified intervals, // but we must account for the 0=disabled case. 
This lambda takes care @@ -32,15 +32,28 @@ GuardDogImpl::GuardDogImpl(Stats::Scope& stats_scope, const Server::Configuratio }()), watchdog_miss_counter_(stats_scope.counter("server.watchdog_miss")), watchdog_megamiss_counter_(stats_scope.counter("server.watchdog_mega_miss")), - run_thread_(true) { + dispatcher_(api.allocateDispatcher()), + loop_timer_(dispatcher_->createTimer([this]() { step(); })), run_thread_(true) { start(api); } +GuardDogImpl::GuardDogImpl(Stats::Scope& stats_scope, const Server::Configuration::Main& config, + Api::Api& api) + : GuardDogImpl(stats_scope, config, api, std::make_unique()) {} + GuardDogImpl::~GuardDogImpl() { stop(); } -void GuardDogImpl::threadRoutine() { - do { - const auto now = time_source_.monotonicTime(); +void GuardDogImpl::step() { + { + Thread::LockGuard guard(mutex_); + if (!run_thread_) { + return; + } + } + + const auto now = time_source_.monotonicTime(); + + { bool seen_one_multi_timeout(false); Thread::LockGuard guard(wd_lock_); for (auto& watched_dog : watched_dogs_) { @@ -79,7 +92,15 @@ void GuardDogImpl::threadRoutine() { } } } - } while (waitOrDetectStop()); + } + + { + Thread::LockGuard guard(mutex_); + test_interlock_hook_->signalFromImpl(now); + if (run_thread_) { + loop_timer_->enableTimer(loop_interval_); + } + } } WatchDogSharedPtr GuardDogImpl::createWatchDog(Thread::ThreadIdPtr&& thread_id) { @@ -111,41 +132,19 @@ void GuardDogImpl::stopWatching(WatchDogSharedPtr wd) { } } -bool GuardDogImpl::waitOrDetectStop() { - force_checked_event_.notifyAll(); - Thread::LockGuard guard(exit_lock_); - // Spurious wakeups are OK without explicit handling. We'll just check - // earlier than strictly required for that round. - - // Preferably, we should be calling - // time_system_.waitFor(exit_lock_, exit_event_, loop_interval_); - // here, but that makes GuardDogMissTest.* very flaky. 
The reason that - // directly calling condvar waitFor works is that it doesn't advance - // simulated time, which the test is carefully controlling. - // - // One alternative approach that would be easier to test is to use a private - // dispatcher and a TimerCB to execute the loop body of threadRoutine(). In - // this manner, the same dynamics would occur in production, with added - // overhead from libevent, But then the unit-test would purely control the - // advancement of time, and thus be more robust. Another variation would be - // to run this watchdog on the main-thread dispatcher, though such an approach - // could not detect when the main-thread was stuck. - exit_event_.waitFor(exit_lock_, loop_interval_); // NO_CHECK_FORMAT(real_time) - - return run_thread_; -} - void GuardDogImpl::start(Api::Api& api) { - run_thread_ = true; - thread_ = api.threadFactory().createThread([this]() -> void { threadRoutine(); }); + Thread::LockGuard guard(mutex_); + thread_ = api.threadFactory().createThread( + [this]() -> void { dispatcher_->run(Event::Dispatcher::RunType::RunUntilExit); }); + loop_timer_->enableTimer(std::chrono::milliseconds(0)); } void GuardDogImpl::stop() { { - Thread::LockGuard guard(exit_lock_); + Thread::LockGuard guard(mutex_); run_thread_ = false; - exit_event_.notifyAll(); } + dispatcher_->exit(); if (thread_) { thread_->join(); thread_.reset(); diff --git a/source/server/guarddog_impl.h b/source/server/guarddog_impl.h index c01cbda1bbc57..7f07ba898ce71 100644 --- a/source/server/guarddog_impl.h +++ b/source/server/guarddog_impl.h @@ -32,13 +32,40 @@ namespace Server { */ class GuardDogImpl : public GuardDog { public: + /** + * Defines a test interlock hook to enable tests to synchronize the guard-dog + * execution so they can probe current counter values. The default + * implementation that runs in production has empty methods, which are + * overridden in the implementation used during tests. 
+ */ + class TestInterlockHook { + public: + virtual ~TestInterlockHook() = default; + + /** + * Called from GuardDogImpl to indicate that it has evaluated all watch-dogs + * up to a particular point in time. + */ + virtual void signalFromImpl(MonotonicTime) {} + + /** + * Called from GuardDog tests to block until the implementation has reached + * the desired point in time. + */ + virtual void waitFromTest(Thread::MutexBasicLockable&, MonotonicTime) {} + }; + /** * @param stats_scope Statistics scope to write watchdog_miss and * watchdog_mega_miss events into. * @param config Configuration object. + * @param api API object. + * @param test_interlock a hook for enabling interlock with unit tests. * * See the configuration documentation for details on the timeout settings. */ + GuardDogImpl(Stats::Scope& stats_scope, const Server::Configuration::Main& config, Api::Api& api, + std::unique_ptr&& test_interlock); GuardDogImpl(Stats::Scope& stats_scope, const Server::Configuration::Main& config, Api::Api& api); ~GuardDogImpl(); @@ -46,10 +73,17 @@ class GuardDogImpl : public GuardDog { * Exposed for testing purposes only (but harmless to call): */ int loopIntervalForTest() const { return loop_interval_.count(); } + + /** + * Test hook to force a step() to catch up with the current simulated + * time. This is inlined so that it does not need to be present in the + * production binary. + */ void forceCheckForTest() { - exit_event_.notifyAll(); - Thread::LockGuard guard(exit_lock_); - force_checked_event_.wait(exit_lock_); + Thread::LockGuard guard(mutex_); + MonotonicTime now = time_source_.monotonicTime(); + loop_timer_->enableTimer(std::chrono::milliseconds(0)); + test_interlock_hook_->waitFromTest(mutex_, now); } // Server::GuardDog @@ -57,12 +91,8 @@ class GuardDogImpl : public GuardDog { void stopWatching(WatchDogSharedPtr wd) override; private: - void threadRoutine(); - /** - * @return True if we should continue, false if signalled to stop. 
- */ - bool waitOrDetectStop(); - void start(Api::Api& api) EXCLUSIVE_LOCKS_REQUIRED(exit_lock_); + void start(Api::Api& api); + void step(); void stop(); // Per the C++ standard it is OK to use these in ctor initializer as long as // it is after kill and multikill timeout values are initialized. @@ -76,6 +106,7 @@ class GuardDogImpl : public GuardDog { bool megamiss_alerted_{}; }; + std::unique_ptr test_interlock_hook_; TimeSource& time_source_; const std::chrono::milliseconds miss_timeout_; const std::chrono::milliseconds megamiss_timeout_; @@ -87,10 +118,10 @@ class GuardDogImpl : public GuardDog { std::vector watched_dogs_ GUARDED_BY(wd_lock_); Thread::MutexBasicLockable wd_lock_; Thread::ThreadPtr thread_; - Thread::MutexBasicLockable exit_lock_; - Thread::CondVar exit_event_; - bool run_thread_ GUARDED_BY(exit_lock_); - Thread::CondVar force_checked_event_; + Event::DispatcherPtr dispatcher_; + Event::TimerPtr loop_timer_; + Thread::MutexBasicLockable mutex_; + bool run_thread_ GUARDED_BY(mutex_); }; } // namespace Server diff --git a/test/server/BUILD b/test/server/BUILD index 8cae70848f762..8b31ee942cad7 100644 --- a/test/server/BUILD +++ b/test/server/BUILD @@ -86,6 +86,7 @@ envoy_cc_test( deps = [ "//include/envoy/common:time_interface", "//source/common/api:api_lib", + "//source/common/common:macros", "//source/common/common:utility_lib", "//source/common/stats:stats_lib", "//source/server:guarddog_lib", diff --git a/test/server/guarddog_impl_test.cc b/test/server/guarddog_impl_test.cc index 3e3f2ccd0ff75..2a398ed31530b 100644 --- a/test/server/guarddog_impl_test.cc +++ b/test/server/guarddog_impl_test.cc @@ -5,6 +5,7 @@ #include "envoy/common/time.h" #include "common/api/api_impl.h" +#include "common/common/macros.h" #include "common/common/utility.h" #include "server/guarddog_impl.h" @@ -13,6 +14,7 @@ #include "test/mocks/server/mocks.h" #include "test/mocks/stats/mocks.h" #include "test/test_common/simulated_time_system.h" +#include 
"test/test_common/test_time.h" #include "test/test_common/utility.h" #include "gmock/gmock.h" @@ -25,15 +27,58 @@ namespace Envoy { namespace Server { namespace { -class GuardDogTestBase : public testing::Test { +class DebugTestInterlock : public GuardDogImpl::TestInterlockHook { +public: + // GuardDogImpl::TestInterlockHook + virtual void signalFromImpl(MonotonicTime time) { + impl_reached_ = time; + impl_.notifyAll(); + } + + virtual void waitFromTest(Thread::MutexBasicLockable& mutex, MonotonicTime time) + EXCLUSIVE_LOCKS_REQUIRED(mutex) { + while (impl_reached_ < time) { + impl_.wait(mutex); + } + } + +private: + Thread::CondVar impl_; + MonotonicTime impl_reached_; +}; + +// We want to make sure guard-dog is tested with both simulated time and real +// time, to ensure that it works in production, and that it works in the context +// of integration tests which are much easier to control with simulated time. +enum class TimeSystemType { Real, Simulated }; + +class GuardDogTestBase : public testing::TestWithParam { protected: - GuardDogTestBase() : api_(Api::createApiForTest(stats_store_, time_system_)) {} + GuardDogTestBase() + : time_system_(makeTimeSystem()), api_(Api::createApiForTest(stats_store_, *time_system_)) {} + + static std::unique_ptr makeTimeSystem() { + if (GetParam() == TimeSystemType::Real) { + return std::make_unique(); + } + ASSERT(GetParam() == TimeSystemType::Simulated); + return std::make_unique(); + } + + void initGuardDog(Stats::Scope& stats_scope, const Server::Configuration::Main& config) { + guard_dog_ = std::make_unique(stats_scope, config, *api_, + std::make_unique()); + } - Event::SimulatedTimeSystem time_system_; + std::unique_ptr time_system_; Stats::IsolatedStoreImpl stats_store_; Api::ApiPtr api_; + std::unique_ptr guard_dog_; }; +INSTANTIATE_TEST_SUITE_P(TimeSystemType, GuardDogTestBase, + testing::ValuesIn({TimeSystemType::Real, TimeSystemType::Simulated})); + /** * Death test caveat: Because of the way we die gcov doesn't 
receive coverage * information from the forked process that is checked for successful death. @@ -52,10 +97,10 @@ class GuardDogDeathTest : public GuardDogTestBase { */ void SetupForDeath() { InSequence s; - guard_dog_ = std::make_unique(fakestats_, config_kill_, *api_); + initGuardDog(fakestats_, config_kill_); unpet_dog_ = guard_dog_->createWatchDog(api_->threadFactory().currentThreadId()); guard_dog_->forceCheckForTest(); - time_system_.sleep(std::chrono::milliseconds(500)); + time_system_->sleep(std::chrono::milliseconds(99)); // 1 ms shy of death. } /** @@ -64,18 +109,17 @@ class GuardDogDeathTest : public GuardDogTestBase { */ void SetupForMultiDeath() { InSequence s; - guard_dog_ = std::make_unique(fakestats_, config_multikill_, *api_); + initGuardDog(fakestats_, config_multikill_); auto unpet_dog_ = guard_dog_->createWatchDog(api_->threadFactory().currentThreadId()); guard_dog_->forceCheckForTest(); auto second_dog_ = guard_dog_->createWatchDog(api_->threadFactory().currentThreadId()); guard_dog_->forceCheckForTest(); - time_system_.sleep(std::chrono::milliseconds(501)); + time_system_->sleep(std::chrono::milliseconds(499)); // 1 ms shy of multi-death. } NiceMock config_kill_; NiceMock config_multikill_; NiceMock fakestats_; - std::unique_ptr guard_dog_; WatchDogSharedPtr unpet_dog_; WatchDogSharedPtr second_dog_; }; @@ -84,54 +128,57 @@ class GuardDogDeathTest : public GuardDogTestBase { // a different name. class GuardDogAlmostDeadTest : public GuardDogDeathTest {}; -TEST_F(GuardDogDeathTest, KillDeathTest) { +TEST_P(GuardDogDeathTest, KillDeathTest) { // Is it German for "The Function"? Almost... auto die_function = [&]() -> void { SetupForDeath(); + time_system_->sleep(std::chrono::milliseconds(401)); // 400 ms past death. guard_dog_->forceCheckForTest(); }; + // Why do it this way? Any threads must be started inside the death test // statement and this is the easiest way to accomplish that. 
EXPECT_DEATH(die_function(), ""); } -TEST_F(GuardDogAlmostDeadTest, KillNoFinalCheckTest) { - // This does everything the death test does except the final force check that - // should actually result in dying. The death test does not verify that there +TEST_P(GuardDogAlmostDeadTest, KillNoFinalCheckTest) { + // This does everything the death test does, except allow enough time to + // expire to reach the death panic. The death test does not verify that there // was not a crash *before* the expected line, so this test checks that. SetupForDeath(); } -TEST_F(GuardDogDeathTest, MultiKillDeathTest) { +TEST_P(GuardDogDeathTest, MultiKillDeathTest) { auto die_function = [&]() -> void { SetupForMultiDeath(); + time_system_->sleep(std::chrono::milliseconds(2)); // 1 ms past multi-death. guard_dog_->forceCheckForTest(); }; EXPECT_DEATH(die_function(), ""); } -TEST_F(GuardDogAlmostDeadTest, MultiKillNoFinalCheckTest) { - // This does everything the death test does except the final force check that +TEST_P(GuardDogAlmostDeadTest, MultiKillNoFinalCheckTest) { + // This does everything the death test does, except the final force check that // should actually result in dying. The death test does not verify that there // was not a crash *before* the expected line, so this test checks that. SetupForMultiDeath(); } -TEST_F(GuardDogAlmostDeadTest, NearDeathTest) { +TEST_P(GuardDogAlmostDeadTest, NearDeathTest) { // This ensures that if only one thread surpasses the multiple kill threshold // there is no death. The positive case is covered in MultiKillDeathTest.
InSequence s; - GuardDogImpl gd(fakestats_, config_multikill_, *api_); - auto unpet_dog = gd.createWatchDog(api_->threadFactory().currentThreadId()); - auto pet_dog = gd.createWatchDog(api_->threadFactory().currentThreadId()); + initGuardDog(fakestats_, config_multikill_); + auto unpet_dog = guard_dog_->createWatchDog(api_->threadFactory().currentThreadId()); + auto pet_dog = guard_dog_->createWatchDog(api_->threadFactory().currentThreadId()); // This part "waits" 600 milliseconds while one dog is touched every 100, and // the other is not. 600ms is over the threshold of 500ms for multi-kill but // only one is nonresponsive, so there should be no kill (single kill // threshold of 1s is not reached). for (int i = 0; i < 6; i++) { - time_system_.sleep(std::chrono::milliseconds(100)); + time_system_->sleep(std::chrono::milliseconds(100)); pet_dog->touch(); - gd.forceCheckForTest(); + guard_dog_->forceCheckForTest(); } } @@ -143,113 +190,113 @@ class GuardDogMissTest : public GuardDogTestBase { NiceMock config_mega_; }; -TEST_F(GuardDogMissTest, MissTest) { +TEST_P(GuardDogMissTest, MissTest) { // This test checks the actual collected statistics after doing some timer // advances that should and shouldn't increment the counters. 
- GuardDogImpl gd(stats_store_, config_miss_, *api_); + initGuardDog(stats_store_, config_miss_); // We'd better start at 0: EXPECT_EQ(0UL, stats_store_.counter("server.watchdog_miss").value()); - auto unpet_dog = gd.createWatchDog(api_->threadFactory().currentThreadId()); + auto unpet_dog = guard_dog_->createWatchDog(api_->threadFactory().currentThreadId()); // At 300ms we shouldn't have hit the timeout yet: - time_system_.sleep(std::chrono::milliseconds(300)); - gd.forceCheckForTest(); + time_system_->sleep(std::chrono::milliseconds(300)); + guard_dog_->forceCheckForTest(); EXPECT_EQ(0UL, stats_store_.counter("server.watchdog_miss").value()); // This should push it past the 500ms limit: - time_system_.sleep(std::chrono::milliseconds(250)); - gd.forceCheckForTest(); + time_system_->sleep(std::chrono::milliseconds(250)); + guard_dog_->forceCheckForTest(); EXPECT_EQ(1UL, stats_store_.counter("server.watchdog_miss").value()); - gd.stopWatching(unpet_dog); + guard_dog_->stopWatching(unpet_dog); unpet_dog = nullptr; } -TEST_F(GuardDogMissTest, MegaMissTest) { +TEST_P(GuardDogMissTest, MegaMissTest) { // This test checks the actual collected statistics after doing some timer // advances that should and shouldn't increment the counters. 
- GuardDogImpl gd(stats_store_, config_mega_, *api_); - auto unpet_dog = gd.createWatchDog(api_->threadFactory().currentThreadId()); + initGuardDog(stats_store_, config_mega_); + auto unpet_dog = guard_dog_->createWatchDog(api_->threadFactory().currentThreadId()); // We'd better start at 0: EXPECT_EQ(0UL, stats_store_.counter("server.watchdog_mega_miss").value()); // This shouldn't be enough to increment the stat: - time_system_.sleep(std::chrono::milliseconds(499)); - gd.forceCheckForTest(); + time_system_->sleep(std::chrono::milliseconds(499)); + guard_dog_->forceCheckForTest(); EXPECT_EQ(0UL, stats_store_.counter("server.watchdog_mega_miss").value()); // Just 2ms more will make it greater than 500ms timeout: - time_system_.sleep(std::chrono::milliseconds(2)); - gd.forceCheckForTest(); + time_system_->sleep(std::chrono::milliseconds(2)); + guard_dog_->forceCheckForTest(); EXPECT_EQ(1UL, stats_store_.counter("server.watchdog_mega_miss").value()); - gd.stopWatching(unpet_dog); + guard_dog_->stopWatching(unpet_dog); unpet_dog = nullptr; } -TEST_F(GuardDogMissTest, MissCountTest) { +TEST_P(GuardDogMissTest, MissCountTest) { // This tests a flake discovered in the MissTest where real timeout or // spurious condition_variable wakeup causes the counter to get incremented // more than it should be. - GuardDogImpl gd(stats_store_, config_miss_, *api_); - auto sometimes_pet_dog = gd.createWatchDog(api_->threadFactory().currentThreadId()); + initGuardDog(stats_store_, config_miss_); + auto sometimes_pet_dog = guard_dog_->createWatchDog(api_->threadFactory().currentThreadId()); // These steps are executed once without ever touching the watchdog. // Then the last step is to touch the watchdog and repeat the steps. // This verifies that the behavior is reset back to baseline after a touch. 
for (unsigned long i = 0; i < 2; i++) { EXPECT_EQ(i, stats_store_.counter("server.watchdog_miss").value()); // This shouldn't be enough to increment the stat: - time_system_.sleep(std::chrono::milliseconds(499)); - gd.forceCheckForTest(); + time_system_->sleep(std::chrono::milliseconds(499)); + guard_dog_->forceCheckForTest(); EXPECT_EQ(i, stats_store_.counter("server.watchdog_miss").value()); // And if we force re-execution of the loop it still shouldn't be: - gd.forceCheckForTest(); + guard_dog_->forceCheckForTest(); EXPECT_EQ(i, stats_store_.counter("server.watchdog_miss").value()); // Just 2ms more will make it greater than 500ms timeout: - time_system_.sleep(std::chrono::milliseconds(2)); - gd.forceCheckForTest(); + time_system_->sleep(std::chrono::milliseconds(2)); + guard_dog_->forceCheckForTest(); EXPECT_EQ(i + 1, stats_store_.counter("server.watchdog_miss").value()); // Spurious wakeup, we should still only have one miss counted. - gd.forceCheckForTest(); + guard_dog_->forceCheckForTest(); EXPECT_EQ(i + 1, stats_store_.counter("server.watchdog_miss").value()); // When we finally touch the dog we should get one more increment once the // timeout value expires: sometimes_pet_dog->touch(); } - time_system_.sleep(std::chrono::milliseconds(1000)); + time_system_->sleep(std::chrono::milliseconds(1000)); sometimes_pet_dog->touch(); // Make sure megamiss still works: EXPECT_EQ(0UL, stats_store_.counter("server.watchdog_mega_miss").value()); - time_system_.sleep(std::chrono::milliseconds(1500)); - gd.forceCheckForTest(); + time_system_->sleep(std::chrono::milliseconds(1500)); + guard_dog_->forceCheckForTest(); EXPECT_EQ(1UL, stats_store_.counter("server.watchdog_mega_miss").value()); - gd.stopWatching(sometimes_pet_dog); + guard_dog_->stopWatching(sometimes_pet_dog); sometimes_pet_dog = nullptr; } -TEST_F(GuardDogTestBase, StartStopTest) { +TEST_P(GuardDogTestBase, StartStopTest) { NiceMock stats; NiceMock config(0, 0, 0, 0); - GuardDogImpl gd(stats, config, 
*api_); + initGuardDog(stats, config); } -TEST_F(GuardDogTestBase, LoopIntervalNoKillTest) { +TEST_P(GuardDogTestBase, LoopIntervalNoKillTest) { NiceMock stats; NiceMock config(40, 50, 0, 0); - GuardDogImpl gd(stats, config, *api_); - EXPECT_EQ(gd.loopIntervalForTest(), 40); + initGuardDog(stats, config); + EXPECT_EQ(guard_dog_->loopIntervalForTest(), 40); } -TEST_F(GuardDogTestBase, LoopIntervalTest) { +TEST_P(GuardDogTestBase, LoopIntervalTest) { NiceMock stats; NiceMock config(100, 90, 1000, 500); - GuardDogImpl gd(stats, config, *api_); - EXPECT_EQ(gd.loopIntervalForTest(), 90); + initGuardDog(stats, config); + EXPECT_EQ(guard_dog_->loopIntervalForTest(), 90); } -TEST_F(GuardDogTestBase, WatchDogThreadIdTest) { +TEST_P(GuardDogTestBase, WatchDogThreadIdTest) { NiceMock stats; NiceMock config(100, 90, 1000, 500); - GuardDogImpl gd(stats, config, *api_); - auto watched_dog = gd.createWatchDog(api_->threadFactory().currentThreadId()); + initGuardDog(stats, config); + auto watched_dog = guard_dog_->createWatchDog(api_->threadFactory().currentThreadId()); EXPECT_EQ(watched_dog->threadId().debugString(), api_->threadFactory().currentThreadId()->debugString()); - gd.stopWatching(watched_dog); + guard_dog_->stopWatching(watched_dog); } // If this test fails it is because the std::chrono::steady_clock::duration type has become @@ -258,7 +305,7 @@ TEST_F(GuardDogTestBase, WatchDogThreadIdTest) { // // The WatchDog/GuardDog relies on this being a lock free atomic for perf reasons so some workaround // will be required if this test starts failing. 
-TEST_F(GuardDogTestBase, AtomicIsAtomicTest) { +TEST_P(GuardDogTestBase, AtomicIsAtomicTest) { std::atomic atomic_time; ASSERT_EQ(atomic_time.is_lock_free(), true); } diff --git a/test/test_common/simulated_time_system.cc b/test/test_common/simulated_time_system.cc index 00d239479d148..6727634b5f8d5 100644 --- a/test/test_common/simulated_time_system.cc +++ b/test/test_common/simulated_time_system.cc @@ -25,44 +25,64 @@ class SimulatedTimeSystemHelper::Alarm : public Timer { // Timer void disableTimer() override; void enableTimer(const std::chrono::milliseconds& duration) override; - bool enabled() override { return armed_; } + bool enabled() override { + Thread::LockGuard lock(time_system_.mutex_); + return armed_; + } + + void disableTimerLockHeld() EXCLUSIVE_LOCKS_REQUIRED(time_system_.mutex_); - void setTime(MonotonicTime time) { time_ = time; } + void setTimeLockHeld(MonotonicTime time) EXCLUSIVE_LOCKS_REQUIRED(time_system_.mutex_) { + time_ = time; + } /** * Activates the timer so it will be run the next time the libevent loop is run, * typically via Dispatcher::run(). 
*/ - void activate() { + void activateLockHeld() EXCLUSIVE_LOCKS_REQUIRED(time_system_.mutex_) { + ASSERT(armed_); armed_ = false; - std::chrono::milliseconds duration = std::chrono::milliseconds::zero(); time_system_.incPending(); + + // We don't want to activate the alarm under lock, as it will make a libevent call, + // and libevent itself uses locks: + // https://github.com/libevent/libevent/blob/29cc8386a2f7911eaa9336692a2c5544d8b4734f/event.c#L1917 + time_system_.mutex_.unlock(); + std::chrono::milliseconds duration = std::chrono::milliseconds::zero(); base_timer_->enableTimer(duration); + time_system_.mutex_.lock(); } - MonotonicTime time() const { + MonotonicTime time() const EXCLUSIVE_LOCKS_REQUIRED(time_system_.mutex_) { ASSERT(armed_); return time_; } + SimulatedTimeSystemHelper& timeSystem() { return time_system_; } uint64_t index() const { return index_; } private: + friend SimulatedTimeSystemHelper::CompareAlarms; + void runAlarm(TimerCb cb) { - time_system_.decPending(); + // Capture time_system_ in a local in case the alarm gets deleted in the callback. + SimulatedTimeSystemHelper& time_system = time_system_; cb(); + time_system.decPending(); } TimerPtr base_timer_; SimulatedTimeSystemHelper& time_system_; - MonotonicTime time_; - uint64_t index_; - bool armed_; + MonotonicTime time_ GUARDED_BY(time_system_.mutex_); + const uint64_t index_; + bool armed_ GUARDED_BY(time_system_.mutex_); }; // Compare two alarms, based on wakeup time and insertion order. Returns true if // a comes before b. 
-bool SimulatedTimeSystemHelper::CompareAlarms::operator()(const Alarm* a, const Alarm* b) const { +bool SimulatedTimeSystemHelper::CompareAlarms::operator()(const Alarm* a, const Alarm* b) const + EXCLUSIVE_LOCKS_REQUIRED(a->time_system_.mutex_, b->time_system_.mutex_) { if (a != b) { if (a->time() < b->time()) { return true; @@ -97,20 +117,26 @@ SimulatedTimeSystemHelper::Alarm::Alarm::~Alarm() { } void SimulatedTimeSystemHelper::Alarm::Alarm::disableTimer() { + Thread::LockGuard lock(time_system_.mutex_); + disableTimerLockHeld(); +} + +void SimulatedTimeSystemHelper::Alarm::Alarm::disableTimerLockHeld() { if (armed_) { - time_system_.removeAlarm(this); + time_system_.removeAlarmLockHeld(this); armed_ = false; } } void SimulatedTimeSystemHelper::Alarm::Alarm::enableTimer( const std::chrono::milliseconds& duration) { - disableTimer(); + Thread::LockGuard lock(time_system_.mutex_); + disableTimerLockHeld(); armed_ = true; if (duration.count() == 0) { - activate(); + activateLockHeld(); } else { - time_system_.addAlarm(this, duration); + time_system_.addAlarmLockHeld(this, duration); } } @@ -152,9 +178,9 @@ void SimulatedTimeSystemHelper::sleep(const Duration& duration) { setMonotonicTimeAndUnlock(monotonic_time); } -Thread::CondVar::WaitStatus SimulatedTimeSystemHelper::waitFor(Thread::MutexBasicLockable& mutex, - Thread::CondVar& condvar, - const Duration& duration) noexcept { +Thread::CondVar::WaitStatus SimulatedTimeSystemHelper::waitFor( + Thread::MutexBasicLockable& mutex, Thread::CondVar& condvar, + const Duration& duration) noexcept EXCLUSIVE_LOCKS_REQUIRED(mutex) { const Duration real_time_poll_delay( std::min(std::chrono::duration_cast(std::chrono::milliseconds(50)), duration)); const MonotonicTime end_time = monotonicTime() + duration; @@ -177,7 +203,8 @@ Thread::CondVar::WaitStatus SimulatedTimeSystemHelper::waitFor(Thread::MutexBasi setMonotonicTimeAndUnlock(end_time); } else { // If there's another alarm pending, sleep forward to it. 
- MonotonicTime next_wakeup = (*alarms_.begin())->time(); + Alarm* alarm = (*alarms_.begin()); + MonotonicTime next_wakeup = alarmTimeLockHeld(alarm); setMonotonicTimeAndUnlock(std::min(next_wakeup, end_time)); } } else { @@ -189,21 +216,33 @@ Thread::CondVar::WaitStatus SimulatedTimeSystemHelper::waitFor(Thread::MutexBasi return Thread::CondVar::WaitStatus::Timeout; } +MonotonicTime SimulatedTimeSystemHelper::alarmTimeLockHeld(Alarm* alarm) NO_THREAD_SAFETY_ANALYSIS { + // We disable thread-safety analysis as the compiler can't detect that + // alarm_->timeSystem() == this, so we must be holding the right mutex. + ASSERT(&(alarm->timeSystem()) == this); + return alarm->time(); +} + +void SimulatedTimeSystemHelper::alarmActivateLockHeld(Alarm* alarm) NO_THREAD_SAFETY_ANALYSIS { + // We disable thread-safety analysis as the compiler can't detect that + // alarm_->timeSystem() == this, so we must be holding the right mutex. + ASSERT(&(alarm->timeSystem()) == this); + alarm->activateLockHeld(); +} + int64_t SimulatedTimeSystemHelper::nextIndex() { Thread::LockGuard lock(mutex_); return index_++; } -void SimulatedTimeSystemHelper::addAlarm(Alarm* alarm, const std::chrono::milliseconds& duration) { - Thread::LockGuard lock(mutex_); - alarm->setTime(monotonic_time_ + duration); +void SimulatedTimeSystemHelper::addAlarmLockHeld( + Alarm* alarm, const std::chrono::milliseconds& duration) NO_THREAD_SAFETY_ANALYSIS { + ASSERT(&(alarm->timeSystem()) == this); + alarm->setTimeLockHeld(monotonic_time_ + duration); alarms_.insert(alarm); } -void SimulatedTimeSystemHelper::removeAlarm(Alarm* alarm) { - Thread::LockGuard lock(mutex_); - alarms_.erase(alarm); -} +void SimulatedTimeSystemHelper::removeAlarmLockHeld(Alarm* alarm) { alarms_.erase(alarm); } SchedulerPtr SimulatedTimeSystemHelper::createScheduler(Scheduler& base_scheduler) { return std::make_unique(*this, base_scheduler); @@ -223,20 +262,16 @@ void SimulatedTimeSystemHelper::setMonotonicTimeAndUnlock(const 
MonotonicTime& m while (!alarms_.empty()) { AlarmSet::iterator pos = alarms_.begin(); Alarm* alarm = *pos; - if (alarm->time() > monotonic_time) { + MonotonicTime alarm_time = alarmTimeLockHeld(alarm); + if (alarm_time > monotonic_time) { break; } - ASSERT(alarm->time() >= monotonic_time_); + ASSERT(alarm_time >= monotonic_time_); system_time_ += - std::chrono::duration_cast(alarm->time() - monotonic_time_); - monotonic_time_ = alarm->time(); + std::chrono::duration_cast(alarm_time - monotonic_time_); + monotonic_time_ = alarm_time; alarms_.erase(pos); - mutex_.unlock(); - // We don't want to activate the alarm under lock, as it will make a libevent call, - // and libevent itself uses locks: - // https://github.com/libevent/libevent/blob/29cc8386a2f7911eaa9336692a2c5544d8b4734f/event.c#L1917 - alarm->activate(); - mutex_.lock(); + alarmActivateLockHeld(alarm); } system_time_ += std::chrono::duration_cast(monotonic_time - monotonic_time_); diff --git a/test/test_common/simulated_time_system.h b/test/test_common/simulated_time_system.h index 413f1485c3eb8..11ccad33a8e87 100644 --- a/test/test_common/simulated_time_system.h +++ b/test/test_common/simulated_time_system.h @@ -61,6 +61,7 @@ class SimulatedTimeSystemHelper : public TestTimeSystem { private: class SimulatedScheduler; class Alarm; + friend class Alarm; // Needed to reference mutex for thread annotations. struct CompareAlarms { bool operator()(const Alarm* a, const Alarm* b) const; }; @@ -76,13 +77,17 @@ class SimulatedTimeSystemHelper : public TestTimeSystem { */ void setMonotonicTimeAndUnlock(const MonotonicTime& monotonic_time) UNLOCK_FUNCTION(mutex_); + MonotonicTime alarmTimeLockHeld(Alarm* alarm) EXCLUSIVE_LOCKS_REQUIRED(mutex_); + void alarmActivateLockHeld(Alarm* alarm) EXCLUSIVE_LOCKS_REQUIRED(mutex_); + // The simulation keeps a unique ID for each alarm to act as a deterministic // tie-breaker for alarm-ordering. int64_t nextIndex(); // Adds/removes an alarm. 
- void addAlarm(Alarm*, const std::chrono::milliseconds& duration); - void removeAlarm(Alarm*); + void addAlarmLockHeld(Alarm*, const std::chrono::milliseconds& duration) + EXCLUSIVE_LOCKS_REQUIRED(mutex_); + void removeAlarmLockHeld(Alarm*) EXCLUSIVE_LOCKS_REQUIRED(mutex_); // Keeps track of how many alarms have been activated but not yet called, // which helps waitFor() determine when to give up and declare a timeout. diff --git a/test/test_common/simulated_time_system_test.cc b/test/test_common/simulated_time_system_test.cc index 84091269d45d1..4f1fdf6023b0c 100644 --- a/test/test_common/simulated_time_system_test.cc +++ b/test/test_common/simulated_time_system_test.cc @@ -33,12 +33,12 @@ class SimulatedTimeSystemTest : public testing::Test { void sleepMsAndLoop(int64_t delay_ms) { time_system_.sleep(std::chrono::milliseconds(delay_ms)); - base_scheduler_.nonBlockingLoop(); + base_scheduler_.run(Dispatcher::RunType::NonBlock); } void advanceSystemMsAndLoop(int64_t delay_ms) { time_system_.setSystemTime(time_system_.systemTime() + std::chrono::milliseconds(delay_ms)); - base_scheduler_.nonBlockingLoop(); + base_scheduler_.run(Dispatcher::RunType::NonBlock); } LibeventScheduler base_scheduler_; @@ -66,7 +66,7 @@ TEST_F(SimulatedTimeSystemTest, WaitFor) { std::atomic done(false); auto thread = Thread::threadFactoryForTest().createThread([this, &done]() { while (!done) { - base_scheduler_.blockingLoop(); + base_scheduler_.run(Dispatcher::RunType::Block); } }); Thread::CondVar condvar; diff --git a/tools/check_format.py b/tools/check_format.py index 7983dd5822aec..4be8bd4e6999b 100755 --- a/tools/check_format.py +++ b/tools/check_format.py @@ -388,7 +388,8 @@ def checkSourceLine(line, file_path, reportError): # legitimately show up in comments, for example this one. 
reportError("Don't use , use absl::Mutex for reader/writer locks.") if not whitelistedForRealTime(file_path) and not 'NO_CHECK_FORMAT(real_time)' in line: - if 'RealTimeSource' in line or 'RealTimeSystem' in line or \ + if 'RealTimeSource' in line or \ + ('RealTimeSystem' in line and not 'TestRealTimeSystem' in line) or \ 'std::chrono::system_clock::now' in line or 'std::chrono::steady_clock::now' in line or \ 'std::this_thread::sleep_for' in line or hasCondVarWaitFor(line): reportError("Don't reference real-world time sources from production code; use injection") From a4507773353456f0a6228dee9fe1b9836cd36562 Mon Sep 17 00:00:00 2001 From: danzh Date: Wed, 27 Mar 2019 08:51:57 -0400 Subject: [PATCH 023/165] quiche: implement QuicFileUtils (#6375) Add QuicFileUtilsImpl using Envoy::FileSystem. Risk Level: low Testing: Added tests in test/extensions/quic_listeners/quiche/platform/quic_platform_test.cc and tested with --define quiche=enabled Part of #2557 Signed-off-by: Dan Zhang --- bazel/external/quiche.BUILD | 11 +++- .../quic_listeners/quiche/platform/BUILD | 3 + .../quiche/platform/quic_file_utils_impl.cc | 55 +++++++++++++++++ .../quiche/platform/quic_file_utils_impl.h | 28 +++++++++ .../quiche/platform/quic_platform_test.cc | 61 +++++++++++++++++++ 5 files changed, 155 insertions(+), 3 deletions(-) create mode 100644 source/extensions/quic_listeners/quiche/platform/quic_file_utils_impl.cc create mode 100644 source/extensions/quic_listeners/quiche/platform/quic_file_utils_impl.h diff --git a/bazel/external/quiche.BUILD b/bazel/external/quiche.BUILD index 149db513f83f3..59b4d894ab894 100644 --- a/bazel/external/quiche.BUILD +++ b/bazel/external/quiche.BUILD @@ -106,7 +106,10 @@ cc_library( cc_library( name = "quic_platform", srcs = ["quiche/quic/platform/api/quic_mutex.cc"] + envoy_select_quiche( - ["quiche/quic/platform/api/quic_hostname_utils.cc"], + [ + "quiche/quic/platform/api/quic_file_utils.cc", + "quiche/quic/platform/api/quic_hostname_utils.cc", + ], 
"@envoy", ), hdrs = [ @@ -114,7 +117,10 @@ cc_library( "quiche/quic/platform/api/quic_mutex.h", "quiche/quic/platform/api/quic_str_cat.h", ] + envoy_select_quiche( - ["quiche/quic/platform/api/quic_hostname_utils.h"], + [ + "quiche/quic/platform/api/quic_file_utils.h", + "quiche/quic/platform/api/quic_hostname_utils.h", + ], "@envoy", ), visibility = ["//visibility:public"], @@ -154,7 +160,6 @@ cc_library( "quiche/quic/platform/api/quic_uint128.h", # TODO: uncomment the following files as implementations are added. # "quiche/quic/platform/api/quic_clock.h", - # "quiche/quic/platform/api/quic_file_utils.h", # "quiche/quic/platform/api/quic_flags.h", # "quiche/quic/platform/api/quic_fuzzed_data_provider.h", # "quiche/quic/platform/api/quic_goog_cc_sender.h", diff --git a/source/extensions/quic_listeners/quiche/platform/BUILD b/source/extensions/quic_listeners/quiche/platform/BUILD index f6f8c67631a02..f9aa6755c62d3 100644 --- a/source/extensions/quic_listeners/quiche/platform/BUILD +++ b/source/extensions/quic_listeners/quiche/platform/BUILD @@ -124,6 +124,7 @@ envoy_cc_library( envoy_cc_library( name = "quic_platform_impl_lib", srcs = ["quic_cert_utils_impl.cc"] + envoy_select_quiche([ + "quic_file_utils_impl.cc", "quic_hostname_utils_impl.cc", "quic_test_output_impl.cc", ]), @@ -132,6 +133,7 @@ envoy_cc_library( "quic_mutex_impl.h", "quic_str_cat_impl.h", ] + envoy_select_quiche([ + "quic_file_utils_impl.h", "quic_hostname_utils_impl.h", "quic_string_utils_impl.h", "quic_test_output_impl.h", @@ -147,6 +149,7 @@ envoy_cc_library( visibility = ["//visibility:public"], deps = envoy_select_quiche([ ":string_utils_lib", + "//source/common/filesystem:directory_lib", "//source/common/filesystem:filesystem_lib", "//source/common/http:utility_lib", ]), diff --git a/source/extensions/quic_listeners/quiche/platform/quic_file_utils_impl.cc b/source/extensions/quic_listeners/quiche/platform/quic_file_utils_impl.cc new file mode 100644 index 0000000000000..00aaeef0161cb --- 
/dev/null +++ b/source/extensions/quic_listeners/quiche/platform/quic_file_utils_impl.cc @@ -0,0 +1,55 @@ +// NOLINT(namespace-envoy) + +// This file is part of the QUICHE platform implementation, and is not to be +// consumed or referenced directly by other Envoy code. It serves purely as a +// porting layer for QUICHE. + +#include "extensions/quic_listeners/quiche/platform/quic_file_utils_impl.h" + +#include "common/filesystem/directory.h" +#include "common/filesystem/filesystem_impl.h" + +#include "absl/strings/str_cat.h" + +namespace quic { +namespace { + +void depthFirstTraverseDirectory(const std::string& dirname, std::vector& files) { + Envoy::Filesystem::Directory directory(dirname); + for (const Envoy::Filesystem::DirectoryEntry& entry : directory) { + switch (entry.type_) { + case Envoy::Filesystem::FileType::Regular: + files.push_back(absl::StrCat(dirname, "/", entry.name_)); + break; + case Envoy::Filesystem::FileType::Directory: + if (entry.name_ != "." && entry.name_ != "..") { + depthFirstTraverseDirectory(absl::StrCat(dirname, "/", entry.name_), files); + } + break; + default: + ASSERT(false, + absl::StrCat("Unknow file entry type ", entry.type_, " under directory ", dirname)); + } + } +} + +} // namespace + +// Traverses the directory |dirname| and returns all of the files it contains. +std::vector ReadFileContentsImpl(const std::string& dirname) { + std::vector files; + depthFirstTraverseDirectory(dirname, files); + return files; +} + +// Reads the contents of |filename| as a string into |contents|. 
+void ReadFileContentsImpl(QuicStringPiece filename, std::string* contents) { +#ifdef WIN32 + Envoy::Filesystem::InstanceImplWin32 fs; +#else + Envoy::Filesystem::InstanceImplPosix fs; +#endif + *contents = fs.fileReadToEnd(std::string(filename.data(), filename.size())); +} + +} // namespace quic diff --git a/source/extensions/quic_listeners/quiche/platform/quic_file_utils_impl.h b/source/extensions/quic_listeners/quiche/platform/quic_file_utils_impl.h new file mode 100644 index 0000000000000..ceef1dabbab2a --- /dev/null +++ b/source/extensions/quic_listeners/quiche/platform/quic_file_utils_impl.h @@ -0,0 +1,28 @@ +#pragma once + +// NOLINT(namespace-envoy) + +// This file is part of the QUICHE platform implementation, and is not to be +// consumed or referenced directly by other Envoy code. It serves purely as a +// porting layer for QUICHE. + +#include + +#include "quiche/quic/platform/api/quic_string_piece.h" + +namespace quic { + +/** + * Traverses the directory |dirname| and returns all of the files it contains. + * @param dirname full path without trailing '/'. + */ +std::vector ReadFileContentsImpl(const std::string& dirname); + +/** + * Reads the contents of |filename| as a string into |contents|. + * @param filename the full path to the file. + * @param contents output location of the file content. + */ +void ReadFileContentsImpl(QuicStringPiece filename, std::string* contents); + +} // namespace quic diff --git a/test/extensions/quic_listeners/quiche/platform/quic_platform_test.cc b/test/extensions/quic_listeners/quiche/platform/quic_platform_test.cc index 8f0b4ebf9e880..e22003edfeb52 100644 --- a/test/extensions/quic_listeners/quiche/platform/quic_platform_test.cc +++ b/test/extensions/quic_listeners/quiche/platform/quic_platform_test.cc @@ -4,9 +4,13 @@ // consumed or referenced directly by other Envoy code. It serves purely as a // porting layer for QUICHE. 
+#include +#include + #include "test/extensions/transport_sockets/tls/ssl_test_utility.h" #include "test/test_common/environment.h" #include "test/test_common/logging.h" +#include "test/test_common/utility.h" #include "gmock/gmock.h" #include "gtest/gtest.h" @@ -20,6 +24,7 @@ #include "quiche/quic/platform/api/quic_estimate_memory_usage.h" #include "quiche/quic/platform/api/quic_expect_bug.h" #include "quiche/quic/platform/api/quic_exported_stats.h" +#include "quiche/quic/platform/api/quic_file_utils.h" #include "quiche/quic/platform/api/quic_hostname_utils.h" #include "quiche/quic/platform/api/quic_logging.h" #include "quiche/quic/platform/api/quic_map_util.h" @@ -478,5 +483,61 @@ TEST(QuicPlatformTest, QuicTestOutput) { QuicRecordTestOutput("quic_test_output.3", "output 3 content\n")); } +class FileUtilsTest : public testing::Test { +public: + FileUtilsTest() : dir_path_(Envoy::TestEnvironment::temporaryPath("envoy_test")) { + files_to_remove_.push(dir_path_); + } + +protected: + void SetUp() override { Envoy::TestUtility::createDirectory(dir_path_); } + + void TearDown() override { + while (!files_to_remove_.empty()) { + const std::string& f = files_to_remove_.top(); + Envoy::TestEnvironment::removePath(f); + files_to_remove_.pop(); + } + } + + void addSubDirs(std::list sub_dirs) { + for (const std::string& dir_name : sub_dirs) { + const std::string full_path = dir_path_ + "/" + dir_name; + Envoy::TestUtility::createDirectory(full_path); + files_to_remove_.push(full_path); + } + } + + void addFiles(std::list files) { + for (const std::string& file_name : files) { + const std::string full_path = dir_path_ + "/" + file_name; + { const std::ofstream file(full_path); } + files_to_remove_.push(full_path); + } + } + + const std::string dir_path_; + std::stack files_to_remove_; +}; + +TEST_F(FileUtilsTest, ReadDirContents) { + addSubDirs({"sub_dir1", "sub_dir2", "sub_dir1/sub_dir1_1"}); + addFiles({"file", "sub_dir1/sub_file1", "sub_dir1/sub_dir1_1/sub_file1_1", 
"sub_dir2/sub_file2"}); + + EXPECT_THAT(ReadFileContents(dir_path_), + testing::UnorderedElementsAre(dir_path_ + "/file", dir_path_ + "/sub_dir1/sub_file1", + dir_path_ + "/sub_dir1/sub_dir1_1/sub_file1_1", + dir_path_ + "/sub_dir2/sub_file2")); +} + +TEST_F(FileUtilsTest, ReadFileContents) { + const std::string data = "test string\ntest"; + const std::string file_path = + Envoy::TestEnvironment::writeStringToFileForTest("test_envoy", data); + std::string output; + ReadFileContents(file_path, &output); + EXPECT_EQ(data, output); +} + } // namespace } // namespace quic From 414f56f6b3ed5d99f3c23ee3bfee71c1905edeea Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Wed, 27 Mar 2019 09:01:18 -0400 Subject: [PATCH 024/165] test: refactoring how we add custom routes (#6382) Fixing up a TODO - fitting all route config options simply doesn't scale, so refactoring things so we don't have functions with infinite arguments. Risk Level: n/a (test only) Testing: integration test pass Docs Changes: n/a Release Notes: n/a Signed-off-by: Alyssa Wilk --- test/config/utility.cc | 38 +++++-------- test/config/utility.h | 15 ++--- test/integration/http_integration.cc | 7 ++- test/integration/integration_test.cc | 18 +++--- test/integration/protocol_integration_test.cc | 56 +++++++++++-------- test/integration/redirect_integration_test.cc | 16 +++--- .../integration/websocket_integration_test.cc | 6 +- 7 files changed, 73 insertions(+), 83 deletions(-) diff --git a/test/config/utility.cc b/test/config/utility.cc index e09f189313a0b..207a9e98453c8 100644 --- a/test/config/utility.cc +++ b/test/config/utility.cc @@ -416,34 +416,24 @@ void ConfigHelper::setConnectTimeout(std::chrono::milliseconds timeout) { connect_timeout_set_ = true; } -void ConfigHelper::addRoute( - const std::string& domains, const std::string& prefix, const std::string& cluster, - bool validate_clusters, envoy::api::v2::route::RouteAction::ClusterNotFoundResponseCode code, - 
envoy::api::v2::route::VirtualHost::TlsRequirementType type, - envoy::api::v2::route::RetryPolicy retry_policy, bool include_attempt_count_header, - const absl::string_view upgrade, - envoy::api::v2::route::RouteAction::InternalRedirectAction internal_redirect_action) { +envoy::api::v2::route::VirtualHost +ConfigHelper::createVirtualHost(const char* domain, const char* prefix, const char* cluster) { + envoy::api::v2::route::VirtualHost virtual_host; + virtual_host.set_name(domain); + virtual_host.add_domains(domain); + virtual_host.add_routes()->mutable_match()->set_prefix(prefix); + auto* route = virtual_host.mutable_routes(0)->mutable_route(); + route->set_cluster(cluster); + return virtual_host; +} + +void ConfigHelper::addVirtualHost(const envoy::api::v2::route::VirtualHost& vhost) { RELEASE_ASSERT(!finalized_, ""); envoy::config::filter::network::http_connection_manager::v2::HttpConnectionManager hcm_config; loadHttpConnectionManager(hcm_config); - - auto* route_config = hcm_config.mutable_route_config(); - route_config->mutable_validate_clusters()->set_value(validate_clusters); + auto route_config = hcm_config.mutable_route_config(); auto* virtual_host = route_config->add_virtual_hosts(); - virtual_host->set_name(domains); - virtual_host->set_include_request_attempt_count(include_attempt_count_header); - virtual_host->add_domains(domains); - virtual_host->add_routes()->mutable_match()->set_prefix(prefix); - auto* route = virtual_host->mutable_routes(0)->mutable_route(); - route->set_cluster(cluster); - route->set_cluster_not_found_response_code(code); - route->mutable_retry_policy()->Swap(&retry_policy); - if (!upgrade.empty()) { - route->add_upgrade_configs()->set_upgrade_type(std::string(upgrade)); - } - route->set_internal_redirect_action(internal_redirect_action); - virtual_host->set_require_tls(type); - + virtual_host->CopyFrom(vhost); storeHttpConnectionManager(hcm_config); } diff --git a/test/config/utility.h b/test/config/utility.h index 
5e9a4a0ac93f3..e1c00041894e8 100644 --- a/test/config/utility.h +++ b/test/config/utility.h @@ -111,17 +111,10 @@ class ConfigHelper { // Set the connect timeout on upstream connections. void setConnectTimeout(std::chrono::milliseconds timeout); - // TODO(alyssawilk) this does not scale. Refactor. - // Add an additional route to the configuration. - void addRoute(const std::string& host, const std::string& route, const std::string& cluster, - bool validate_clusters, - envoy::api::v2::route::RouteAction::ClusterNotFoundResponseCode code, - envoy::api::v2::route::VirtualHost::TlsRequirementType type = - envoy::api::v2::route::VirtualHost::NONE, - envoy::api::v2::route::RetryPolicy retry_policy = {}, - bool include_attempt_count_header = false, const absl::string_view upgrade = "", - const envoy::api::v2::route::RouteAction::InternalRedirectAction internal_action = - envoy::api::v2::route::RouteAction::PASS_THROUGH_INTERNAL_REDIRECT); + envoy::api::v2::route::VirtualHost createVirtualHost(const char* host, const char* route = "/", + const char* cluster = "cluster_0"); + + void addVirtualHost(const envoy::api::v2::route::VirtualHost& vhost); // Add an HTTP filter prior to existing filters. void addFilter(const std::string& filter_yaml); diff --git a/test/integration/http_integration.cc b/test/integration/http_integration.cc index d8d7eb2951ada..d2d6e4d33fc78 100644 --- a/test/integration/http_integration.cc +++ b/test/integration/http_integration.cc @@ -579,9 +579,10 @@ void HttpIntegrationTest::testRetry() { // Tests that the x-envoy-attempt-count header is properly set on the upstream request // and updated after the request is retried. 
void HttpIntegrationTest::testRetryAttemptCountHeader() { - config_helper_.addRoute("host", "/test_retry", "cluster_0", false, - envoy::api::v2::route::RouteAction::NOT_FOUND, - envoy::api::v2::route::VirtualHost::NONE, {}, true); + auto host = config_helper_.createVirtualHost("host", "/test_retry"); + host.set_include_request_attempt_count(true); + config_helper_.addVirtualHost(host); + initialize(); codec_client_ = makeHttpConnection(lookupPort("http")); auto response = diff --git a/test/integration/integration_test.cc b/test/integration/integration_test.cc index 1cb548c9b9ae0..c6bfa5b5fe7f0 100644 --- a/test/integration/integration_test.cc +++ b/test/integration/integration_test.cc @@ -399,9 +399,9 @@ TEST_P(IntegrationTest, BadPath) { TEST_P(IntegrationTest, AbsolutePath) { // Configure www.redirect.com to send a redirect, and ensure the redirect is // encountered via absolute URL. - config_helper_.addRoute("www.redirect.com", "/", "cluster_0", true, - envoy::api::v2::route::RouteAction::SERVICE_UNAVAILABLE, - envoy::api::v2::route::VirtualHost::ALL); + auto host = config_helper_.createVirtualHost("www.redirect.com", "/"); + host.set_require_tls(envoy::api::v2::route::VirtualHost::ALL); + config_helper_.addVirtualHost(host); config_helper_.addConfigModifier(&setAllowAbsoluteUrl); initialize(); @@ -415,9 +415,9 @@ TEST_P(IntegrationTest, AbsolutePath) { TEST_P(IntegrationTest, AbsolutePathWithPort) { // Configure www.namewithport.com:1234 to send a redirect, and ensure the redirect is // encountered via absolute URL with a port. 
- config_helper_.addRoute("www.namewithport.com:1234", "/", "cluster_0", true, - envoy::api::v2::route::RouteAction::SERVICE_UNAVAILABLE, - envoy::api::v2::route::VirtualHost::ALL); + auto host = config_helper_.createVirtualHost("www.namewithport.com:1234", "/"); + host.set_require_tls(envoy::api::v2::route::VirtualHost::ALL); + config_helper_.addVirtualHost(host); config_helper_.addConfigModifier(&setAllowAbsoluteUrl); initialize(); std::string response; @@ -432,9 +432,9 @@ TEST_P(IntegrationTest, AbsolutePathWithoutPort) { config_helper_.setDefaultHostAndRoute("foo.com", "/found"); // Set a matcher for www.namewithport.com:1234 and verify http://www.namewithport.com does not // match - config_helper_.addRoute("www.namewithport.com:1234", "/", "cluster_0", true, - envoy::api::v2::route::RouteAction::SERVICE_UNAVAILABLE, - envoy::api::v2::route::VirtualHost::ALL); + auto host = config_helper_.createVirtualHost("www.namewithport.com:1234", "/"); + host.set_require_tls(envoy::api::v2::route::VirtualHost::ALL); + config_helper_.addVirtualHost(host); config_helper_.addConfigModifier(&setAllowAbsoluteUrl); initialize(); std::string response; diff --git a/test/integration/protocol_integration_test.cc b/test/integration/protocol_integration_test.cc index 011266e9b8de9..33b9e1ac84823 100644 --- a/test/integration/protocol_integration_test.cc +++ b/test/integration/protocol_integration_test.cc @@ -41,6 +41,12 @@ using testing::Not; namespace Envoy { +void setDoNotValidateRouteConfig( + envoy::config::filter::network::http_connection_manager::v2::HttpConnectionManager& hcm) { + auto* route_config = hcm.mutable_route_config(); + route_config->mutable_validate_clusters()->set_value(false); +}; + // Tests for DownstreamProtocolIntegrationTest will be run with all protocols // (H1/H2 downstream) but only H1 upstreams. // @@ -71,9 +77,11 @@ TEST_P(DownstreamProtocolIntegrationTest, RouterNotFoundBodyNoBuffer) { // Add a route that uses unknown cluster (expect 404 Not Found). 
TEST_P(DownstreamProtocolIntegrationTest, RouterClusterNotFound404) { - config_helper_.addRoute("foo.com", "/unknown", "unknown_cluster", false, - envoy::api::v2::route::RouteAction::NOT_FOUND, - envoy::api::v2::route::VirtualHost::NONE); + config_helper_.addConfigModifier(&setDoNotValidateRouteConfig); + auto host = config_helper_.createVirtualHost("foo.com", "/unknown", "unknown_cluster"); + host.mutable_routes(0)->mutable_route()->set_cluster_not_found_response_code( + envoy::api::v2::route::RouteAction::NOT_FOUND); + config_helper_.addVirtualHost(host); initialize(); BufferingStreamDecoderPtr response = IntegrationUtil::makeSingleRequest( @@ -84,9 +92,11 @@ TEST_P(DownstreamProtocolIntegrationTest, RouterClusterNotFound404) { // Add a route that uses unknown cluster (expect 503 Service Unavailable). TEST_P(DownstreamProtocolIntegrationTest, RouterClusterNotFound503) { - config_helper_.addRoute("foo.com", "/unknown", "unknown_cluster", false, - envoy::api::v2::route::RouteAction::SERVICE_UNAVAILABLE, - envoy::api::v2::route::VirtualHost::NONE); + config_helper_.addConfigModifier(&setDoNotValidateRouteConfig); + auto host = config_helper_.createVirtualHost("foo.com", "/unknown", "unknown_cluster"); + host.mutable_routes(0)->mutable_route()->set_cluster_not_found_response_code( + envoy::api::v2::route::RouteAction::SERVICE_UNAVAILABLE); + config_helper_.addVirtualHost(host); initialize(); BufferingStreamDecoderPtr response = IntegrationUtil::makeSingleRequest( @@ -97,9 +107,9 @@ TEST_P(DownstreamProtocolIntegrationTest, RouterClusterNotFound503) { // Add a route which redirects HTTP to HTTPS, and verify Envoy sends a 301 TEST_P(ProtocolIntegrationTest, RouterRedirect) { - config_helper_.addRoute("www.redirect.com", "/", "cluster_0", true, - envoy::api::v2::route::RouteAction::SERVICE_UNAVAILABLE, - envoy::api::v2::route::VirtualHost::ALL); + auto host = config_helper_.createVirtualHost("www.redirect.com", "/"); + 
host.set_require_tls(envoy::api::v2::route::VirtualHost::ALL); + config_helper_.addVirtualHost(host); initialize(); BufferingStreamDecoderPtr response = IntegrationUtil::makeSingleRequest( @@ -230,9 +240,9 @@ TEST_P(ProtocolIntegrationTest, Retry) { // Tests that the x-envoy-attempt-count header is properly set on the upstream request // and updated after the request is retried. TEST_P(DownstreamProtocolIntegrationTest, RetryAttemptCountHeader) { - config_helper_.addRoute("host", "/test_retry", "cluster_0", false, - envoy::api::v2::route::RouteAction::NOT_FOUND, - envoy::api::v2::route::VirtualHost::NONE, {}, true); + auto host = config_helper_.createVirtualHost("host", "/test_retry"); + host.set_include_request_attempt_count(true); + config_helper_.addVirtualHost(host); initialize(); codec_client_ = makeHttpConnection(lookupPort("http")); auto response = @@ -280,13 +290,12 @@ TEST_P(DownstreamProtocolIntegrationTest, RetryPriority) { Registry::InjectFactory inject_factory(factory); - envoy::api::v2::route::RetryPolicy retry_policy; - retry_policy.mutable_retry_priority()->set_name(factory.name()); - // Add route with custom retry policy - config_helper_.addRoute("host", "/test_retry", "cluster_0", false, - envoy::api::v2::route::RouteAction::NOT_FOUND, - envoy::api::v2::route::VirtualHost::NONE, retry_policy); + auto host = config_helper_.createVirtualHost("host", "/test_retry"); + host.set_include_request_attempt_count(true); + auto retry_policy = host.mutable_routes(0)->mutable_route()->mutable_retry_policy(); + retry_policy->mutable_retry_priority()->set_name(factory.name()); + config_helper_.addVirtualHost(host); // Use load assignments instead of static hosts. Necessary in order to use priorities. 
config_helper_.addConfigModifier([](envoy::config::bootstrap::v2::Bootstrap& bootstrap) { @@ -356,13 +365,12 @@ TEST_P(DownstreamProtocolIntegrationTest, RetryHostPredicateFilter) { TestHostPredicateFactory predicate_factory; Registry::InjectFactory inject_factory(predicate_factory); - envoy::api::v2::route::RetryPolicy retry_policy; - retry_policy.add_retry_host_predicate()->set_name(predicate_factory.name()); - // Add route with custom retry policy - config_helper_.addRoute("host", "/test_retry", "cluster_0", false, - envoy::api::v2::route::RouteAction::NOT_FOUND, - envoy::api::v2::route::VirtualHost::NONE, retry_policy); + auto host = config_helper_.createVirtualHost("host", "/test_retry"); + host.set_include_request_attempt_count(true); + auto retry_policy = host.mutable_routes(0)->mutable_route()->mutable_retry_policy(); + retry_policy->add_retry_host_predicate()->set_name(predicate_factory.name()); + config_helper_.addVirtualHost(host); // We want to work with a cluster with two hosts. 
config_helper_.addConfigModifier([](envoy::config::bootstrap::v2::Bootstrap& bootstrap) { diff --git a/test/integration/redirect_integration_test.cc b/test/integration/redirect_integration_test.cc index 91d48b3c7b947..e1a614557fc1c 100644 --- a/test/integration/redirect_integration_test.cc +++ b/test/integration/redirect_integration_test.cc @@ -7,15 +7,13 @@ class RedirectIntegrationTest : public HttpProtocolIntegrationTest { void initialize() override { envoy::api::v2::route::RetryPolicy retry_policy; - config_helper_.addRoute("pass.through.internal.redirect", "/", "cluster_0", false, - envoy::api::v2::route::RouteAction::NOT_FOUND, - envoy::api::v2::route::VirtualHost::NONE, retry_policy, false, "", - envoy::api::v2::route::RouteAction::PASS_THROUGH_INTERNAL_REDIRECT); - - config_helper_.addRoute("handle.internal.redirect", "/", "cluster_0", false, - envoy::api::v2::route::RouteAction::NOT_FOUND, - envoy::api::v2::route::VirtualHost::NONE, retry_policy, false, "", - envoy::api::v2::route::RouteAction::HANDLE_INTERNAL_REDIRECT); + auto pass_through = config_helper_.createVirtualHost("pass.through.internal.redirect"); + config_helper_.addVirtualHost(pass_through); + + auto handle = config_helper_.createVirtualHost("handle.internal.redirect"); + handle.mutable_routes(0)->mutable_route()->set_internal_redirect_action( + envoy::api::v2::route::RouteAction::HANDLE_INTERNAL_REDIRECT); + config_helper_.addVirtualHost(handle); HttpProtocolIntegrationTest::initialize(); } diff --git a/test/integration/websocket_integration_test.cc b/test/integration/websocket_integration_test.cc index 92d1f696403e3..d4b55d31e826d 100644 --- a/test/integration/websocket_integration_test.cc +++ b/test/integration/websocket_integration_test.cc @@ -319,9 +319,9 @@ TEST_P(WebsocketIntegrationTest, RouteSpecificUpgrade) { foo_upgrade->set_upgrade_type("foo"); foo_upgrade->mutable_enabled()->set_value(false); }); - config_helper_.addRoute("host", "/websocket/test", "cluster_0", false, - 
envoy::api::v2::route::RouteAction::NOT_FOUND, - envoy::api::v2::route::VirtualHost::NONE, {}, false, "foo"); + auto host = config_helper_.createVirtualHost("host", "/websocket/test"); + host.mutable_routes(0)->mutable_route()->add_upgrade_configs()->set_upgrade_type("foo"); + config_helper_.addVirtualHost(host); initialize(); performUpgrade(upgradeRequestHeaders("foo", 0), upgradeResponseHeaders("foo")); From fc5a3b2e1273cd7103d73071c01893c5d064dee0 Mon Sep 17 00:00:00 2001 From: moderation Date: Wed, 27 Mar 2019 08:36:09 -0800 Subject: [PATCH 025/165] Docs: Update reference configurations YAML templates for Struct deprecation for Any and hosts deprecation for load_assignment (#6368) Update examples for Struct deprecation for Any Risk Level: Low - generated configs only, no changes to code Testing: bazel build //configs:example_configs, bazel test //test/... Docs Changes: None required Release Notes: None required Fixes #6025 Replaces #6356 Related #6346 Signed-off-by: Michael Payne --- configs/envoy_double_proxy_v2.template.yaml | 75 ++++-- configs/envoy_front_proxy_v2.template.yaml | 62 +++-- .../envoy_service_to_service_v2.template.yaml | 229 +++++++++++------- 3 files changed, 238 insertions(+), 128 deletions(-) diff --git a/configs/envoy_double_proxy_v2.template.yaml b/configs/envoy_double_proxy_v2.template.yaml index 0d638a6fe85dc..2c08332f795d8 100644 --- a/configs/envoy_double_proxy_v2.template.yaml +++ b/configs/envoy_double_proxy_v2.template.yaml @@ -25,7 +25,8 @@ {%endif -%} filters: - name: envoy.http_connection_manager - config: + typed_config: + "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager codec_type: AUTO stat_prefix: router route_config: @@ -42,14 +43,18 @@ timeout: 20s http_filters: - name: envoy.health_check - config: + typed_config: + "@type": type.googleapis.com/envoy.config.filter.http.health_check.v2.HealthCheck pass_through_mode: false - endpoint: /healthcheck - name: envoy.buffer 
- config: + headers: + - exact_match: /healthcheck + name: :path + - name: envoy.buffer + typed_config: + "@type": type.googleapis.com/envoy.config.filter.http.buffer.v2.Buffer max_request_bytes: 5242880 - name: envoy.router - config: {} + - name: envoy.router + typed_config: {} tracing: operation_name: INGRESS idle_timeout: 840s @@ -71,7 +76,8 @@ default_value: 1000 runtime_key: access_log.access_error.duration - traceable_filter: {} - config: + typed_config: + "@type": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog path: /var/log/envoy/access_error.log format: "[%START_TIME%] \"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%\" %RESPONSE_CODE% %RESPONSE_FLAGS% %BYTES_RECEIVED% %BYTES_SENT% %DURATION% %RESP(X-ENVOY-UPSTREAM-SERVICE-TIME)% \"%REQ(X-FORWARDED-FOR)%\" \"%REQ(USER-AGENT)%\" \"%REQ(X-REQUEST-ID)%\" \"%REQ(:AUTHORITY)%\" \"%REQ(X-LYFT-USER-ID)%\" \"%RESP(GRPC-STATUS)%\"\n" {% if proxy_proto %} @@ -91,20 +97,30 @@ static_resources: type: STATIC connect_timeout: 0.25s lb_policy: ROUND_ROBIN - hosts: - - socket_address: - protocol: TCP - address: 127.0.0.1 - port_value: 8125 + load_assignment: + cluster_name: statsd + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 8125 + protocol: TCP - name: backhaul type: STRICT_DNS connect_timeout: 1s lb_policy: ROUND_ROBIN - hosts: - - socket_address: - protocol: TCP - address: front-proxy.yourcompany.net - port_value: 9400 + load_assignment: + cluster_name: backhaul + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: front-proxy.yourcompany.net + port_value: 9400 + protocol: TCP # There are so few connections going back # that we can get some imbalance. 
Until we come up # with a better solution just limit the requests @@ -127,11 +143,16 @@ static_resources: type: LOGICAL_DNS connect_timeout: 1s lb_policy: ROUND_ROBIN - hosts: - - socket_address: - protocol: TCP - address: collector-grpc.lightstep.com - port_value: 443 + load_assignment: + cluster_name: lightstep_saas + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: collector-grpc.lightstep.com + port_value: 443 + protocol: TCP http2_protocol_options: {} tls_context: common_tls_context: @@ -143,12 +164,14 @@ static_resources: flags_path: "/etc/envoy/flags" stats_sinks: - name: envoy.statsd - config: + typed_config: + "@type": type.googleapis.com/envoy.config.metrics.v2.StatsdSink tcp_cluster_name: statsd tracing: http: name: envoy.lightstep - config: + typed_config: + "@type": type.googleapis.com/envoy.config.trace.v2.LightstepConfig access_token_file: "/etc/envoy/lightstep_access_token" collector_cluster: lightstep_saas runtime: @@ -156,7 +179,7 @@ runtime: subdirectory: envoy override_subdirectory: envoy_override admin: - access_log_path: "var/log/envoy/admin_access.log" + access_log_path: "/var/log/envoy/admin_access.log" address: socket_address: protocol: TCP diff --git a/configs/envoy_front_proxy_v2.template.yaml b/configs/envoy_front_proxy_v2.template.yaml index ef44b641ab609..35f734f80ad2e 100644 --- a/configs/envoy_front_proxy_v2.template.yaml +++ b/configs/envoy_front_proxy_v2.template.yaml @@ -31,7 +31,8 @@ {%endif %} filters: - name: envoy.http_connection_manager - config: + typed_config: + "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager codec_type: AUTO stat_prefix: router {% if proxy_proto -%} @@ -42,13 +43,15 @@ {{ router_file_content(router_file='envoy_router_v2.template.yaml')|indent(10) }} http_filters: - name: envoy.health_check - config: + typed_config: + "@type": type.googleapis.com/envoy.config.filter.http.health_check.v2.HealthCheck pass_through_mode: 
false headers: - name: ":path" exact_match: "/healthcheck" - name: envoy.buffer - config: + typed_config: + "@type": type.googleapis.com/envoy.config.filter.http.buffer.v2.Buffer max_request_bytes: 5242880 - name: envoy.rate_limit config: @@ -59,7 +62,7 @@ envoy_grpc: cluster_name: ratelimit - name: envoy.router - config: {} + typed_config: {} add_user_agent: true tracing: operation_name: INGRESS @@ -82,7 +85,8 @@ default_value: 1000 runtime_key: access_log.access_error.duration - traceable_filter: {} - config: + typed_config: + "@type": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog path: "/var/log/envoy/access_error.log" format: "[%START_TIME%] \"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%\" %RESPONSE_CODE% %RESPONSE_FLAGS% %BYTES_RECEIVED% %BYTES_SENT% %DURATION% %RESP(X-ENVOY-UPSTREAM-SERVICE-TIME)% \"%REQ(X-FORWARDED-FOR)%\" \"%REQ(USER-AGENT)%\" \"%REQ(X-REQUEST-ID)%\" \"%REQ(:AUTHORITY)%\" \"%REQ(X-LYFT-USER-ID)%\" \"%RESP(GRPC-STATUS)%\"\n" {% endmacro -%} @@ -100,29 +104,44 @@ static_resources: type: STRICT_DNS connect_timeout: 0.25s lb_policy: ROUND_ROBIN - hosts: - - socket_address: - protocol: TCP - address: disccovery.yourcompany.net - port_value: 80 + load_assignment: + cluster_name: sds + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: discovery.yourcompany.net + port_value: 80 + protocol: TCP - name: statsd type: STATIC connect_timeout: 0.25s lb_policy: ROUND_ROBIN - hosts: - - socket_address: - protocol: TCP - address: 127.0.0.1 - port_value: 8125 + load_assignment: + cluster_name: statsd + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 8125 + protocol: TCP - name: lightstep_saas type: LOGICAL_DNS connect_timeout: 1s lb_policy: ROUND_ROBIN - hosts: - - socket_address: - protocol: TCP - address: collector-grpc.lightstep.com - port_value: 443 + load_assignment: + cluster_name: lightstep_saas + endpoints: + - lb_endpoints: + - 
endpoint: + address: + socket_address: + address: collector-grpc.lightstep.com + port_value: 443 + protocol: TCP http2_protocol_options: {} {% for service, options in clusters.items() -%} - {{ helper.internal_cluster_definition(service, options)|indent(2) }} @@ -134,7 +153,8 @@ flags_path: /etc/envoy/flags tracing: http: name: envoy.lightstep - config: + typed_config: + "@type": type.googleapis.com/envoy.config.trace.v2.LightstepConfig collector_cluster: lightstep_saas access_token_file: "/etc/envoy/lightstep_access_token" runtime: diff --git a/configs/envoy_service_to_service_v2.template.yaml b/configs/envoy_service_to_service_v2.template.yaml index e6b40b734ff77..083a8c39a2926 100644 --- a/configs/envoy_service_to_service_v2.template.yaml +++ b/configs/envoy_service_to_service_v2.template.yaml @@ -9,7 +9,8 @@ filter_chains: - filters: - name: envoy.http_connection_manager - config: + typed_config: + "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager codec_type: AUTO stat_prefix: ingress_http route_config: @@ -32,22 +33,25 @@ cluster: local_service http_filters: - name: envoy.health_check - config: - pass_through_mode: true - headers: - - name: ":path" - exact_match: "/healthcheck" - cache_time: 2.5s + typed_config: + "@type": type.googleapis.com/envoy.config.filter.http.health_check.v2.HealthCheck + pass_through_mode: true + headers: + - name: ":path" + exact_match: "/healthcheck" + cache_time: 2.5s - name: envoy.buffer - config: - max_request_bytes: 5242880 + typed_config: + "@type": type.googleapis.com/envoy.config.filter.http.buffer.v2.Buffer + max_request_bytes: 5242880 - name: envoy.router - config: {} + typed_config: {} access_log: - name: envoy.file_access_log filter: not_health_check_filter: {} - config: + typed_config: + "@type": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog path: "/var/log/envoy/ingress_http.log" {{ access_log_helper.ingress_full()|indent(10)}} - name: 
envoy.file_access_log @@ -75,7 +79,8 @@ default_value: 2000 runtime_key: access_log.access_error.duration - not_health_check_filter: {} - config: + typed_config: + "@type": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog path: "/var/log/envoy/ingress_http_error.log" {{ access_log_helper.ingress_sampled_log()|indent(10)}} - name: envoy.file_access_log @@ -85,7 +90,8 @@ - not_health_check_filter: {} - runtime_filter: runtime_key: access_log.ingress_http - config: + typed_config: + "@type": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog path: "/var/log/envoy/ingress_http_sampled.log" {{ access_log_helper.ingress_sampled_log()|indent(10)}} idle_timeout: 840s @@ -103,7 +109,8 @@ static_resources: filter_chains: - filters: - name: envoy.http_connection_manager - config: + typed_config: + "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager codec_type: AUTO stat_prefix: egress_http route_config: @@ -141,9 +148,10 @@ static_resources: default_value: 2000 runtime_key: access_log.access_error.duration - traceable_filter: {} - config: + typed_config: + "@type": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog path: "/var/log/envoy/egress_http_error.log" - {{ access_log_helper.egress_error_log()|indent(10)}} + {{ access_log_helper.egress_error_log()|indent(10) }} use_remote_address: true http_filters: - name: envoy.rate_limit @@ -154,9 +162,9 @@ static_resources: envoy_grpc: cluster_name: ratelimit - name: envoy.grpc_http1_bridge - config: {} + typed_config: {} - name: envoy.router - config: {} + typed_config: {} - address: socket_address: @@ -166,7 +174,8 @@ static_resources: filter_chains: - filters: - name: envoy.http_connection_manager - config: + typed_config: + "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager codec_type: AUTO stat_prefix: egress_http rds: @@ -199,7 +208,8 @@ static_resources: default_value: 2000 runtime_key: 
access_log.access_error.duration - traceable_filter: {} - config: + typed_config: + "@type": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog path: "/var/log/envoy/egress_http_error.log" {{ access_log_helper.egress_error_log()|indent(10) }} use_remote_address: true @@ -212,9 +222,9 @@ static_resources: envoy_grpc: cluster_name: ratelimit - name: envoy.grpc_http1_bridge - config: {} + typed_config: {} - name: envoy.router - config: {} + typed_config: {} {% if external_virtual_hosts|length > 0 or mongos_servers|length > 0 %}{% endif -%} {% for mapping in external_virtual_hosts -%} - name: "{{ mapping['address']}}" @@ -226,7 +236,8 @@ static_resources: filter_chains: - filters: - name: envoy.http_connection_manager - config: + typed_config: + "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager codec_type: AUTO idle_timeout: 840s stat_prefix: egress_{{ mapping['name'] }} @@ -251,10 +262,10 @@ static_resources: http_filters: {% if mapping['name'] in ['dynamodb_iad', 'dynamodb_legacy'] -%} - name: envoy.http_dynamo_filter - config: {} + typed_config: {} {% endif -%} - name: envoy.router - config: {} + typed_config: {} access_log: - name: envoy.file_access_log filter: @@ -280,7 +291,8 @@ static_resources: default_value: 2000 runtime_key: access_log.access_error.duration {% endif %} - config: + typed_config: + "@type": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog path: "/var/log/envoy/egress_{{ mapping['name'] }}_http_error.log" {% if mapping.get('is_amzn_service', False) -%} {{ access_log_helper.egress_error_amazon_service()|indent(10) }} @@ -299,7 +311,8 @@ static_resources: filter_chains: - filters: - name: envoy.tcp_proxy - config: + typed_config: + "@type": type.googleapis.com/envoy.config.filter.network.tcp_proxy.v2.TcpProxy stat_prefix: mongo_{{ key }} cluster: mongo_{{ key }} - name: envoy.mongo_proxy @@ -342,11 +355,16 @@ static_resources: {% endif %} type: LOGICAL_DNS lb_policy: 
ROUND_ROBIN - hosts: - - socket_address: - address: {{ host['remote_address'] }} - port_value: {{ host['port_value'] }} - protocol: {{ host['protocol'] }} + load_assignment: + cluster_name: egress_{{ host['name'] }} + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: {{ host['remote_address'] }} + port_value: {{ host['port_value'] }} + protocol: {{ host['protocol'] }} {% endfor -%} {% endfor -%} {% for key, value in mongos_servers.items() -%} @@ -354,13 +372,18 @@ static_resources: connect_timeout: 0.25s type: STRICT_DNS lb_policy: RANDOM - hosts: - {% for server in value['hosts'] -%} - - socket_address: - protocol: {{ server['protocol'] }} - port_value: {{ server['port_value'] }} - address: {{ server['address'] }} - {% endfor -%} + load_assignment: + cluster_name: mongo_{{ key }} + endpoints: + - lb_endpoints: + {% for server in value['hosts'] -%} + - endpoint: + address: + socket_address: + address: {{ server['address'] }} + port_value: {{ server['port_value'] }} + protocol: {{ server['protocol'] }} + {% endfor -%} {% endfor %} - name: main_website connect_timeout: 0.25s @@ -368,20 +391,32 @@ static_resources: # Comment out the following line to test on v6 networks dns_lookup_family: V4_ONLY lb_policy: ROUND_ROBIN - hosts: - - socket_address: - address: main_website.com - port_value: 443 - tls_context: { sni: www.main_website.com } + load_assignment: + cluster_name: main_website + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: main_website.com + port_value: 443 + protocol: TCP + tls_context: + sni: www.main_website.com - name: local_service connect_timeout: 0.25s type: STATIC lb_policy: ROUND_ROBIN - hosts: - - socket_address: - protocol: TCP - address: 127.0.0.1 - port_value: 8080 + load_assignment: + cluster_name: local_service + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 8080 + protocol: TCP circuit_breakers: thresholds: 
max_pending_requests: 30 @@ -391,11 +426,16 @@ static_resources: type: STATIC lb_policy: ROUND_ROBIN http2_protocol_options: {} - hosts: - - socket_address: - protocol: TCP - address: 127.0.0.1 - port_value: 8081 + load_assignment: + cluster_name: local_service_grpc + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 8081 + protocol: TCP circuit_breakers: thresholds: max_requests: 200 @@ -404,31 +444,46 @@ static_resources: connect_timeout: 0.25s type: STRICT_DNS lb_policy: ROUND_ROBIN - hosts: - - socket_address: - protocol: TCP - address: rds.yourcompany.net - port_value: 80 + load_assignment: + cluster_name: rds + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: rds.yourcompany.net + port_value: 80 + protocol: TCP dns_lookup_family: V4_ONLY - name: statsd connect_timeout: 0.25s type: STATIC lb_policy: ROUND_ROBIN - hosts: - - socket_address: - protocol: TCP - address: 127.0.0.1 - port_value: 8125 + load_assignment: + cluster_name: statsd + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 8125 + protocol: TCP dns_lookup_family: V4_ONLY - name: lightstep_saas connect_timeout: 1s type: LOGICAL_DNS lb_policy: ROUND_ROBIN - hosts: - - socket_address: - protocol: TCP - address: collector-grpc.lightstep.com - port_value: 443 + load_assignment: + cluster_name: lightstep_saas + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: collector-grpc.lightstep.com + port_value: 443 + protocol: TCP http2_protocol_options: max_concurrent_streams: 100 tls_context: @@ -442,20 +497,30 @@ static_resources: connect_timeout: 0.25s type: STRICT_DNS lb_policy: ROUND_ROBIN - hosts: - - socket_address: - protocol: TCP - address: cds.yourcompany.net - port_value: 80 + load_assignment: + cluster_name: cds_cluster + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 
cds.yourcompany.net + port_value: 80 + protocol: TCP - name: sds connect_timeout: 0.25s type: STRICT_DNS lb_policy: ROUND_ROBIN - hosts: - - socket_address: - protocol: TCP - address: discovery.yourcompany.net - port_value: 80 + load_assignment: + cluster_name: sds + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: discovery.yourcompany.net + port_value: 80 + protocol: TCP dynamic_resources: cds_config: api_config_source: @@ -467,13 +532,15 @@ cluster_manager: {} flags_path: "/etc/envoy/flags" stats_sinks: - name: envoy.statsd - config: + typed_config: + "@type": type.googleapis.com/envoy.config.metrics.v2.StatsdSink tcp_cluster_name: statsd watchdog: {} tracing: http: name: envoy.lightstep - config: + typed_config: + "@type": type.googleapis.com/envoy.config.trace.v2.LightstepConfig access_token_file: "/etc/envoy/lightstep_access_token" collector_cluster: lightstep_saas runtime: From 8ba28c301757cbe202b62b045b989e5b794a5b6b Mon Sep 17 00:00:00 2001 From: htuch Date: Wed, 27 Mar 2019 12:40:22 -0400 Subject: [PATCH 026/165] api: reserve HCM field for pending security fix. (#6397) Signed-off-by: Harvey Tuch --- .../http_connection_manager/v2/http_connection_manager.proto | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto b/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto index 627082314dc49..3f7620d2bc3df 100644 --- a/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto +++ b/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto @@ -24,7 +24,7 @@ import "gogoproto/gogo.proto"; // [#protodoc-title: HTTP connection manager] // HTTP connection manager :ref:`configuration overview `. 
-// [#comment:next free field: 30] +// [#comment:next free field: 31] message HttpConnectionManager { enum CodecType { option (gogoproto.goproto_enum_prefix) = false; @@ -378,6 +378,9 @@ message HttpConnectionManager { repeated UpgradeConfig upgrade_configs = 23; reserved 27; + + // This is reserved for a pending security fix. + reserved 30; } message Rds { From 56559002516310fd0cd5791c3cb10c69f742df7b Mon Sep 17 00:00:00 2001 From: Piotr Kufel Date: Wed, 27 Mar 2019 09:47:11 -0700 Subject: [PATCH 027/165] Support google_default channel credentials (#6366) Support google_default in channel credentials configuration. The documentation mentions this option and yet it's ignored. Risk Level: Low, the option was seemingly useless/unused. If anybody relies on it doing nothing, they can just unset it. Testing: Tried running my own envoy, seemed to pick up the credentials pointed to be GOOGLE_APPLICATION_CREDENTIALS environment variable. Signed-off-by: qfel --- source/common/grpc/google_grpc_creds_impl.cc | 3 +++ test/common/grpc/BUILD | 1 + test/common/grpc/google_grpc_creds_test.cc | 11 +++++++++++ test/common/grpc/service_key.json | 12 ++++++++++++ 4 files changed, 27 insertions(+) create mode 100644 test/common/grpc/service_key.json diff --git a/source/common/grpc/google_grpc_creds_impl.cc b/source/common/grpc/google_grpc_creds_impl.cc index 30f6ef3a04090..d73ad3cb59997 100644 --- a/source/common/grpc/google_grpc_creds_impl.cc +++ b/source/common/grpc/google_grpc_creds_impl.cc @@ -25,6 +25,9 @@ std::shared_ptr CredsUtility::getChannelCredentials( case envoy::api::v2::core::GrpcService::GoogleGrpc::ChannelCredentials::kLocalCredentials: { return grpc::experimental::LocalCredentials(UDS); } + case envoy::api::v2::core::GrpcService::GoogleGrpc::ChannelCredentials::kGoogleDefault: { + return grpc::GoogleDefaultCredentials(); + } default: return nullptr; } diff --git a/test/common/grpc/BUILD b/test/common/grpc/BUILD index 46657034b1787..549801ad5e7dd 100644 --- 
a/test/common/grpc/BUILD +++ b/test/common/grpc/BUILD @@ -78,6 +78,7 @@ envoy_cc_test( envoy_cc_test( name = "google_grpc_creds_test", srcs = envoy_select_google_grpc(["google_grpc_creds_test.cc"]), + data = [":service_key.json"], deps = [ ":utility_lib", "//test/mocks/stats:stats_mocks", diff --git a/test/common/grpc/google_grpc_creds_test.cc b/test/common/grpc/google_grpc_creds_test.cc index ca60d89971d45..819b758e1614c 100644 --- a/test/common/grpc/google_grpc_creds_test.cc +++ b/test/common/grpc/google_grpc_creds_test.cc @@ -1,7 +1,10 @@ +#include + #include "common/grpc/google_grpc_creds_impl.h" #include "test/common/grpc/utility.h" #include "test/mocks/stats/mocks.h" +#include "test/test_common/environment.h" #include "test/test_common/utility.h" #include "gtest/gtest.h" @@ -31,6 +34,14 @@ TEST_F(CredsUtilityTest, GetChannelCredentials) { EXPECT_NE(nullptr, CredsUtility::getChannelCredentials(config, *api_)); creds->mutable_local_credentials(); EXPECT_NE(nullptr, CredsUtility::getChannelCredentials(config, *api_)); + + const char var_name[] = "GOOGLE_APPLICATION_CREDENTIALS"; + EXPECT_EQ(nullptr, ::getenv(var_name)); + const auto creds_path = TestEnvironment::runfilesPath("test/common/grpc/service_key.json"); + ::setenv(var_name, creds_path.c_str(), 0); + creds->mutable_google_default(); + EXPECT_NE(nullptr, CredsUtility::getChannelCredentials(config, *api_)); + ::unsetenv(var_name); } TEST_F(CredsUtilityTest, DefaultSslChannelCredentials) { diff --git a/test/common/grpc/service_key.json b/test/common/grpc/service_key.json new file mode 100644 index 0000000000000..0e91dfe83bc7d --- /dev/null +++ b/test/common/grpc/service_key.json @@ -0,0 +1,12 @@ +{ + "type": "service_account", + "project_id": "teset-project", + "private_key_id": "xxx", + "private_key": "-----BEGIN PRIVATE KEY-----\nspUMkfFsoTfa\n-----END PRIVATE KEY-----\n", + "client_email": "test@test.iam.gserviceaccount.com", + "client_id": "42", + "auth_uri": "https://accounts.google.com/o/oauth2/auth", 
+ "token_uri": "https://oauth2.googleapis.com/token", + "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs", + "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/test%test-dev.iam.gserviceaccount.com" +} From 9a82dee216f94109140b05c0e6ee0f721ad8bc18 Mon Sep 17 00:00:00 2001 From: Snow Pettersen Date: Wed, 27 Mar 2019 11:09:59 -0700 Subject: [PATCH 028/165] upstream: fix bug causing crashes during priority host moves (#6396) This fixes a bug where hosts that were moved between priorities would not be included in the hosts_added vector, resulting in crashes if the same host was moved multiple times when used with active health checking: if a host was moved between priorities twice, it would first get removed from the health checker, then on the second move the health checker would crash as it would attempt to remove a host it didn't know about. We fix this by explicitly adding the existing host to the list of added hosts iff the host was previously in a different priority. Uncovering this bug lead to the discovery of a bug in the batch updating done during EDS: std::set_difference assumes that the provided ranges are both *sorted*, which is not generally true during this update flow. This meant that the filtering of hosts that were added/removed did not work correctly, and would produce inconsistent result dependent on the ordering of the host pointers in the unordered_map. We fix this by using a standard for loop instead of std::set_difference. Not only is this more correct, it should also be faster for large sets as it performs the filtering in O(n) instead of O(n^2). 
Signed-off-by: Snow Pettersen --- source/common/upstream/upstream_impl.cc | 8 ++++++-- test/common/upstream/eds_test.cc | 6 ++++++ 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/source/common/upstream/upstream_impl.cc b/source/common/upstream/upstream_impl.cc index a50b650f4be87..988098842a0e6 100644 --- a/source/common/upstream/upstream_impl.cc +++ b/source/common/upstream/upstream_impl.cc @@ -203,8 +203,11 @@ HostVector filterHosts(const std::unordered_set& hosts, HostVector net_hosts; net_hosts.reserve(hosts.size()); - std::set_difference(hosts.begin(), hosts.end(), excluded_hosts.begin(), excluded_hosts.end(), - std::inserter(net_hosts, net_hosts.begin())); + for (const auto& host : hosts) { + if (excluded_hosts.find(host) == excluded_hosts.end()) { + net_hosts.emplace_back(host); + } + } return net_hosts; } @@ -1144,6 +1147,7 @@ bool BaseDynamicClusterImpl::updateDynamicHostList(const HostVector& new_hosts, // Did the priority change? if (host->priority() != existing_host->second->priority()) { existing_host->second->priority(host->priority()); + hosts_added_to_current_priority.emplace_back(existing_host->second); } existing_host->second->weight(host->weight()); diff --git a/test/common/upstream/eds_test.cc b/test/common/upstream/eds_test.cc index bb22655fda823..c3e640e637144 100644 --- a/test/common/upstream/eds_test.cc +++ b/test/common/upstream/eds_test.cc @@ -680,6 +680,12 @@ TEST_F(EdsTest, EndpointMoved) { add_endpoint(81, 0); add_endpoint(80, 1); + // Verify that no hosts gets added or removed to/from the PrioritySet. 
+ cluster_->prioritySet().addMemberUpdateCb([&](const auto& added, const auto& removed) { + EXPECT_TRUE(added.empty()); + EXPECT_TRUE(removed.empty()); + }); + VERBOSE_EXPECT_NO_THROW(cluster_->onConfigUpdate(resources, "")); { From bacd89e866b4d81dd316613ce11c0b9c678cc421 Mon Sep 17 00:00:00 2001 From: Nicolas Flacco <47160394+FAYiEKcbD0XFqF2QK2E4viAHg8rMm2VbjYKdjTg@users.noreply.github.com> Date: Wed, 27 Mar 2019 14:04:34 -0700 Subject: [PATCH 029/165] Revert "redis: prefixed routing (#5658)" (#6401) The change breaks the existing Redis operation, for example redis-cli -p [WHATEVER] GET 1 crashes Envoy. This reverts commit 046e98904f6df60f0c548ffe77ffb5f5f980179d. Signed-off-by: Nicolas Flacco --- DEPRECATED.md | 1 - .../network/redis_proxy/v2/redis_proxy.proto | 63 +----- docs/root/intro/arch_overview/redis.rst | 5 +- docs/root/intro/version_history.rst | 1 - source/common/common/utility.h | 34 +-- .../filters/network/redis_proxy/BUILD | 29 +-- .../redis_proxy/command_splitter_impl.cc | 41 ++-- .../redis_proxy/command_splitter_impl.h | 30 +-- .../filters/network/redis_proxy/config.cc | 43 +--- .../filters/network/redis_proxy/conn_pool.h | 2 +- .../network/redis_proxy/conn_pool_impl.h | 1 + .../network/redis_proxy/proxy_filter.cc | 2 +- .../network/redis_proxy/proxy_filter.h | 1 + .../filters/network/redis_proxy/router.h | 42 ---- .../network/redis_proxy/router_impl.cc | 68 ------ .../filters/network/redis_proxy/router_impl.h | 55 ----- test/common/common/utility_test.cc | 37 ---- .../filters/network/redis_proxy/BUILD | 13 -- .../redis_proxy/command_lookup_speed_test.cc | 8 +- .../redis_proxy/command_splitter_impl_test.cc | 24 ++- .../network/redis_proxy/config_test.cc | 15 -- .../redis_proxy/conn_pool_impl_test.cc | 5 +- .../filters/network/redis_proxy/mocks.cc | 3 - .../filters/network/redis_proxy/mocks.h | 12 -- .../network/redis_proxy/proxy_filter_test.cc | 2 +- .../network/redis_proxy/router_impl_test.cc | 199 ------------------ 26 files changed, 78 
insertions(+), 658 deletions(-) delete mode 100644 source/extensions/filters/network/redis_proxy/router.h delete mode 100644 source/extensions/filters/network/redis_proxy/router_impl.cc delete mode 100644 source/extensions/filters/network/redis_proxy/router_impl.h delete mode 100644 test/extensions/filters/network/redis_proxy/router_impl_test.cc diff --git a/DEPRECATED.md b/DEPRECATED.md index 502987c09035d..9743d8d3a910b 100644 --- a/DEPRECATED.md +++ b/DEPRECATED.md @@ -15,7 +15,6 @@ A logged warning is expected for each deprecated item that is in deprecation win [fault.proto](https://github.com/envoyproxy/envoy/blob/master/api/envoy/config/filter/fault/v2/fault.proto)) has been deprecated. It was never used and setting it has no effect. It will be removed in the following release. -* Use of `cluster`, found in [redis-proxy.proto](https://github.com/envoyproxy/envoy/blob/master/api/envoy/config/filter/network/redis_proxy/v2/redis_proxy.proto) is deprecated. Set a `PrefixRoutes.catch_all_cluster` instead. ## Version 1.9.0 (Dec 20, 2018) diff --git a/api/envoy/config/filter/network/redis_proxy/v2/redis_proxy.proto b/api/envoy/config/filter/network/redis_proxy/v2/redis_proxy.proto index 696bf26b8b5c9..cd8c18b128755 100644 --- a/api/envoy/config/filter/network/redis_proxy/v2/redis_proxy.proto +++ b/api/envoy/config/filter/network/redis_proxy/v2/redis_proxy.proto @@ -22,13 +22,7 @@ message RedisProxy { // Name of cluster from cluster manager. See the :ref:`configuration section // ` of the architecture overview for recommendations on // configuring the backing cluster. - // - // .. attention:: - // - // This field is deprecated. Use a :ref:`catch-all - // cluster` - // instead. - string cluster = 2 [deprecated = true]; + string cluster = 2 [(validate.rules).string.min_bytes = 1]; // Redis connection pool settings. 
message ConnPoolSettings { @@ -54,63 +48,10 @@ message RedisProxy { bool enable_hashtagging = 2; } - // Network settings for the connection pool to the upstream clusters. + // Network settings for the connection pool to the upstream cluster. ConnPoolSettings settings = 3 [(validate.rules).message.required = true]; // Indicates that latency stat should be computed in microseconds. By default it is computed in // milliseconds. bool latency_in_micros = 4; - - message PrefixRoutes { - message Route { - // String prefix that must match the beginning of the keys. Envoy will always favor the - // longest match. - string prefix = 1 [(validate.rules).string.min_bytes = 1]; - - // Indicates if the prefix needs to be removed from the key when forwarded. - bool remove_prefix = 2; - - // Upstream cluster to forward the command to. - string cluster = 3 [(validate.rules).string.min_bytes = 1]; - } - - // List of prefix routes. - repeated Route routes = 1 [(gogoproto.nullable) = false]; - - // Indicates that prefix matching should be case insensitive. - bool case_insensitive = 2; - - // Optional catch-all route to forward commands that doesn't match any of the routes. The - // catch-all route becomes required when no routes are specified. - string catch_all_cluster = 3; - } - - // List of **unique** prefixes used to separate keys from different workloads to different - // clusters. Envoy will always favor the longest match first in case of overlap. A catch-all - // cluster can be used to forward commands when there is no match. Time complexity of the - // lookups are in O(min(longest key prefix, key length)). - // - // Example: - // - // .. code-block:: yaml - // - // prefix_routes: - // routes: - // - prefix: "ab" - // cluster: "cluster_a" - // - prefix: "abc" - // cluster: "cluster_b" - // - // When using the above routes, the following prefixes would be sent to: - // - // * 'get abc:users' would retrive the key 'abc:users' from cluster_b. 
- // * 'get ab:users' would retrive the key 'ab:users' from cluster_a. - // * 'get z:users' would return a NoUpstreamHost error. A :ref:`catch-all - // cluster` - // would have retrieved the key from that cluster instead. - // - // See the :ref:`configuration section - // ` of the architecture overview for recommendations on - // configuring the backing clusters. - PrefixRoutes prefix_routes = 5 [(gogoproto.nullable) = false]; } diff --git a/docs/root/intro/arch_overview/redis.rst b/docs/root/intro/arch_overview/redis.rst index b3aa16565ad78..044ea66553726 100644 --- a/docs/root/intro/arch_overview/redis.rst +++ b/docs/root/intro/arch_overview/redis.rst @@ -8,9 +8,7 @@ In this mode, the goals of Envoy are to maintain availability and partition tole over consistency. This is the key point when comparing Envoy to `Redis Cluster `_. Envoy is designed as a best-effort cache, meaning that it will not try to reconcile inconsistent data or keep a globally consistent -view of cluster membership. It also supports routing commands from different workload to -different to different upstream clusters based on their access patterns, eviction, or isolation -requirements. +view of cluster membership. The Redis project offers a thorough reference on partitioning as it relates to Redis. See "`Partitioning: how to split data among multiple Redis instances @@ -24,7 +22,6 @@ The Redis project offers a thorough reference on partitioning as it relates to R * Detailed command statistics. * Active and passive healthchecking. * Hash tagging. -* Prefix routing. **Planned future enhancements**: diff --git a/docs/root/intro/version_history.rst b/docs/root/intro/version_history.rst index 8032b072d2f00..b601e6f1dad62 100644 --- a/docs/root/intro/version_history.rst +++ b/docs/root/intro/version_history.rst @@ -53,7 +53,6 @@ Version history * ratelimit: removed deprecated rate limit configuration from bootstrap. * redis: added :ref:`hashtagging ` to guarantee a given key's upstream. 
* redis: added :ref:`latency stats ` for commands. -* redis: added :ref:`prefix routing ` to enable routing commands based on their key's prefix to different upstream. * redis: added :ref:`success and error stats ` for commands. * redis: migrate hash function for host selection to `MurmurHash2 `_ from std::hash. MurmurHash2 is compatible with std::hash in GNU libstdc++ 3.4.20 or above. This is typically the case when compiled on Linux and not macOS. * redis: added :ref:`latency_in_micros ` to specify the redis commands stats time unit in microseconds. diff --git a/source/common/common/utility.h b/source/common/common/utility.h index 9eaddb7f64da1..785df6d8aa404 100644 --- a/source/common/common/utility.h +++ b/source/common/common/utility.h @@ -568,11 +568,8 @@ template struct TrieLookupTable { * Adds an entry to the Trie at the given Key. * @param key the key used to add the entry. * @param value the value to be associated with the key. - * @param overwrite_existing will overwrite the value when the value for a given key already - * exists. - * @return false when a value already exists for the given key. */ - bool add(const char* key, Value value, bool overwrite_existing = true) { + void add(const char* key, Value value) { TrieEntry* current = &root_; while (uint8_t c = *key) { if (!current->entries_[c]) { @@ -581,11 +578,7 @@ template struct TrieLookupTable { current = current->entries_[c].get(); key++; } - if (current->value_ && !overwrite_existing) { - return false; - } current->value_ = value; - return true; } /** @@ -606,31 +599,6 @@ template struct TrieLookupTable { return current->value_; } - /** - * Finds the entry associated with the longest prefix. Complexity is O(min(longest key prefix, key - * length)) - * @param key the key used to find. - * @return the value matching the longest prefix based on the key. 
- */ - Value findLongestPrefix(const char* key) const { - const TrieEntry* current = &root_; - const TrieEntry* result = nullptr; - while (uint8_t c = *key) { - if (current->value_) { - result = current; - } - - // https://github.com/facebook/mcrouter/blob/master/mcrouter/lib/fbi/cpp/Trie-inl.h#L126-L143 - current = current->entries_[c].get(); - if (current == nullptr) { - return result ? result->value_ : nullptr; - } - - key++; - } - return current ? current->value_ : result->value_; - } - TrieEntry root_; }; diff --git a/source/extensions/filters/network/redis_proxy/BUILD b/source/extensions/filters/network/redis_proxy/BUILD index 911edafb83684..8cd0a234462e0 100644 --- a/source/extensions/filters/network/redis_proxy/BUILD +++ b/source/extensions/filters/network/redis_proxy/BUILD @@ -30,22 +30,13 @@ envoy_cc_library( ], ) -envoy_cc_library( - name = "router_interface", - hdrs = ["router.h"], - deps = [ - ":conn_pool_interface", - "@envoy_api//envoy/config/filter/network/redis_proxy/v2:redis_proxy_cc", - ], -) - envoy_cc_library( name = "command_splitter_lib", srcs = ["command_splitter_impl.cc"], hdrs = ["command_splitter_impl.h"], deps = [ ":command_splitter_interface", - ":router_interface", + ":conn_pool_interface", "//include/envoy/stats:stats_macros", "//include/envoy/stats:timespan", "//source/common/common:assert_lib", @@ -63,6 +54,7 @@ envoy_cc_library( hdrs = ["conn_pool_impl.h"], deps = [ ":conn_pool_interface", + "//include/envoy/router:router_interface", "//include/envoy/thread_local:thread_local_interface", "//include/envoy/upstream:cluster_manager_interface", "//source/common/buffer:buffer_lib", @@ -81,7 +73,6 @@ envoy_cc_library( hdrs = ["proxy_filter.h"], deps = [ ":command_splitter_interface", - ":router_interface", "//include/envoy/network:drain_decision_interface", "//include/envoy/network:filter_interface", "//include/envoy/upstream:cluster_manager_interface", @@ -104,21 +95,7 @@ envoy_cc_library( 
"//source/extensions/filters/network/common:factory_base_lib", "//source/extensions/filters/network/common/redis:codec_lib", "//source/extensions/filters/network/redis_proxy:command_splitter_lib", - "//source/extensions/filters/network/redis_proxy:proxy_filter_lib", - "//source/extensions/filters/network/redis_proxy:router_lib", - ], -) - -envoy_cc_library( - name = "router_lib", - srcs = ["router_impl.cc"], - hdrs = ["router_impl.h"], - deps = [ - ":router_interface", - "//include/envoy/thread_local:thread_local_interface", - "//include/envoy/upstream:cluster_manager_interface", - "//source/common/common:to_lower_table_lib", "//source/extensions/filters/network/redis_proxy:conn_pool_lib", - "@envoy_api//envoy/config/filter/network/redis_proxy/v2:redis_proxy_cc", + "//source/extensions/filters/network/redis_proxy:proxy_filter_lib", ], ) diff --git a/source/extensions/filters/network/redis_proxy/command_splitter_impl.cc b/source/extensions/filters/network/redis_proxy/command_splitter_impl.cc index 415a754e0ac6a..beea0fbaa32ee 100644 --- a/source/extensions/filters/network/redis_proxy/command_splitter_impl.cc +++ b/source/extensions/filters/network/redis_proxy/command_splitter_impl.cc @@ -59,15 +59,15 @@ void SingleServerRequest::cancel() { handle_ = nullptr; } -SplitRequestPtr SimpleRequest::create(Router& router, +SplitRequestPtr SimpleRequest::create(ConnPool::Instance& conn_pool, const Common::Redis::RespValue& incoming_request, SplitCallbacks& callbacks, CommandStats& command_stats, TimeSource& time_source, bool latency_in_micros) { std::unique_ptr request_ptr{ new SimpleRequest(callbacks, command_stats, time_source, latency_in_micros)}; - request_ptr->handle_ = - router.makeRequest(incoming_request.asArray()[1].asString(), incoming_request, *request_ptr); + request_ptr->handle_ = conn_pool.makeRequest(incoming_request.asArray()[1].asString(), + incoming_request, *request_ptr); if (!request_ptr->handle_) { 
request_ptr->callbacks_.onResponse(Utility::makeError(Response::get().NoUpstreamHost)); return nullptr; @@ -76,7 +76,7 @@ SplitRequestPtr SimpleRequest::create(Router& router, return std::move(request_ptr); } -SplitRequestPtr EvalRequest::create(Router& router, +SplitRequestPtr EvalRequest::create(ConnPool::Instance& conn_pool, const Common::Redis::RespValue& incoming_request, SplitCallbacks& callbacks, CommandStats& command_stats, TimeSource& time_source, bool latency_in_micros) { @@ -91,8 +91,8 @@ SplitRequestPtr EvalRequest::create(Router& router, std::unique_ptr request_ptr{ new EvalRequest(callbacks, command_stats, time_source, latency_in_micros)}; - request_ptr->handle_ = - router.makeRequest(incoming_request.asArray()[3].asString(), incoming_request, *request_ptr); + request_ptr->handle_ = conn_pool.makeRequest(incoming_request.asArray()[3].asString(), + incoming_request, *request_ptr); if (!request_ptr->handle_) { command_stats.error_.inc(); request_ptr->callbacks_.onResponse(Utility::makeError(Response::get().NoUpstreamHost)); @@ -123,7 +123,7 @@ void FragmentedRequest::onChildFailure(uint32_t index) { onChildResponse(Utility::makeError(Response::get().UpstreamFailure), index); } -SplitRequestPtr MGETRequest::create(Router& router, +SplitRequestPtr MGETRequest::create(ConnPool::Instance& conn_pool, const Common::Redis::RespValue& incoming_request, SplitCallbacks& callbacks, CommandStats& command_stats, TimeSource& time_source, bool latency_in_micros) { @@ -152,8 +152,8 @@ SplitRequestPtr MGETRequest::create(Router& router, single_mget.asArray()[1].asString() = incoming_request.asArray()[i].asString(); ENVOY_LOG(debug, "redis: parallel get: '{}'", single_mget.toString()); - pending_request.handle_ = - router.makeRequest(incoming_request.asArray()[i].asString(), single_mget, pending_request); + pending_request.handle_ = conn_pool.makeRequest(incoming_request.asArray()[i].asString(), + single_mget, pending_request); if (!pending_request.handle_) { 
pending_request.onResponse(Utility::makeError(Response::get().NoUpstreamHost)); } @@ -195,7 +195,7 @@ void MGETRequest::onChildResponse(Common::Redis::RespValuePtr&& value, uint32_t } } -SplitRequestPtr MSETRequest::create(Router& router, +SplitRequestPtr MSETRequest::create(ConnPool::Instance& conn_pool, const Common::Redis::RespValue& incoming_request, SplitCallbacks& callbacks, CommandStats& command_stats, TimeSource& time_source, bool latency_in_micros) { @@ -231,8 +231,8 @@ SplitRequestPtr MSETRequest::create(Router& router, single_mset.asArray()[2].asString() = incoming_request.asArray()[i + 1].asString(); ENVOY_LOG(debug, "redis: parallel set: '{}'", single_mset.toString()); - pending_request.handle_ = - router.makeRequest(incoming_request.asArray()[i].asString(), single_mset, pending_request); + pending_request.handle_ = conn_pool.makeRequest(incoming_request.asArray()[i].asString(), + single_mset, pending_request); if (!pending_request.handle_) { pending_request.onResponse(Utility::makeError(Response::get().NoUpstreamHost)); } @@ -270,7 +270,7 @@ void MSETRequest::onChildResponse(Common::Redis::RespValuePtr&& value, uint32_t } } -SplitRequestPtr SplitKeysSumResultRequest::create(Router& router, +SplitRequestPtr SplitKeysSumResultRequest::create(ConnPool::Instance& conn_pool, const Common::Redis::RespValue& incoming_request, SplitCallbacks& callbacks, CommandStats& command_stats, @@ -299,8 +299,8 @@ SplitRequestPtr SplitKeysSumResultRequest::create(Router& router, single_fragment.asArray()[1].asString() = incoming_request.asArray()[i].asString(); ENVOY_LOG(debug, "redis: parallel {}: '{}'", incoming_request.asArray()[0].asString(), single_fragment.toString()); - pending_request.handle_ = router.makeRequest(incoming_request.asArray()[i].asString(), - single_fragment, pending_request); + pending_request.handle_ = conn_pool.makeRequest(incoming_request.asArray()[i].asString(), + single_fragment, pending_request); if (!pending_request.handle_) { 
pending_request.onResponse(Utility::makeError(Response::get().NoUpstreamHost)); } @@ -337,11 +337,12 @@ void SplitKeysSumResultRequest::onChildResponse(Common::Redis::RespValuePtr&& va } } -InstanceImpl::InstanceImpl(RouterPtr&& router, Stats::Scope& scope, const std::string& stat_prefix, - TimeSource& time_source, bool latency_in_micros) - : router_(std::move(router)), simple_command_handler_(*router_), - eval_command_handler_(*router_), mget_handler_(*router_), mset_handler_(*router_), - split_keys_sum_result_handler_(*router_), +InstanceImpl::InstanceImpl(ConnPool::InstancePtr&& conn_pool, Stats::Scope& scope, + const std::string& stat_prefix, TimeSource& time_source, + bool latency_in_micros) + : conn_pool_(std::move(conn_pool)), simple_command_handler_(*conn_pool_), + eval_command_handler_(*conn_pool_), mget_handler_(*conn_pool_), mset_handler_(*conn_pool_), + split_keys_sum_result_handler_(*conn_pool_), stats_{ALL_COMMAND_SPLITTER_STATS(POOL_COUNTER_PREFIX(scope, stat_prefix + "splitter."))}, latency_in_micros_(latency_in_micros), time_source_(time_source) { for (const std::string& command : Common::Redis::SupportedCommands::simpleCommands()) { diff --git a/source/extensions/filters/network/redis_proxy/command_splitter_impl.h b/source/extensions/filters/network/redis_proxy/command_splitter_impl.h index 45ac46b71cd37..b7ac2b90f409b 100644 --- a/source/extensions/filters/network/redis_proxy/command_splitter_impl.h +++ b/source/extensions/filters/network/redis_proxy/command_splitter_impl.h @@ -17,7 +17,6 @@ #include "extensions/filters/network/common/redis/client_impl.h" #include "extensions/filters/network/redis_proxy/command_splitter.h" #include "extensions/filters/network/redis_proxy/conn_pool.h" -#include "extensions/filters/network/redis_proxy/router.h" namespace Envoy { namespace Extensions { @@ -69,9 +68,9 @@ class CommandHandler { class CommandHandlerBase { protected: - CommandHandlerBase(Router& router) : router_(router) {} + 
CommandHandlerBase(ConnPool::Instance& conn_pool) : conn_pool_(conn_pool) {} - Router& router_; + ConnPool::Instance& conn_pool_; }; class SplitRequestBase : public SplitRequest { @@ -122,7 +121,8 @@ class SingleServerRequest : public SplitRequestBase, public Common::Redis::Clien */ class SimpleRequest : public SingleServerRequest { public: - static SplitRequestPtr create(Router& router, const Common::Redis::RespValue& incoming_request, + static SplitRequestPtr create(ConnPool::Instance& conn_pool, + const Common::Redis::RespValue& incoming_request, SplitCallbacks& callbacks, CommandStats& command_stats, TimeSource& time_source, bool latency_in_micros); @@ -137,7 +137,8 @@ class SimpleRequest : public SingleServerRequest { */ class EvalRequest : public SingleServerRequest { public: - static SplitRequestPtr create(Router& router, const Common::Redis::RespValue& incoming_request, + static SplitRequestPtr create(ConnPool::Instance& conn_pool, + const Common::Redis::RespValue& incoming_request, SplitCallbacks& callbacks, CommandStats& command_stats, TimeSource& time_source, bool latency_in_micros); @@ -194,7 +195,8 @@ class FragmentedRequest : public SplitRequestBase { */ class MGETRequest : public FragmentedRequest, Logger::Loggable { public: - static SplitRequestPtr create(Router& router, const Common::Redis::RespValue& incoming_request, + static SplitRequestPtr create(ConnPool::Instance& conn_pool, + const Common::Redis::RespValue& incoming_request, SplitCallbacks& callbacks, CommandStats& command_stats, TimeSource& time_source, bool latency_in_micros); @@ -215,7 +217,8 @@ class MGETRequest : public FragmentedRequest, Logger::Loggable { public: - static SplitRequestPtr create(Router& router, const Common::Redis::RespValue& incoming_request, + static SplitRequestPtr create(ConnPool::Instance& conn_pool, + const Common::Redis::RespValue& incoming_request, SplitCallbacks& callbacks, CommandStats& command_stats, TimeSource& time_source, bool latency_in_micros); @@ 
-237,7 +240,8 @@ class SplitKeysSumResultRequest : public FragmentedRequest, Logger::Loggable { public: - static SplitRequestPtr create(Router& router, const Common::Redis::RespValue& incoming_request, + static SplitRequestPtr create(ConnPool::Instance& conn_pool, + const Common::Redis::RespValue& incoming_request, SplitCallbacks& callbacks, CommandStats& command_stats, TimeSource& time_source, bool latency_in_micros); @@ -257,11 +261,11 @@ class MSETRequest : public FragmentedRequest, Logger::Loggable class CommandHandlerFactory : public CommandHandler, CommandHandlerBase { public: - CommandHandlerFactory(Router& router) : CommandHandlerBase(router) {} + CommandHandlerFactory(ConnPool::Instance& conn_pool) : CommandHandlerBase(conn_pool) {} SplitRequestPtr startRequest(const Common::Redis::RespValue& request, SplitCallbacks& callbacks, CommandStats& command_stats, TimeSource& time_source, bool latency_in_micros) { - return RequestClass::create(router_, request, callbacks, command_stats, time_source, + return RequestClass::create(conn_pool_, request, callbacks, command_stats, time_source, latency_in_micros); } }; @@ -284,8 +288,8 @@ struct InstanceStats { class InstanceImpl : public Instance, Logger::Loggable { public: - InstanceImpl(RouterPtr&& router, Stats::Scope& scope, const std::string& stat_prefix, - TimeSource& time_source, bool latency_in_micros); + InstanceImpl(ConnPool::InstancePtr&& conn_pool, Stats::Scope& scope, + const std::string& stat_prefix, TimeSource& time_source, bool latency_in_micros); // RedisProxy::CommandSplitter::Instance SplitRequestPtr makeRequest(const Common::Redis::RespValue& request, @@ -303,7 +307,7 @@ class InstanceImpl : public Instance, Logger::Loggable { CommandHandler& handler); void onInvalidRequest(SplitCallbacks& callbacks); - RouterPtr router_; + ConnPool::InstancePtr conn_pool_; CommandHandlerFactory simple_command_handler_; CommandHandlerFactory eval_command_handler_; CommandHandlerFactory mget_handler_; diff --git 
a/source/extensions/filters/network/redis_proxy/config.cc b/source/extensions/filters/network/redis_proxy/config.cc index 9838c2cc5ebf4..bae74e8633713 100644 --- a/source/extensions/filters/network/redis_proxy/config.cc +++ b/source/extensions/filters/network/redis_proxy/config.cc @@ -11,8 +11,8 @@ #include "extensions/filters/network/common/redis/client_impl.h" #include "extensions/filters/network/common/redis/codec_impl.h" #include "extensions/filters/network/redis_proxy/command_splitter_impl.h" +#include "extensions/filters/network/redis_proxy/conn_pool_impl.h" #include "extensions/filters/network/redis_proxy/proxy_filter.h" -#include "extensions/filters/network/redis_proxy/router_impl.h" namespace Envoy { namespace Extensions { @@ -24,43 +24,18 @@ Network::FilterFactoryCb RedisProxyFilterConfigFactory::createFilterFactoryFromP Server::Configuration::FactoryContext& context) { ASSERT(!proto_config.stat_prefix().empty()); + ASSERT(!proto_config.cluster().empty()); ASSERT(proto_config.has_settings()); ProxyFilterConfigSharedPtr filter_config(std::make_shared( proto_config, context.scope(), context.drainDecision(), context.runtime())); - - envoy::config::filter::network::redis_proxy::v2::RedisProxy::PrefixRoutes prefix_routes( - proto_config.prefix_routes()); - - // set the catch-all route from the deprecated cluster and settings parameters. 
- if (prefix_routes.catch_all_cluster().empty() && prefix_routes.routes_size() == 0) { - if (proto_config.cluster().empty()) { - throw EnvoyException("cannot configure a redis-proxy without any upstream"); - } - - prefix_routes.set_catch_all_cluster(proto_config.cluster()); - } - - std::set unique_clusters; - for (auto& route : prefix_routes.routes()) { - unique_clusters.emplace(route.cluster()); - } - unique_clusters.emplace(prefix_routes.catch_all_cluster()); - - Upstreams upstreams; - for (auto& cluster : unique_clusters) { - upstreams.emplace(cluster, std::make_shared( - cluster, context.clusterManager(), - Common::Redis::Client::ClientFactoryImpl::instance_, - context.threadLocal(), proto_config.settings())); - } - - auto router = std::make_unique(prefix_routes, std::move(upstreams)); - - std::shared_ptr splitter = - std::make_shared( - std::move(router), context.scope(), filter_config->stat_prefix_, context.timeSource(), - proto_config.latency_in_micros()); + ConnPool::InstancePtr conn_pool( + new ConnPool::InstanceImpl(filter_config->cluster_name_, context.clusterManager(), + Common::Redis::Client::ClientFactoryImpl::instance_, + context.threadLocal(), proto_config.settings())); + std::shared_ptr splitter(new CommandSplitter::InstanceImpl( + std::move(conn_pool), context.scope(), filter_config->stat_prefix_, context.timeSource(), + proto_config.latency_in_micros())); return [splitter, filter_config](Network::FilterManager& filter_manager) -> void { Common::Redis::DecoderFactoryImpl factory; filter_manager.addReadFilter(std::make_shared( diff --git a/source/extensions/filters/network/redis_proxy/conn_pool.h b/source/extensions/filters/network/redis_proxy/conn_pool.h index 713e4f7310cc5..442219e79b547 100644 --- a/source/extensions/filters/network/redis_proxy/conn_pool.h +++ b/source/extensions/filters/network/redis_proxy/conn_pool.h @@ -36,7 +36,7 @@ class Instance { Common::Redis::Client::PoolCallbacks& callbacks) PURE; }; -typedef std::shared_ptr 
InstanceSharedPtr; +typedef std::unique_ptr InstancePtr; } // namespace ConnPool } // namespace RedisProxy diff --git a/source/extensions/filters/network/redis_proxy/conn_pool_impl.h b/source/extensions/filters/network/redis_proxy/conn_pool_impl.h index 17facb93afbc4..1dfb363573ab2 100644 --- a/source/extensions/filters/network/redis_proxy/conn_pool_impl.h +++ b/source/extensions/filters/network/redis_proxy/conn_pool_impl.h @@ -37,6 +37,7 @@ class InstanceImpl : public Instance { const std::string& cluster_name, Upstream::ClusterManager& cm, Common::Redis::Client::ClientFactory& client_factory, ThreadLocal::SlotAllocator& tls, const envoy::config::filter::network::redis_proxy::v2::RedisProxy::ConnPoolSettings& config); + // RedisProxy::ConnPool::Instance Common::Redis::Client::PoolRequest* makeRequest(const std::string& key, const Common::Redis::RespValue& request, diff --git a/source/extensions/filters/network/redis_proxy/proxy_filter.cc b/source/extensions/filters/network/redis_proxy/proxy_filter.cc index f36d692ea9cae..d5fc143e9be09 100644 --- a/source/extensions/filters/network/redis_proxy/proxy_filter.cc +++ b/source/extensions/filters/network/redis_proxy/proxy_filter.cc @@ -17,7 +17,7 @@ namespace RedisProxy { ProxyFilterConfig::ProxyFilterConfig( const envoy::config::filter::network::redis_proxy::v2::RedisProxy& config, Stats::Scope& scope, const Network::DrainDecision& drain_decision, Runtime::Loader& runtime) - : drain_decision_(drain_decision), runtime_(runtime), + : drain_decision_(drain_decision), runtime_(runtime), cluster_name_(config.cluster()), stat_prefix_(fmt::format("redis.{}.", config.stat_prefix())), stats_(generateStats(stat_prefix_, scope)) {} diff --git a/source/extensions/filters/network/redis_proxy/proxy_filter.h b/source/extensions/filters/network/redis_proxy/proxy_filter.h index ae2141a322d94..3f8dc62d6eecd 100644 --- a/source/extensions/filters/network/redis_proxy/proxy_filter.h +++ 
b/source/extensions/filters/network/redis_proxy/proxy_filter.h @@ -56,6 +56,7 @@ class ProxyFilterConfig { const Network::DrainDecision& drain_decision_; Runtime::Loader& runtime_; + const std::string cluster_name_; const std::string stat_prefix_; const std::string redis_drain_close_runtime_key_{"redis.drain_close_enabled"}; ProxyStats stats_; diff --git a/source/extensions/filters/network/redis_proxy/router.h b/source/extensions/filters/network/redis_proxy/router.h deleted file mode 100644 index 1317b170aca4c..0000000000000 --- a/source/extensions/filters/network/redis_proxy/router.h +++ /dev/null @@ -1,42 +0,0 @@ -#pragma once - -#include -#include - -#include "envoy/common/pure.h" -#include "envoy/config/filter/network/redis_proxy/v2/redis_proxy.pb.h" - -#include "extensions/filters/network/redis_proxy/conn_pool.h" - -namespace Envoy { -namespace Extensions { -namespace NetworkFilters { -namespace RedisProxy { - -/* - * Decorator of a connection pool in order to enable key based routing. - */ -class Router { -public: - virtual ~Router() = default; - - /** - * Forwards the request to the connection pool that matches a route or uses the wildcard route - * when no match is found. - * @param key supplies the key of the current command. - * @param request supplies the RESP request to make. - * @param callbacks supplies the request callbacks. - * @return PoolRequest* a handle to the active request or nullptr if the request could not be made - * for some reason. 
- */ - virtual Common::Redis::Client::PoolRequest* - makeRequest(const std::string& key, const Common::Redis::RespValue& request, - Common::Redis::Client::PoolCallbacks& callbacks) PURE; -}; - -typedef std::unique_ptr RouterPtr; - -} // namespace RedisProxy -} // namespace NetworkFilters -} // namespace Extensions -} // namespace Envoy diff --git a/source/extensions/filters/network/redis_proxy/router_impl.cc b/source/extensions/filters/network/redis_proxy/router_impl.cc deleted file mode 100644 index 009cc345b3844..0000000000000 --- a/source/extensions/filters/network/redis_proxy/router_impl.cc +++ /dev/null @@ -1,68 +0,0 @@ -#include "extensions/filters/network/redis_proxy/router_impl.h" - -#include "common/common/fmt.h" - -namespace Envoy { -namespace Extensions { -namespace NetworkFilters { -namespace RedisProxy { - -PrefixRoutes::PrefixRoutes( - const envoy::config::filter::network::redis_proxy::v2::RedisProxy::PrefixRoutes& config, - Upstreams&& upstreams) - : case_insensitive_(config.case_insensitive()), upstreams_(std::move(upstreams)), - catch_all_upstream_(config.catch_all_cluster().empty() - ? 
nullptr - : upstreams_.at(config.catch_all_cluster())) { - - for (auto const& route : config.routes()) { - std::string copy(route.prefix()); - - if (case_insensitive_) { - to_lower_table_.toLowerCase(copy); - } - - auto success = prefix_lookup_table_.add(copy.c_str(), - std::make_shared(Prefix{ - route.prefix(), - route.remove_prefix(), - upstreams_.at(route.cluster()), - }), - false); - if (!success) { - throw EnvoyException(fmt::format("prefix `{}` already exists.", route.prefix())); - } - } -} - -Common::Redis::Client::PoolRequest* -PrefixRoutes::makeRequest(const std::string& key, const Common::Redis::RespValue& request, - Common::Redis::Client::PoolCallbacks& callbacks) { - - PrefixPtr value = nullptr; - if (case_insensitive_) { - std::string copy(key); - to_lower_table_.toLowerCase(copy); - value = prefix_lookup_table_.findLongestPrefix(copy.c_str()); - } else { - value = prefix_lookup_table_.findLongestPrefix(key.c_str()); - } - - if (value != nullptr) { - absl::string_view view(key); - if (value->remove_prefix) { - view.remove_prefix(value->prefix.length()); - } - std::string str(view); - value->upstream->makeRequest(str, request, callbacks); - } else if (catch_all_upstream_ != nullptr) { - catch_all_upstream_.value()->makeRequest(key, request, callbacks); - } - - return nullptr; -} - -} // namespace RedisProxy -} // namespace NetworkFilters -} // namespace Extensions -} // namespace Envoy diff --git a/source/extensions/filters/network/redis_proxy/router_impl.h b/source/extensions/filters/network/redis_proxy/router_impl.h deleted file mode 100644 index 0c3d50356c02d..0000000000000 --- a/source/extensions/filters/network/redis_proxy/router_impl.h +++ /dev/null @@ -1,55 +0,0 @@ -#pragma once - -#include -#include -#include -#include -#include -#include - -#include "envoy/config/filter/network/redis_proxy/v2/redis_proxy.pb.h" -#include "envoy/thread_local/thread_local.h" -#include "envoy/upstream/cluster_manager.h" - -#include "common/common/to_lower_table.h" 
- -#include "extensions/filters/network/redis_proxy/conn_pool_impl.h" -#include "extensions/filters/network/redis_proxy/router.h" - -namespace Envoy { -namespace Extensions { -namespace NetworkFilters { -namespace RedisProxy { - -typedef std::map Upstreams; - -class PrefixRoutes : public Router { -public: - PrefixRoutes(const envoy::config::filter::network::redis_proxy::v2::RedisProxy::PrefixRoutes& - prefix_routes, - Upstreams&& upstreams); - - Common::Redis::Client::PoolRequest* - makeRequest(const std::string& hash_key, const Common::Redis::RespValue& request, - Common::Redis::Client::PoolCallbacks& callbacks) override; - -private: - struct Prefix { - const std::string prefix; - const bool remove_prefix; - ConnPool::InstanceSharedPtr upstream; - }; - - typedef std::shared_ptr PrefixPtr; - - TrieLookupTable prefix_lookup_table_; - const ToLowerTable to_lower_table_; - const bool case_insensitive_; - Upstreams upstreams_; - absl::optional catch_all_upstream_; -}; - -} // namespace RedisProxy -} // namespace NetworkFilters -} // namespace Extensions -} // namespace Envoy diff --git a/test/common/common/utility_test.cc b/test/common/common/utility_test.cc index e2a084651065a..6434cd140280b 100644 --- a/test/common/common/utility_test.cc +++ b/test/common/common/utility_test.cc @@ -828,41 +828,4 @@ TEST(DateFormatter, FromTimeSameWildcard) { DateFormatter("%Y-%m-%dT%H:%M:%S.000Z%1f%2f").fromTime(time1)); } -TEST(TrieLookupTable, AddItems) { - TrieLookupTable trie; - EXPECT_TRUE(trie.add("foo", "a")); - EXPECT_TRUE(trie.add("bar", "b")); - EXPECT_EQ("a", trie.find("foo")); - EXPECT_EQ("b", trie.find("bar")); - - // overwrite_existing = false - EXPECT_FALSE(trie.add("foo", "c", false)); - EXPECT_EQ("a", trie.find("foo")); - - // overwrite_existing = true - EXPECT_TRUE(trie.add("foo", "c")); - EXPECT_EQ("c", trie.find("foo")); -} - -TEST(TrieLookupTable, LongestPrefix) { - TrieLookupTable trie; - EXPECT_TRUE(trie.add("foo", "a")); - EXPECT_TRUE(trie.add("bar", "b")); - 
EXPECT_TRUE(trie.add("baro", "c")); - - EXPECT_EQ("a", trie.find("foo")); - EXPECT_EQ("a", trie.findLongestPrefix("foo")); - EXPECT_EQ("a", trie.findLongestPrefix("foosball")); - - EXPECT_EQ("b", trie.find("bar")); - EXPECT_EQ("b", trie.findLongestPrefix("bar")); - EXPECT_EQ("b", trie.findLongestPrefix("baritone")); - EXPECT_EQ("c", trie.findLongestPrefix("barometer")); - - EXPECT_EQ(nullptr, trie.find("toto")); - EXPECT_EQ(nullptr, trie.findLongestPrefix("toto")); - EXPECT_EQ(nullptr, trie.find(" ")); - EXPECT_EQ(nullptr, trie.findLongestPrefix(" ")); -} - } // namespace Envoy diff --git a/test/extensions/filters/network/redis_proxy/BUILD b/test/extensions/filters/network/redis_proxy/BUILD index 7b6629b6e4917..492404c41547e 100644 --- a/test/extensions/filters/network/redis_proxy/BUILD +++ b/test/extensions/filters/network/redis_proxy/BUILD @@ -75,7 +75,6 @@ envoy_cc_mock( "//source/extensions/filters/network/common/redis:codec_lib", "//source/extensions/filters/network/redis_proxy:command_splitter_interface", "//source/extensions/filters/network/redis_proxy:conn_pool_interface", - "//source/extensions/filters/network/redis_proxy:router_interface", ], ) @@ -105,15 +104,3 @@ envoy_extension_cc_test_binary( "//test/test_common:simulated_time_system_lib", ], ) - -envoy_extension_cc_test( - name = "router_impl_test", - srcs = ["router_impl_test.cc"], - extension_name = "envoy.filters.network.redis_proxy", - deps = [ - ":redis_mocks", - "//source/extensions/filters/network/redis_proxy:router_lib", - "//test/extensions/filters/network/common/redis:redis_mocks", - "//test/test_common:utility_lib", - ], -) diff --git a/test/extensions/filters/network/redis_proxy/command_lookup_speed_test.cc b/test/extensions/filters/network/redis_proxy/command_lookup_speed_test.cc index d70fdb02a5e02..2f4d8e30e1b0b 100644 --- a/test/extensions/filters/network/redis_proxy/command_lookup_speed_test.cc +++ b/test/extensions/filters/network/redis_proxy/command_lookup_speed_test.cc @@ -30,7 
+30,7 @@ class NoOpSplitCallbacks : public CommandSplitter::SplitCallbacks { void onResponse(Common::Redis::RespValuePtr&&) override {} }; -class NullRouterImpl : public Router { +class NullInstanceImpl : public ConnPool::Instance { Common::Redis::Client::PoolRequest* makeRequest(const std::string&, const Common::Redis::RespValue&, Common::Redis::Client::PoolCallbacks&) override { @@ -65,11 +65,11 @@ class CommandLookUpSpeedTest { } } - Router* router_{new NullRouterImpl()}; + ConnPool::Instance* conn_pool_{new NullInstanceImpl()}; Stats::IsolatedStoreImpl store_; Event::SimulatedTimeSystem time_system_; - CommandSplitter::InstanceImpl splitter_{RouterPtr{router_}, store_, "redis.foo.", time_system_, - false}; + CommandSplitter::InstanceImpl splitter_{ConnPool::InstancePtr{conn_pool_}, store_, "redis.foo.", + time_system_, false}; NoOpSplitCallbacks callbacks_; CommandSplitter::SplitRequestPtr handle_; }; diff --git a/test/extensions/filters/network/redis_proxy/command_splitter_impl_test.cc b/test/extensions/filters/network/redis_proxy/command_splitter_impl_test.cc index ff52c8013496d..252078432334a 100644 --- a/test/extensions/filters/network/redis_proxy/command_splitter_impl_test.cc +++ b/test/extensions/filters/network/redis_proxy/command_splitter_impl_test.cc @@ -50,10 +50,11 @@ class RedisCommandSplitterImplTest : public testing::Test { value.asArray().swap(values); } - MockRouter* router_{new MockRouter()}; + ConnPool::MockInstance* conn_pool_{new ConnPool::MockInstance()}; NiceMock store_; Event::SimulatedTimeSystem time_system_; - InstanceImpl splitter_{RouterPtr{router_}, store_, "redis.foo.", time_system_, false}; + InstanceImpl splitter_{ConnPool::InstancePtr{conn_pool_}, store_, "redis.foo.", time_system_, + false}; MockSplitCallbacks callbacks_; SplitRequestPtr handle_; }; @@ -110,7 +111,7 @@ class RedisSingleServerRequestTest : public RedisCommandSplitterImplTest, public testing::WithParamInterface { public: void makeRequest(const std::string& 
hash_key, const Common::Redis::RespValue& request) { - EXPECT_CALL(*router_, makeRequest(hash_key, Ref(request), _)) + EXPECT_CALL(*conn_pool_, makeRequest(hash_key, Ref(request), _)) .WillOnce(DoAll(WithArg<2>(SaveArgAddress(&pool_callbacks_)), Return(&pool_request_))); handle_ = splitter_.makeRequest(request, callbacks_); } @@ -222,7 +223,7 @@ TEST_P(RedisSingleServerRequestTest, NoUpstream) { Common::Redis::RespValue request; makeBulkStringArray(request, {GetParam(), "hello"}); - EXPECT_CALL(*router_, makeRequest("hello", Ref(request), _)).WillOnce(Return(nullptr)); + EXPECT_CALL(*conn_pool_, makeRequest("hello", Ref(request), _)).WillOnce(Return(nullptr)); Common::Redis::RespValue response; response.type(Common::Redis::RespType::Error); response.asString() = Response::get().NoUpstreamHost; @@ -323,7 +324,7 @@ TEST_F(RedisSingleServerRequestTest, EvalNoUpstream) { Common::Redis::RespValue request; makeBulkStringArray(request, {"eval", "return {ARGV[1]}", "1", "key", "arg"}); - EXPECT_CALL(*router_, makeRequest("key", Ref(request), _)).WillOnce(Return(nullptr)); + EXPECT_CALL(*conn_pool_, makeRequest("key", Ref(request), _)).WillOnce(Return(nullptr)); Common::Redis::RespValue response; response.type(Common::Redis::RespType::Error); response.asString() = Response::get().NoUpstreamHost; @@ -358,7 +359,7 @@ class RedisMGETCommandHandlerTest : public RedisCommandSplitterImplTest { null_handle_indexes.end()) { request_to_use = &pool_requests_[i]; } - EXPECT_CALL(*router_, makeRequest(std::to_string(i), Eq(ByRef(expected_requests_[i])), _)) + EXPECT_CALL(*conn_pool_, makeRequest(std::to_string(i), Eq(ByRef(expected_requests_[i])), _)) .WillOnce(DoAll(WithArg<2>(SaveArgAddress(&pool_callbacks_[i])), Return(request_to_use))); } @@ -561,7 +562,7 @@ class RedisMSETCommandHandlerTest : public RedisCommandSplitterImplTest { null_handle_indexes.end()) { request_to_use = &pool_requests_[i]; } - EXPECT_CALL(*router_, makeRequest(std::to_string(i), 
Eq(ByRef(expected_requests_[i])), _)) + EXPECT_CALL(*conn_pool_, makeRequest(std::to_string(i), Eq(ByRef(expected_requests_[i])), _)) .WillOnce(DoAll(WithArg<2>(SaveArgAddress(&pool_callbacks_[i])), Return(request_to_use))); } @@ -684,7 +685,7 @@ class RedisSplitKeysSumResultHandlerTest : public RedisCommandSplitterImplTest, null_handle_indexes.end()) { request_to_use = &pool_requests_[i]; } - EXPECT_CALL(*router_, makeRequest(std::to_string(i), Eq(ByRef(expected_requests_[i])), _)) + EXPECT_CALL(*conn_pool_, makeRequest(std::to_string(i), Eq(ByRef(expected_requests_[i])), _)) .WillOnce(DoAll(WithArg<2>(SaveArgAddress(&pool_callbacks_[i])), Return(request_to_use))); } @@ -772,13 +773,14 @@ INSTANTIATE_TEST_SUITE_P( class RedisSingleServerRequestWithLatencyMicrosTest : public RedisSingleServerRequestTest { public: void makeRequest(const std::string& hash_key, const Common::Redis::RespValue& request) { - EXPECT_CALL(*router_, makeRequest(hash_key, Ref(request), _)) + EXPECT_CALL(*conn_pool_, makeRequest(hash_key, Ref(request), _)) .WillOnce(DoAll(WithArg<2>(SaveArgAddress(&pool_callbacks_)), Return(&pool_request_))); handle_ = splitter_.makeRequest(request, callbacks_); } - MockRouter* router_{new MockRouter()}; - InstanceImpl splitter_{RouterPtr{router_}, store_, "redis.foo.", time_system_, true}; + ConnPool::MockInstance* conn_pool_{new ConnPool::MockInstance()}; + InstanceImpl splitter_{ConnPool::InstancePtr{conn_pool_}, store_, "redis.foo.", time_system_, + true}; }; TEST_P(RedisSingleServerRequestWithLatencyMicrosTest, Success) { diff --git a/test/extensions/filters/network/redis_proxy/config_test.cc b/test/extensions/filters/network/redis_proxy/config_test.cc index be23782420b43..074862e5718c8 100644 --- a/test/extensions/filters/network/redis_proxy/config_test.cc +++ b/test/extensions/filters/network/redis_proxy/config_test.cc @@ -23,21 +23,6 @@ TEST(RedisProxyFilterConfigFactoryTest, ValidateFail) { ProtoValidationException); } 
-TEST(RedisProxyFilterConfigFactoryTest, NoUpstreamDefined) { - envoy::config::filter::network::redis_proxy::v2::RedisProxy::ConnPoolSettings settings; - settings.mutable_op_timeout()->CopyFrom(Protobuf::util::TimeUtil::MillisecondsToDuration(20)); - - envoy::config::filter::network::redis_proxy::v2::RedisProxy config; - config.set_stat_prefix("foo"); - config.mutable_settings()->CopyFrom(settings); - - NiceMock context; - - EXPECT_THROW_WITH_MESSAGE( - RedisProxyFilterConfigFactory().createFilterFactoryFromProto(config, context), EnvoyException, - "cannot configure a redis-proxy without any upstream"); -} - TEST(RedisProxyFilterConfigFactoryTest, RedisProxyCorrectJson) { std::string json_string = R"EOF( { diff --git a/test/extensions/filters/network/redis_proxy/conn_pool_impl_test.cc b/test/extensions/filters/network/redis_proxy/conn_pool_impl_test.cc index 464b1eff494f1..bd267cd1670d2 100644 --- a/test/extensions/filters/network/redis_proxy/conn_pool_impl_test.cc +++ b/test/extensions/filters/network/redis_proxy/conn_pool_impl_test.cc @@ -43,8 +43,7 @@ class RedisConnPoolImplTest : public testing::Test, public Common::Redis::Client if (!cluster_exists) { EXPECT_CALL(cm_, get("fake_cluster")).WillOnce(Return(nullptr)); } - - conn_pool_ = std::make_shared(cluster_name_, cm_, *this, tls_, + conn_pool_ = std::make_unique(cluster_name_, cm_, *this, tls_, Common::Redis::Client::createConnPoolSettings()); } @@ -75,7 +74,7 @@ class RedisConnPoolImplTest : public testing::Test, public Common::Redis::Client const std::string cluster_name_{"fake_cluster"}; NiceMock cm_; NiceMock tls_; - InstanceSharedPtr conn_pool_; + InstancePtr conn_pool_; Upstream::ClusterUpdateCallbacks* update_callbacks_{}; Common::Redis::Client::MockClient* client_{}; }; diff --git a/test/extensions/filters/network/redis_proxy/mocks.cc b/test/extensions/filters/network/redis_proxy/mocks.cc index 3bbb28baba804..7e0ce1eff0bde 100644 --- a/test/extensions/filters/network/redis_proxy/mocks.cc +++ 
b/test/extensions/filters/network/redis_proxy/mocks.cc @@ -15,9 +15,6 @@ namespace Extensions { namespace NetworkFilters { namespace RedisProxy { -MockRouter::MockRouter() {} -MockRouter::~MockRouter() {} - namespace ConnPool { MockInstance::MockInstance() {} diff --git a/test/extensions/filters/network/redis_proxy/mocks.h b/test/extensions/filters/network/redis_proxy/mocks.h index e959475542654..19c724ac74478 100644 --- a/test/extensions/filters/network/redis_proxy/mocks.h +++ b/test/extensions/filters/network/redis_proxy/mocks.h @@ -8,7 +8,6 @@ #include "extensions/filters/network/common/redis/codec_impl.h" #include "extensions/filters/network/redis_proxy/command_splitter.h" #include "extensions/filters/network/redis_proxy/conn_pool.h" -#include "extensions/filters/network/redis_proxy/router.h" #include "test/test_common/printers.h" @@ -19,17 +18,6 @@ namespace Extensions { namespace NetworkFilters { namespace RedisProxy { -class MockRouter : public Router { -public: - MockRouter(); - ~MockRouter(); - - MOCK_METHOD3(makeRequest, - Common::Redis::Client::PoolRequest*( - const std::string& hash_key, const Common::Redis::RespValue& request, - Common::Redis::Client::PoolCallbacks& callbacks)); -}; - namespace ConnPool { class MockInstance : public Instance { diff --git a/test/extensions/filters/network/redis_proxy/proxy_filter_test.cc b/test/extensions/filters/network/redis_proxy/proxy_filter_test.cc index 4cb73b89186b2..333a9687dc501 100644 --- a/test/extensions/filters/network/redis_proxy/proxy_filter_test.cc +++ b/test/extensions/filters/network/redis_proxy/proxy_filter_test.cc @@ -60,7 +60,7 @@ TEST_F(RedisProxyFilterConfigTest, Normal) { envoy::config::filter::network::redis_proxy::v2::RedisProxy proto_config = parseProtoFromJson(json_string); ProxyFilterConfig config(proto_config, store_, drain_decision_, runtime_); - EXPECT_EQ("redis.foo.", config.stat_prefix_); + EXPECT_EQ("fake_cluster", config.cluster_name_); } TEST_F(RedisProxyFilterConfigTest, 
BadRedisProxyConfig) { diff --git a/test/extensions/filters/network/redis_proxy/router_impl_test.cc b/test/extensions/filters/network/redis_proxy/router_impl_test.cc deleted file mode 100644 index 62b012e4abc27..0000000000000 --- a/test/extensions/filters/network/redis_proxy/router_impl_test.cc +++ /dev/null @@ -1,199 +0,0 @@ -#include - -#include "extensions/filters/network/redis_proxy/conn_pool_impl.h" -#include "extensions/filters/network/redis_proxy/router_impl.h" - -#include "test/extensions/filters/network/common/redis/mocks.h" -#include "test/extensions/filters/network/redis_proxy/mocks.h" -#include "test/test_common/utility.h" - -#include "gmock/gmock.h" -#include "gtest/gtest.h" - -using testing::_; -using testing::Eq; -using testing::InSequence; -using testing::Return; -using testing::StrEq; - -namespace Envoy { -namespace Extensions { -namespace NetworkFilters { -namespace RedisProxy { - -envoy::config::filter::network::redis_proxy::v2::RedisProxy::PrefixRoutes createPrefixRoutes() { - envoy::config::filter::network::redis_proxy::v2::RedisProxy::PrefixRoutes prefix_routes; - auto* routes = prefix_routes.mutable_routes(); - - { - auto* route = routes->Add(); - route->set_prefix("ab"); - route->set_cluster("fake_clusterA"); - } - - { - auto* route = routes->Add(); - route->set_prefix("a"); - route->set_cluster("fake_clusterB"); - } - - return prefix_routes; -} - -TEST(PrefixRoutesTest, MissingCatchAll) { - Upstreams upstreams; - upstreams.emplace("fake_clusterA", std::make_shared()); - upstreams.emplace("fake_clusterB", std::make_shared()); - - PrefixRoutes router(createPrefixRoutes(), std::move(upstreams)); - - Common::Redis::RespValue value; - Common::Redis::Client::MockPoolCallbacks callbacks; - - EXPECT_EQ(nullptr, router.makeRequest("c:bar", value, callbacks)); -} - -TEST(PrefixRoutesTest, RoutedToCatchAll) { - auto upstream_c = std::make_shared(); - - Upstreams upstreams; - upstreams.emplace("fake_clusterA", std::make_shared()); - 
upstreams.emplace("fake_clusterB", std::make_shared()); - upstreams.emplace("fake_clusterC", upstream_c); - - auto prefix_routes = createPrefixRoutes(); - prefix_routes.set_catch_all_cluster("fake_clusterC"); - - EXPECT_CALL(*upstream_c, makeRequest(Eq("c:bar"), _, _)); - - PrefixRoutes router(prefix_routes, std::move(upstreams)); - Common::Redis::RespValue value; - Common::Redis::Client::MockPoolCallbacks callbacks; - - EXPECT_EQ(nullptr, router.makeRequest("c:bar", value, callbacks)); -} - -TEST(PrefixRoutesTest, RoutedToLongestPrefix) { - auto upstream_a = std::make_shared(); - - Upstreams upstreams; - upstreams.emplace("fake_clusterA", upstream_a); - upstreams.emplace("fake_clusterB", std::make_shared()); - - EXPECT_CALL(*upstream_a, makeRequest(Eq("ab:bar"), _, _)); - - PrefixRoutes router(createPrefixRoutes(), std::move(upstreams)); - Common::Redis::RespValue value; - Common::Redis::Client::MockPoolCallbacks callbacks; - - EXPECT_EQ(nullptr, router.makeRequest("ab:bar", value, callbacks)); -} - -TEST(PrefixRoutesTest, CaseUnsensitivePrefix) { - auto upstream_a = std::make_shared(); - - Upstreams upstreams; - upstreams.emplace("fake_clusterA", upstream_a); - upstreams.emplace("fake_clusterB", std::make_shared()); - - auto prefix_routes = createPrefixRoutes(); - prefix_routes.set_case_insensitive(true); - - EXPECT_CALL(*upstream_a, makeRequest(Eq("AB:bar"), _, _)); - - PrefixRoutes router(prefix_routes, std::move(upstreams)); - Common::Redis::RespValue value; - Common::Redis::Client::MockPoolCallbacks callbacks; - - EXPECT_EQ(nullptr, router.makeRequest("AB:bar", value, callbacks)); -} - -TEST(PrefixRoutesTest, RemovePrefix) { - auto upstream_a = std::make_shared(); - - Upstreams upstreams; - upstreams.emplace("fake_clusterA", upstream_a); - upstreams.emplace("fake_clusterB", std::make_shared()); - - auto prefix_routes = createPrefixRoutes(); - - { - auto* route = prefix_routes.mutable_routes()->Add(); - route->set_prefix("abc"); - 
route->set_cluster("fake_clusterA"); - route->set_remove_prefix(true); - } - - EXPECT_CALL(*upstream_a, makeRequest(Eq(":bar"), _, _)); - - PrefixRoutes router(prefix_routes, std::move(upstreams)); - Common::Redis::RespValue value; - Common::Redis::Client::MockPoolCallbacks callbacks; - - EXPECT_EQ(nullptr, router.makeRequest("abc:bar", value, callbacks)); -} - -TEST(PrefixRoutesTest, RoutedToShortestPrefix) { - auto upstream_b = std::make_shared(); - - Upstreams upstreams; - upstreams.emplace("fake_clusterA", std::make_shared()); - upstreams.emplace("fake_clusterB", upstream_b); - - EXPECT_CALL(*upstream_b, makeRequest(Eq("a:bar"), _, _)); - - PrefixRoutes router(createPrefixRoutes(), std::move(upstreams)); - Common::Redis::RespValue value; - Common::Redis::Client::MockPoolCallbacks callbacks; - - EXPECT_EQ(nullptr, router.makeRequest("a:bar", value, callbacks)); -} - -TEST(PrefixRoutesTest, DifferentPrefixesSameUpstream) { - auto upstream_b = std::make_shared(); - - Upstreams upstreams; - upstreams.emplace("fake_clusterA", std::make_shared()); - upstreams.emplace("fake_clusterB", upstream_b); - - auto prefix_routes = createPrefixRoutes(); - - { - auto* route = prefix_routes.mutable_routes()->Add(); - route->set_prefix("also_route_to_b"); - route->set_cluster("fake_clusterB"); - } - - EXPECT_CALL(*upstream_b, makeRequest(Eq("a:bar"), _, _)); - EXPECT_CALL(*upstream_b, makeRequest(Eq("also_route_to_b:bar"), _, _)); - - PrefixRoutes router(prefix_routes, std::move(upstreams)); - Common::Redis::RespValue value; - Common::Redis::Client::MockPoolCallbacks callbacks; - - EXPECT_EQ(nullptr, router.makeRequest("a:bar", value, callbacks)); - EXPECT_EQ(nullptr, router.makeRequest("also_route_to_b:bar", value, callbacks)); -} - -TEST(PrefixRoutesTest, DuplicatePrefix) { - Upstreams upstreams; - upstreams.emplace("fake_clusterA", std::make_shared()); - upstreams.emplace("fake_clusterB", std::make_shared()); - upstreams.emplace("this_will_throw", std::make_shared()); - - auto 
prefix_routes = createPrefixRoutes(); - - { - auto* route = prefix_routes.mutable_routes()->Add(); - route->set_prefix("ab"); - route->set_cluster("this_will_throw"); - } - - EXPECT_THROW_WITH_MESSAGE(PrefixRoutes router(prefix_routes, std::move(upstreams)), - EnvoyException, "prefix `ab` already exists.") -} - -} // namespace RedisProxy -} // namespace NetworkFilters -} // namespace Extensions -} // namespace Envoy From 845f8378cf49709c465ec40b12a039eb87382b6b Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Wed, 27 Mar 2019 20:17:14 -0400 Subject: [PATCH 030/165] runtime: switching from unordered_map to absl::flat_hash_map (#6389) This allows us to move the new runtime APIs over to string_view without taking a string-serialization performance hit. see https://abseil.io/docs/cpp/guides/container for flat_hash_map being a unordered_map replacement with heterogeneous lookup for string_view. Risk Level: Medium (swapping the underlying internals of runtime) Testing: existing tests pass Docs Changes: no Release Notes: no --- include/envoy/runtime/runtime.h | 9 +++++---- source/common/runtime/runtime_features.h | 5 ++--- source/common/runtime/runtime_impl.cc | 6 +++--- source/common/runtime/runtime_impl.h | 6 +++--- test/mocks/runtime/mocks.h | 4 ++-- test/server/http/admin_test.cc | 11 +++++------ 6 files changed, 20 insertions(+), 21 deletions(-) diff --git a/include/envoy/runtime/runtime.h b/include/envoy/runtime/runtime.h index 499fd48e5ee6e..b5e308b14d4ac 100644 --- a/include/envoy/runtime/runtime.h +++ b/include/envoy/runtime/runtime.h @@ -12,6 +12,7 @@ #include "common/common/assert.h" #include "common/singleton/threadsafe_singleton.h" +#include "absl/container/flat_hash_map.h" #include "absl/types/optional.h" namespace Envoy { @@ -52,7 +53,7 @@ class Snapshot { absl::optional bool_value_; }; - typedef std::unordered_map EntryMap; + typedef absl::flat_hash_map EntryMap; /** * A provider of runtime values. 
One or more of these compose the snapshot's source of values, @@ -63,9 +64,9 @@ class Snapshot { virtual ~OverrideLayer() {} /** - * @return const std::unordered_map& the values in this layer. + * @return const absl::flat_hash_map& the values in this layer. */ - virtual const std::unordered_map& values() const PURE; + virtual const EntryMap& values() const PURE; /** * @return const std::string& a user-friendly alias for this layer, e.g. "admin" or "disk". @@ -88,7 +89,7 @@ class Snapshot { // Runtime features are used to easily allow switching between old and new code paths for high // risk changes. The intent is for the old code path to be short lived - the old code path is // deprecated as the feature is defaulted true, and removed with the following Envoy release. - virtual bool runtimeFeatureEnabled(const std::string& key) const PURE; + virtual bool runtimeFeatureEnabled(absl::string_view key) const PURE; /** * Test if a feature is enabled using the built in random generator. This is done by generating diff --git a/source/common/runtime/runtime_features.h b/source/common/runtime/runtime_features.h index 340eae0d5b4f3..cac8726c3f7af 100644 --- a/source/common/runtime/runtime_features.h +++ b/source/common/runtime/runtime_features.h @@ -9,7 +9,6 @@ namespace Envoy { namespace Runtime { -// TODO(alyssawilk) convert these to string view. class RuntimeFeatures { public: RuntimeFeatures(); @@ -17,13 +16,13 @@ class RuntimeFeatures { // This tracks proto configured features, to determine if a given deprecated // feature is still allowed, or has been made fatal-by-default per the Envoy // deprecation process. - bool disallowedByDefault(const std::string& feature) const { + bool disallowedByDefault(absl::string_view feature) const { return disallowed_features_.find(feature) != disallowed_features_.end(); } // This tracks config-guarded code paths, to determine if a given // runtime-guarded-code-path has the new code run by default or the old code. 
- bool enabledByDefault(const std::string& feature) const { + bool enabledByDefault(absl::string_view feature) const { return enabled_features_.find(feature) != enabled_features_.end(); } diff --git a/source/common/runtime/runtime_impl.cc b/source/common/runtime/runtime_impl.cc index 8124cb21b1a67..33e0e42f10f91 100644 --- a/source/common/runtime/runtime_impl.cc +++ b/source/common/runtime/runtime_impl.cc @@ -22,7 +22,7 @@ namespace Envoy { namespace Runtime { -bool runtimeFeatureEnabled(const std::string& feature) { +bool runtimeFeatureEnabled(absl::string_view feature) { ASSERT(absl::StartsWith(feature, "envoy.reloadable_features")); if (Runtime::LoaderSingleton::getExisting()) { return Runtime::LoaderSingleton::getExisting()->snapshot().runtimeFeatureEnabled(feature); @@ -174,7 +174,7 @@ bool SnapshotImpl::deprecatedFeatureEnabled(const std::string& key) const { return true; } -bool SnapshotImpl::runtimeFeatureEnabled(const std::string& key) const { +bool SnapshotImpl::runtimeFeatureEnabled(absl::string_view key) const { bool enabled = false; // If the value is not explicitly set as a runtime boolean, the default value is based on // disallowedByDefault. 
@@ -257,7 +257,7 @@ uint64_t SnapshotImpl::getInteger(const std::string& key, uint64_t default_value } } -bool SnapshotImpl::getBoolean(const std::string& key, bool& value) const { +bool SnapshotImpl::getBoolean(absl::string_view key, bool& value) const { auto entry = values_.find(key); if (entry != values_.end() && entry->second.bool_value_.has_value()) { value = entry->second.bool_value_.value(); diff --git a/source/common/runtime/runtime_impl.h b/source/common/runtime/runtime_impl.h index 99d812023d33f..1257d36407c9f 100644 --- a/source/common/runtime/runtime_impl.h +++ b/source/common/runtime/runtime_impl.h @@ -24,7 +24,7 @@ namespace Envoy { namespace Runtime { -bool runtimeFeatureEnabled(const std::string& feature); +bool runtimeFeatureEnabled(absl::string_view feature); using RuntimeSingleton = ThreadSafeSingleton; @@ -74,7 +74,7 @@ class SnapshotImpl : public Snapshot, // Runtime::Snapshot bool deprecatedFeatureEnabled(const std::string& key) const override; - bool runtimeFeatureEnabled(const std::string& key) const override; + bool runtimeFeatureEnabled(absl::string_view key) const override; bool featureEnabled(const std::string& key, uint64_t default_value, uint64_t random_value, uint64_t num_buckets) const override; bool featureEnabled(const std::string& key, uint64_t default_value) const override; @@ -92,7 +92,7 @@ class SnapshotImpl : public Snapshot, // Returns true and sets 'value' to the key if found. // Returns false if the key is not a boolean value. 
- bool getBoolean(const std::string& key, bool& value) const; + bool getBoolean(absl::string_view key, bool& value) const; private: static void resolveEntryType(Entry& entry) { diff --git a/test/mocks/runtime/mocks.h b/test/mocks/runtime/mocks.h index 2753c09593b63..151d3e2112ca3 100644 --- a/test/mocks/runtime/mocks.h +++ b/test/mocks/runtime/mocks.h @@ -28,7 +28,7 @@ class MockSnapshot : public Snapshot { ~MockSnapshot() override; MOCK_CONST_METHOD1(deprecatedFeatureEnabled, bool(const std::string& key)); - MOCK_CONST_METHOD1(runtimeFeatureEnabled, bool(const std::string& key)); + MOCK_CONST_METHOD1(runtimeFeatureEnabled, bool(absl::string_view key)); MOCK_CONST_METHOD2(featureEnabled, bool(const std::string& key, uint64_t default_value)); MOCK_CONST_METHOD3(featureEnabled, bool(const std::string& key, uint64_t default_value, uint64_t random_value)); @@ -61,7 +61,7 @@ class MockOverrideLayer : public Snapshot::OverrideLayer { ~MockOverrideLayer(); MOCK_CONST_METHOD0(name, const std::string&()); - MOCK_CONST_METHOD0(values, const std::unordered_map&()); + MOCK_CONST_METHOD0(values, const Snapshot::EntryMap&()); }; } // namespace Runtime diff --git a/test/server/http/admin_test.cc b/test/server/http/admin_test.cc index bffaf0c9380f6..25e45c60c24ec 100644 --- a/test/server/http/admin_test.cc +++ b/test/server/http/admin_test.cc @@ -934,12 +934,11 @@ TEST_P(AdminInstanceTest, Runtime) { Runtime::MockLoader loader; auto layer1 = std::make_unique>(); auto layer2 = std::make_unique>(); - std::unordered_map entries2{ - {"string_key", {"override", {}, {}, {}}}, {"extra_key", {"bar", {}, {}, {}}}}; - std::unordered_map entries1{ - {"string_key", {"foo", {}, {}, {}}}, - {"int_key", {"1", 1, {}, {}}}, - {"other_key", {"bar", {}, {}, {}}}}; + Runtime::Snapshot::EntryMap entries2{{"string_key", {"override", {}, {}, {}}}, + {"extra_key", {"bar", {}, {}, {}}}}; + Runtime::Snapshot::EntryMap entries1{{"string_key", {"foo", {}, {}, {}}}, + {"int_key", {"1", 1, {}, {}}}, + 
{"other_key", {"bar", {}, {}, {}}}}; ON_CALL(*layer1, name()).WillByDefault(testing::ReturnRefOfCopy(std::string{"layer1"})); ON_CALL(*layer1, values()).WillByDefault(testing::ReturnRef(entries1)); From c949a8144cf3b0162133dde0c489dea8a4078a47 Mon Sep 17 00:00:00 2001 From: Matt Klein Date: Thu, 28 Mar 2019 01:31:56 -0700 Subject: [PATCH 031/165] docs: more snapping fixes (#6404) * docs: more snapping fixes Signed-off-by: Matt Klein --- api/STYLE.md | 2 +- api/envoy/config/metrics/v2/stats.proto | 10 ++- .../configuration/overview/v2_overview.rst | 61 +++++++++---------- docs/root/configuration/secret.rst | 4 +- examples/cors/backend/Dockerfile-service | 2 +- examples/cors/frontend/Dockerfile-service | 2 +- examples/front-proxy/Dockerfile-service | 2 +- 7 files changed, 40 insertions(+), 43 deletions(-) diff --git a/api/STYLE.md b/api/STYLE.md index 887a6c53a45b8..0289c5f85af27 100644 --- a/api/STYLE.md +++ b/api/STYLE.md @@ -123,7 +123,7 @@ In addition, the following conventions should be followed: ``` * The [Breaking Change - Policy](https://github.com/envoyproxy/envoy/blob/master//CONTRIBUTING.md#breaking-change-policy) describes + Policy](https://github.com/envoyproxy/envoy/blob/master/CONTRIBUTING.md#breaking-change-policy) describes API versioning, deprecation and compatibility. ## Package organization diff --git a/api/envoy/config/metrics/v2/stats.proto b/api/envoy/config/metrics/v2/stats.proto index 27a838124a066..08172180b5451 100644 --- a/api/envoy/config/metrics/v2/stats.proto +++ b/api/envoy/config/metrics/v2/stats.proto @@ -59,9 +59,8 @@ message StatsConfig { // If any default tags are specified twice, the config will be considered // invalid. // - // See `well_known_names.h - // `_ - // for a list of the default tags in Envoy. + // See :repo:`well_known_names.h ` for a list of the + // default tags in Envoy. // // If not provided, the value is assumed to be true. 
google.protobuf.BoolValue use_all_default_tags = 2; @@ -166,9 +165,8 @@ message StatsMatcher { message TagSpecifier { // Attaches an identifier to the tag values to identify the tag being in the // sink. Envoy has a set of default names and regexes to extract dynamic - // portions of existing stats, which can be found in `well_known_names.h - // `_ - // in the Envoy repository. If a :ref:`tag_name + // portions of existing stats, which can be found in :repo:`well_known_names.h + // ` in the Envoy repository. If a :ref:`tag_name // ` is provided in the config and // neither :ref:`regex ` or // :ref:`fixed_value ` were specified, diff --git a/docs/root/configuration/overview/v2_overview.rst b/docs/root/configuration/overview/v2_overview.rst index de78c974e5915..c296684d1099a 100644 --- a/docs/root/configuration/overview/v2_overview.rst +++ b/docs/root/configuration/overview/v2_overview.rst @@ -8,19 +8,19 @@ The Envoy v2 APIs are defined as `proto3 `_ in the `data plane API repository `_. They support -* Streaming delivery of `xDS `_ - API updates via gRPC. This reduces resource requirements and can lower the update latency. +* Streaming delivery of :repo:`xDS ` API updates via gRPC. This reduces + resource requirements and can lower the update latency. * A new REST-JSON API in which the JSON/YAML formats are derived mechanically via the `proto3 canonical JSON mapping `_. * Delivery of updates via the filesystem, REST-JSON or gRPC endpoints. * Advanced load balancing through an extended endpoint assignment API and load and resource utilization reporting to management servers. -* `Stronger consistency and ordering properties - `_ +* :repo:`Stronger consistency and ordering properties + ` when needed. The v2 APIs still maintain a baseline eventual consistency model. -See the `xDS protocol description `_ for +See the :repo:`xDS protocol description ` for further details on aspects of v2 message exchange between Envoy and the management server. .. 
_config_overview_v2_bootstrap: @@ -199,8 +199,8 @@ In the above example, the EDS management server could then return a proto encodi The versioning and type URL scheme that appear above are explained in more -detail in the `streaming gRPC subscription protocol -`_ +detail in the :repo:`streaming gRPC subscription protocol +` documentation. Dynamic @@ -352,7 +352,7 @@ A v2 xDS management server will implement the below endpoints as required for gRPC and/or REST serving. In both streaming gRPC and REST-JSON cases, a :ref:`DiscoveryRequest ` is sent and a :ref:`DiscoveryResponse ` received following the -`xDS protocol `_. +:repo:`xDS protocol `. .. _v2_grpc_streaming_endpoints: @@ -361,9 +361,8 @@ gRPC streaming endpoints .. http:post:: /envoy.api.v2.ClusterDiscoveryService/StreamClusters -See `cds.proto -`_ -for the service definition. This is used by Envoy as a client when +See :repo:`cds.proto ` for the service definition. This is used by Envoy +as a client when .. code-block:: yaml @@ -380,8 +379,8 @@ is set in the :ref:`dynamic_resources .. http:post:: /envoy.api.v2.EndpointDiscoveryService/StreamEndpoints -See `eds.proto -`_ +See :repo:`eds.proto +` for the service definition. This is used by Envoy as a client when .. code-block:: yaml @@ -399,8 +398,8 @@ is set in the :ref:`eds_cluster_config .. http:post:: /envoy.api.v2.ListenerDiscoveryService/StreamListeners -See `lds.proto -`_ +See :repo:`lds.proto +` for the service definition. This is used by Envoy as a client when .. code-block:: yaml @@ -418,8 +417,8 @@ is set in the :ref:`dynamic_resources .. http:post:: /envoy.api.v2.RouteDiscoveryService/StreamRoutes -See `rds.proto -`_ +See :repo:`rds.proto +` for the service definition. This is used by Envoy as a client when .. code-block:: yaml @@ -441,8 +440,8 @@ REST endpoints .. http:post:: /v2/discovery:clusters -See `cds.proto -`_ +See :repo:`cds.proto +` for the service definition. This is used by Envoy as a client when .. 
code-block:: yaml @@ -458,8 +457,8 @@ is set in the :ref:`dynamic_resources .. http:post:: /v2/discovery:endpoints -See `eds.proto -`_ +See :repo:`eds.proto +` for the service definition. This is used by Envoy as a client when .. code-block:: yaml @@ -475,8 +474,8 @@ is set in the :ref:`eds_cluster_config .. http:post:: /v2/discovery:listeners -See `lds.proto -`_ +See :repo:`lds.proto +` for the service definition. This is used by Envoy as a client when .. code-block:: yaml @@ -492,8 +491,8 @@ is set in the :ref:`dynamic_resources .. http:post:: /v2/discovery:routes -See `rds.proto -`_ +See :repo:`rds.proto +` for the service definition. This is used by Envoy as a client when .. code-block:: yaml @@ -536,14 +535,14 @@ synchronization to correctly sequence the update. With ADS, the management server would deliver the CDS, EDS and then RDS updates on a single stream. ADS is only available for gRPC streaming (not REST) and is described more fully -in `this -`_ +in :repo:`this +` document. The gRPC endpoint is: .. http:post:: /envoy.api.v2.AggregatedDiscoveryService/StreamAggregatedResources -See `discovery.proto -`_ +See :repo:`discovery.proto +` for the service definition. This is used by Envoy as a client when .. code-block:: yaml @@ -622,8 +621,8 @@ means that we will not break wire format compatibility. manner that does not break `backwards compatibility `_. Fields in the above protos may be later deprecated, subject to the -`breaking change policy -`_, +:repo:`breaking change policy +`, when their related functionality is no longer required. 
While frozen APIs have their wire format compatibility preserved, we reserve the right to change proto namespaces, file locations and nesting relationships, which may cause diff --git a/docs/root/configuration/secret.rst b/docs/root/configuration/secret.rst index 2f6445260eb96..bf42233583fce 100644 --- a/docs/root/configuration/secret.rst +++ b/docs/root/configuration/secret.rst @@ -20,8 +20,8 @@ The connection between Envoy proxy and SDS server has to be secure. One option i SDS server ---------- -A SDS server needs to implement the gRPC service `SecretDiscoveryService `_. -It follows the same protocol as other `xDS `_ +A SDS server needs to implement the gRPC service :repo:`SecretDiscoveryService `. +It follows the same protocol as other :repo:`xDS `. SDS Configuration ----------------- diff --git a/examples/cors/backend/Dockerfile-service b/examples/cors/backend/Dockerfile-service index 5e5013002cf55..89b5fc12736ec 100644 --- a/examples/cors/backend/Dockerfile-service +++ b/examples/cors/backend/Dockerfile-service @@ -1,4 +1,4 @@ -FROM envoyproxy/envoy-alpine:latest +FROM envoyproxy/envoy-alpine-dev:latest RUN apk update && apk add python3 bash RUN pip3 install -q Flask==0.11.1 diff --git a/examples/cors/frontend/Dockerfile-service b/examples/cors/frontend/Dockerfile-service index 6ba3b484e833a..8d882faa172fc 100644 --- a/examples/cors/frontend/Dockerfile-service +++ b/examples/cors/frontend/Dockerfile-service @@ -1,4 +1,4 @@ -FROM envoyproxy/envoy-alpine:latest +FROM envoyproxy/envoy-alpine-dev:latest RUN apk update && apk add python3 bash RUN pip3 install -q Flask==0.11.1 diff --git a/examples/front-proxy/Dockerfile-service b/examples/front-proxy/Dockerfile-service index ba7e5dbd2f011..c3f5bafefc19b 100644 --- a/examples/front-proxy/Dockerfile-service +++ b/examples/front-proxy/Dockerfile-service @@ -1,4 +1,4 @@ -FROM envoyproxy/envoy-alpine:latest +FROM envoyproxy/envoy-alpine-dev:latest RUN apk update && apk add python3 bash curl RUN pip3 install -q 
Flask==0.11.1 requests==2.18.4 From 7eee8b305f4644c03c13b45ced7a1494709ebe5e Mon Sep 17 00:00:00 2001 From: Adam Laiacano Date: Thu, 28 Mar 2019 12:41:54 -0400 Subject: [PATCH 032/165] include required python and go dependencies for grpc-bridge example (#6402) Signed-off-by: Adam Laiacano --- examples/grpc-bridge/Dockerfile-python | 2 +- examples/grpc-bridge/client/client.py | 2 +- examples/grpc-bridge/client/requirements.txt | 1 + examples/grpc-bridge/script/bootstrap | 1 + 4 files changed, 4 insertions(+), 2 deletions(-) mode change 100644 => 100755 examples/grpc-bridge/client/client.py diff --git a/examples/grpc-bridge/Dockerfile-python b/examples/grpc-bridge/Dockerfile-python index 84d468d2b3d2f..e90c8a469a5c9 100644 --- a/examples/grpc-bridge/Dockerfile-python +++ b/examples/grpc-bridge/Dockerfile-python @@ -3,8 +3,8 @@ FROM envoyproxy/envoy-dev:latest RUN apt-get update RUN apt-get -q install -y python-dev \ python-pip -RUN pip install -q grpcio protobuf requests ADD ./client /client +RUN pip install -r /client/requirements.txt RUN chmod a+x /client/client.py RUN mkdir /var/log/envoy/ CMD /usr/local/bin/envoy -c /etc/s2s-python-envoy.yaml diff --git a/examples/grpc-bridge/client/client.py b/examples/grpc-bridge/client/client.py old mode 100644 new mode 100755 index 2ae336341a100..b1cba24bad2a4 --- a/examples/grpc-bridge/client/client.py +++ b/examples/grpc-bridge/client/client.py @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/env python import requests, sys import kv_pb2 as kv diff --git a/examples/grpc-bridge/client/requirements.txt b/examples/grpc-bridge/client/requirements.txt index 2f874cf2e4803..5f42b3c4c74e5 100644 --- a/examples/grpc-bridge/client/requirements.txt +++ b/examples/grpc-bridge/client/requirements.txt @@ -1,2 +1,3 @@ requests>=2.20.0 grpcio +protobuf==3.7.0 diff --git a/examples/grpc-bridge/script/bootstrap b/examples/grpc-bridge/script/bootstrap index 3d8207cb3d68b..d8ba5a4f242a0 100755 --- a/examples/grpc-bridge/script/bootstrap +++ 
b/examples/grpc-bridge/script/bootstrap @@ -6,5 +6,6 @@ cd $(dirname $0)/.. echo "fetching dependencies..." go get golang.org/x/net/context +go get golang.org/x/sys/unix go get google.golang.org/grpc echo "done" From 792e70dc0291a8d99a1783dfc27af5325f863a8a Mon Sep 17 00:00:00 2001 From: Derek Argueta Date: Thu, 28 Mar 2019 10:53:37 -0700 Subject: [PATCH 033/165] test: convert ratelimit test configs to v2 YAML (#6411) Signed-off-by: Derek Argueta --- test/common/router/BUILD | 2 +- test/common/router/router_ratelimit_test.cc | 567 +++++++------------- 2 files changed, 199 insertions(+), 370 deletions(-) diff --git a/test/common/router/BUILD b/test/common/router/BUILD index 8a3de57a8909e..08811d0e06653 100644 --- a/test/common/router/BUILD +++ b/test/common/router/BUILD @@ -137,7 +137,7 @@ envoy_cc_test( srcs = ["router_ratelimit_test.cc"], deps = [ "//source/common/http:header_map_lib", - "//source/common/json:json_loader_lib", + "//source/common/protobuf:utility_lib", "//source/common/router:config_lib", "//source/common/router:router_ratelimit_lib", "//test/mocks/http:http_mocks", diff --git a/test/common/router/router_ratelimit_test.cc b/test/common/router/router_ratelimit_test.cc index 2735ec873423a..da129018b2522 100644 --- a/test/common/router/router_ratelimit_test.cc +++ b/test/common/router/router_ratelimit_test.cc @@ -2,10 +2,11 @@ #include #include -#include "common/config/rds_json.h" +#include "envoy/api/v2/route/route.pb.validate.h" + #include "common/http/header_map_impl.h" -#include "common/json/json_loader.h" #include "common/network/address_impl.h" +#include "common/protobuf/utility.h" #include "common/router/config_impl.h" #include "common/router/router_ratelimit.h" @@ -27,69 +28,54 @@ namespace Envoy { namespace Router { namespace { -envoy::api::v2::route::RateLimit parseRateLimitFromJson(const std::string& json_string) { +envoy::api::v2::route::RateLimit parseRateLimitFromV2Yaml(const std::string& yaml_string) { envoy::api::v2::route::RateLimit 
rate_limit; - auto json_object_ptr = Json::Factory::loadFromString(json_string); - Envoy::Config::RdsJson::translateRateLimit(*json_object_ptr, rate_limit); + MessageUtil::loadFromYaml(yaml_string, rate_limit); + MessageUtil::validate(rate_limit); return rate_limit; } TEST(BadRateLimitConfiguration, MissingActions) { - EXPECT_THROW(parseRateLimitFromJson("{}"), EnvoyException); + EXPECT_THROW_WITH_REGEX(parseRateLimitFromV2Yaml("{}"), EnvoyException, + "value must contain at least"); } TEST(BadRateLimitConfiguration, BadType) { - std::string json = R"EOF( - { - "actions":[ - { - "type": "bad_type" - } - ] - } + const std::string yaml = R"EOF( +actions: +- bad_type: {} )EOF"; - EXPECT_THROW(RateLimitPolicyEntryImpl(parseRateLimitFromJson(json)), EnvoyException); + EXPECT_THROW_WITH_REGEX(parseRateLimitFromV2Yaml(yaml), EnvoyException, + "bad_type: Cannot find field"); } TEST(BadRateLimitConfiguration, ActionsMissingRequiredFields) { - std::string json_one = R"EOF( - { - "actions":[ - { - "type": "request_headers" - } - ] - } + const std::string yaml_one = R"EOF( +actions: +- request_headers: {} )EOF"; - EXPECT_THROW(RateLimitPolicyEntryImpl(parseRateLimitFromJson(json_one)), EnvoyException); + EXPECT_THROW_WITH_REGEX(parseRateLimitFromV2Yaml(yaml_one), EnvoyException, + "value length must be at least"); - std::string json_two = R"EOF( - { - "actions":[ - { - "type": "request_headers", - "header_name" : "test" - } - ] - } + const std::string yaml_two = R"EOF( +actions: +- request_headers: + header_name: test )EOF"; - EXPECT_THROW(RateLimitPolicyEntryImpl(parseRateLimitFromJson(json_two)), EnvoyException); + EXPECT_THROW_WITH_REGEX(parseRateLimitFromV2Yaml(yaml_two), EnvoyException, + "value length must be at least"); - std::string json_three = R"EOF( - { - "actions":[ - { - "type": "request_headers", - "descriptor_key" : "test" - } - ] - } + const std::string yaml_three = R"EOF( +actions: +- request_headers: + descriptor_key: test )EOF"; - 
EXPECT_THROW(RateLimitPolicyEntryImpl(parseRateLimitFromJson(json_three)), EnvoyException); + EXPECT_THROW_WITH_REGEX(parseRateLimitFromV2Yaml(yaml_three), EnvoyException, + "value length must be at least"); } static Http::TestHeaderMapImpl genHeaders(const std::string& host, const std::string& path, @@ -99,11 +85,9 @@ static Http::TestHeaderMapImpl genHeaders(const std::string& host, const std::st class RateLimitConfiguration : public testing::Test { public: - void setupTest(const std::string& json) { + void setupTest(const std::string& yaml) { envoy::api::v2::RouteConfiguration route_config; - auto json_object_ptr = Json::Factory::loadFromString(json); - Envoy::Config::RdsJson::translateRouteConfiguration(*json_object_ptr, route_config, - stats_options); + MessageUtil::loadFromYaml(yaml, route_config); config_ = std::make_unique(route_config, factory_context_, true); } @@ -116,37 +100,26 @@ class RateLimitConfiguration : public testing::Test { }; TEST_F(RateLimitConfiguration, NoApplicableRateLimit) { - std::string json = R"EOF( - { - "virtual_hosts": [ - { - "name": "www2", - "domains": ["www.lyft.com"], - "routes": [ - { - "prefix": "/foo", - "cluster": "www2", - "rate_limits": [ - { - "actions":[ - { - "type": "remote_address" - } - ] - } - ] - }, - { - "prefix": "/bar", - "cluster": "www2" - } - ] - } - ] - } + const std::string yaml = R"EOF( +virtual_hosts: +- name: www2 + domains: + - www.lyft.com + routes: + - match: + prefix: "/foo" + route: + cluster: www2 + rate_limits: + - actions: + - remote_address: {} + - match: + prefix: "/bar" + route: + cluster: www2 )EOF"; - setupTest(json); + setupTest(yaml); EXPECT_EQ(0U, config_->route(genHeaders("www.lyft.com", "/bar", "GET"), 0) ->routeEntry() @@ -156,24 +129,19 @@ TEST_F(RateLimitConfiguration, NoApplicableRateLimit) { } TEST_F(RateLimitConfiguration, NoRateLimitPolicy) { - std::string json = R"EOF( - { - "virtual_hosts": [ - { - "name": "www2", - "domains": ["www.lyft.com"], - "routes": [ - { - "prefix": 
"/", - "cluster": "www2" - } - ] - } - ] - } + const std::string yaml = R"EOF( +virtual_hosts: +- name: www2 + domains: + - www.lyft.com + routes: + - match: + prefix: "/" + route: + cluster: www2 )EOF"; - setupTest(json); + setupTest(yaml); route_ = config_->route(genHeaders("www.lyft.com", "/bar", "GET"), 0)->routeEntry(); EXPECT_EQ(0U, route_->rateLimitPolicy().getApplicableRateLimit(0).size()); @@ -181,33 +149,22 @@ TEST_F(RateLimitConfiguration, NoRateLimitPolicy) { } TEST_F(RateLimitConfiguration, TestGetApplicationRateLimit) { - std::string json = R"EOF( - { - "virtual_hosts": [ - { - "name": "www2", - "domains": ["www.lyft.com"], - "routes": [ - { - "prefix": "/foo", - "cluster": "www2", - "rate_limits": [ - { - "actions":[ - { - "type": "remote_address" - } - ] - } - ] - } - ] - } - ] - } + const std::string yaml = R"EOF( +virtual_hosts: +- name: www2 + domains: + - www.lyft.com + routes: + - match: + prefix: "/foo" + route: + cluster: www2 + rate_limits: + - actions: + - remote_address: {} )EOF"; - setupTest(json); + setupTest(yaml); route_ = config_->route(genHeaders("www.lyft.com", "/foo", "GET"), 0)->routeEntry(); EXPECT_FALSE(route_->rateLimitPolicy().empty()); @@ -224,33 +181,22 @@ TEST_F(RateLimitConfiguration, TestGetApplicationRateLimit) { } TEST_F(RateLimitConfiguration, TestVirtualHost) { - std::string json = R"EOF( - { - "virtual_hosts": [ - { - "name": "www2", - "domains": ["www.lyft.com"], - "routes": [ - { - "prefix": "/", - "cluster": "www2test" - } - ], - "rate_limits": [ - { - "actions": [ - { - "type": "destination_cluster" - } - ] - } - ] - } - ] - } + const std::string yaml = R"EOF( +virtual_hosts: +- name: www2 + domains: + - www.lyft.com + routes: + - match: + prefix: "/" + route: + cluster: www2test + rate_limits: + - actions: + - destination_cluster: {} )EOF"; - setupTest(json); + setupTest(yaml); route_ = config_->route(genHeaders("www.lyft.com", "/bar", "GET"), 0)->routeEntry(); std::vector> rate_limits = @@ -267,51 +213,28 @@ 
TEST_F(RateLimitConfiguration, TestVirtualHost) { } TEST_F(RateLimitConfiguration, Stages) { - std::string json = R"EOF( - { - "virtual_hosts": [ - { - "name": "www2", - "domains": ["www.lyft.com"], - "routes": [ - { - "prefix": "/foo", - "cluster": "www2test", - "rate_limits": [ - { - "stage": 1, - "actions": [ - { - "type": "remote_address" - } - ] - }, - { - "actions" : [ - { - "type" : "destination_cluster" - } - ] - }, - { - "actions": [ - { - "type" : "destination_cluster" - }, - { - "type": "source_cluster" - } - ] - } - ] - } - ] - } - ] - } + const std::string yaml = R"EOF( +virtual_hosts: +- name: www2 + domains: + - www.lyft.com + routes: + - match: + prefix: "/foo" + route: + cluster: www2test + rate_limits: + - stage: 1 + actions: + - remote_address: {} + - actions: + - destination_cluster: {} + - actions: + - destination_cluster: {} + - source_cluster: {} )EOF"; - setupTest(json); + setupTest(yaml); route_ = config_->route(genHeaders("www.lyft.com", "/foo", "GET"), 0)->routeEntry(); std::vector> rate_limits = @@ -345,8 +268,8 @@ TEST_F(RateLimitConfiguration, Stages) { class RateLimitPolicyEntryTest : public testing::Test { public: - void setupTest(const std::string& json) { - rate_limit_entry_ = std::make_unique(parseRateLimitFromJson(json)); + void setupTest(const std::string& yaml) { + rate_limit_entry_ = std::make_unique(parseRateLimitFromV2Yaml(yaml)); descriptors_.clear(); } @@ -358,36 +281,26 @@ class RateLimitPolicyEntryTest : public testing::Test { }; TEST_F(RateLimitPolicyEntryTest, RateLimitPolicyEntryMembers) { - std::string json = R"EOF( - { - "stage": 2, - "disable_key": "no_ratelimit", - "actions": [ - { - "type": "remote_address" - } - ] - } + const std::string yaml = R"EOF( +stage: 2 +disable_key: no_ratelimit +actions: +- remote_address: {} )EOF"; - setupTest(json); + setupTest(yaml); EXPECT_EQ(2UL, rate_limit_entry_->stage()); EXPECT_EQ("no_ratelimit", rate_limit_entry_->disableKey()); } TEST_F(RateLimitPolicyEntryTest, 
RemoteAddress) { - std::string json = R"EOF( - { - "actions": [ - { - "type": "remote_address" - } - ] - } + const std::string yaml = R"EOF( +actions: +- remote_address: {} )EOF"; - setupTest(json); + setupTest(yaml); rate_limit_entry_->populateDescriptors(route_, descriptors_, "", header_, default_remote_address_); @@ -397,17 +310,12 @@ TEST_F(RateLimitPolicyEntryTest, RemoteAddress) { // Verify no descriptor is emitted if remote is a pipe. TEST_F(RateLimitPolicyEntryTest, PipeAddress) { - std::string json = R"EOF( - { - "actions": [ - { - "type": "remote_address" - } - ] - } + const std::string yaml = R"EOF( +actions: +- remote_address: {} )EOF"; - setupTest(json); + setupTest(yaml); Network::Address::PipeInstance pipe_address("/hello"); rate_limit_entry_->populateDescriptors(route_, descriptors_, "", header_, pipe_address); @@ -415,17 +323,12 @@ TEST_F(RateLimitPolicyEntryTest, PipeAddress) { } TEST_F(RateLimitPolicyEntryTest, SourceService) { - std::string json = R"EOF( - { - "actions": [ - { - "type": "source_cluster" - } - ] - } + const std::string yaml = R"EOF( +actions: +- source_cluster: {} )EOF"; - setupTest(json); + setupTest(yaml); rate_limit_entry_->populateDescriptors(route_, descriptors_, "service_cluster", header_, default_remote_address_); @@ -435,17 +338,12 @@ TEST_F(RateLimitPolicyEntryTest, SourceService) { } TEST_F(RateLimitPolicyEntryTest, DestinationService) { - std::string json = R"EOF( - { - "actions": [ - { - "type": "destination_cluster" - } - ] - } + const std::string yaml = R"EOF( +actions: +- destination_cluster: {} )EOF"; - setupTest(json); + setupTest(yaml); rate_limit_entry_->populateDescriptors(route_, descriptors_, "service_cluster", header_, default_remote_address_); @@ -455,19 +353,14 @@ TEST_F(RateLimitPolicyEntryTest, DestinationService) { } TEST_F(RateLimitPolicyEntryTest, RequestHeaders) { - std::string json = R"EOF( - { - "actions": [ - { - "type": "request_headers", - "header_name": "x-header-name", - "descriptor_key": 
"my_header_name" - } - ] - } + const std::string yaml = R"EOF( +actions: +- request_headers: + header_name: x-header-name + descriptor_key: my_header_name )EOF"; - setupTest(json); + setupTest(yaml); Http::TestHeaderMapImpl header{{"x-header-name", "test_value"}}; rate_limit_entry_->populateDescriptors(route_, descriptors_, "service_cluster", header, @@ -477,19 +370,14 @@ TEST_F(RateLimitPolicyEntryTest, RequestHeaders) { } TEST_F(RateLimitPolicyEntryTest, RequestHeadersNoMatch) { - std::string json = R"EOF( - { - "actions": [ - { - "type": "request_headers", - "header_name": "x-header", - "descriptor_key": "my_header_name" - } - ] - } + const std::string yaml = R"EOF( +actions: +- request_headers: + header_name: x-header + descriptor_key: my_header_name )EOF"; - setupTest(json); + setupTest(yaml); Http::TestHeaderMapImpl header{{"x-header-name", "test_value"}}; rate_limit_entry_->populateDescriptors(route_, descriptors_, "service_cluster", header, @@ -498,18 +386,13 @@ TEST_F(RateLimitPolicyEntryTest, RequestHeadersNoMatch) { } TEST_F(RateLimitPolicyEntryTest, RateLimitKey) { - std::string json = R"EOF( - { - "actions": [ - { - "type": "generic_key", - "descriptor_value": "fake_key" - } - ] - } + const std::string yaml = R"EOF( +actions: +- generic_key: + descriptor_value: fake_key )EOF"; - setupTest(json); + setupTest(yaml); rate_limit_entry_->populateDescriptors(route_, descriptors_, "", header_, default_remote_address_); @@ -518,25 +401,16 @@ TEST_F(RateLimitPolicyEntryTest, RateLimitKey) { } TEST_F(RateLimitPolicyEntryTest, HeaderValueMatch) { - std::string json = R"EOF( - { - "actions": [ - { - "type": "header_value_match", - "descriptor_value": "fake_value", - "headers": [ - { - "name": "x-header-name", - "value": "test_value", - "regex": false - } - ] - } - ] - } + const std::string yaml = R"EOF( +actions: +- header_value_match: + descriptor_value: fake_value + headers: + - name: x-header-name + exact_match: test_value )EOF"; - setupTest(json); + 
setupTest(yaml); Http::TestHeaderMapImpl header{{"x-header-name", "test_value"}}; rate_limit_entry_->populateDescriptors(route_, descriptors_, "", header, default_remote_address_); @@ -545,25 +419,16 @@ TEST_F(RateLimitPolicyEntryTest, HeaderValueMatch) { } TEST_F(RateLimitPolicyEntryTest, HeaderValueMatchNoMatch) { - std::string json = R"EOF( - { - "actions": [ - { - "type": "header_value_match", - "descriptor_value": "fake_value", - "headers": [ - { - "name": "x-header-name", - "value": "test_value", - "regex": false - } - ] - } - ] - } + const std::string yaml = R"EOF( +actions: +- header_value_match: + descriptor_value: fake_value + headers: + - name: x-header-name + exact_match: test_value )EOF"; - setupTest(json); + setupTest(yaml); Http::TestHeaderMapImpl header{{"x-header-name", "not_same_value"}}; rate_limit_entry_->populateDescriptors(route_, descriptors_, "", header, default_remote_address_); @@ -571,26 +436,17 @@ TEST_F(RateLimitPolicyEntryTest, HeaderValueMatchNoMatch) { } TEST_F(RateLimitPolicyEntryTest, HeaderValueMatchHeadersNotPresent) { - std::string json = R"EOF( - { - "actions": [ - { - "type": "header_value_match", - "descriptor_value": "fake_value", - "expect_match": false, - "headers": [ - { - "name": "x-header-name", - "value": "test_value", - "regex": false - } - ] - } - ] - } + const std::string yaml = R"EOF( +actions: +- header_value_match: + descriptor_value: fake_value + expect_match: false + headers: + - name: x-header-name + exact_match: test_value )EOF"; - setupTest(json); + setupTest(yaml); Http::TestHeaderMapImpl header{{"x-header-name", "not_same_value"}}; rate_limit_entry_->populateDescriptors(route_, descriptors_, "", header, default_remote_address_); @@ -599,26 +455,17 @@ TEST_F(RateLimitPolicyEntryTest, HeaderValueMatchHeadersNotPresent) { } TEST_F(RateLimitPolicyEntryTest, HeaderValueMatchHeadersPresent) { - std::string json = R"EOF( - { - "actions": [ - { - "type": "header_value_match", - "descriptor_value": "fake_value", - 
"expect_match": false, - "headers": [ - { - "name": "x-header-name", - "value": "test_value", - "regex": false - } - ] - } - ] - } + const std::string yaml = R"EOF( +actions: +- header_value_match: + descriptor_value: fake_value + expect_match: false + headers: + - name: x-header-name + exact_match: test_value )EOF"; - setupTest(json); + setupTest(yaml); Http::TestHeaderMapImpl header{{"x-header-name", "test_value"}}; rate_limit_entry_->populateDescriptors(route_, descriptors_, "", header, default_remote_address_); @@ -626,20 +473,13 @@ TEST_F(RateLimitPolicyEntryTest, HeaderValueMatchHeadersPresent) { } TEST_F(RateLimitPolicyEntryTest, CompoundActions) { - std::string json = R"EOF( - { - "actions": [ - { - "type": "destination_cluster" - }, - { - "type": "source_cluster" - } - ] - } + const std::string yaml = R"EOF( +actions: +- destination_cluster: {} +- source_cluster: {} )EOF"; - setupTest(json); + setupTest(yaml); rate_limit_entry_->populateDescriptors(route_, descriptors_, "service_cluster", header_, default_remote_address_); @@ -650,28 +490,17 @@ TEST_F(RateLimitPolicyEntryTest, CompoundActions) { } TEST_F(RateLimitPolicyEntryTest, CompoundActionsNoDescriptor) { - std::string json = R"EOF( - { - "actions": [ - { - "type": "destination_cluster" - }, - { - "type": "header_value_match", - "descriptor_value": "fake_value", - "headers": [ - { - "name": "x-header-name", - "value": "test_value", - "regex": false - } - ] - } - ] - } + const std::string yaml = R"EOF( +actions: +- destination_cluster: {} +- header_value_match: + descriptor_value: fake_value + headers: + - name: x-header-name + exact_match: test_value )EOF"; - setupTest(json); + setupTest(yaml); rate_limit_entry_->populateDescriptors(route_, descriptors_, "service_cluster", header_, default_remote_address_); From ed9e04277328486d397ce074517516824a877922 Mon Sep 17 00:00:00 2001 From: noctarius aka Christoph Engelbert Date: Thu, 28 Mar 2019 21:43:08 +0100 Subject: [PATCH 034/165] docs: adds information 
about the Envoy tracer from Instana #6371 (#6416) Signed-off-by: Christoph Engelbert --- docs/root/intro/arch_overview/tracing.rst | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/docs/root/intro/arch_overview/tracing.rst b/docs/root/intro/arch_overview/tracing.rst index 22d4f16410bd7..47c635d5cb432 100644 --- a/docs/root/intro/arch_overview/tracing.rst +++ b/docs/root/intro/arch_overview/tracing.rst @@ -12,14 +12,18 @@ sources of latency. Envoy supports three features related to system wide tracing * **Request ID generation**: Envoy will generate UUIDs when needed and populate the :ref:`config_http_conn_man_headers_x-request-id` HTTP header. Applications can forward the x-request-id header for unified logging as well as tracing. -* **External trace service integration**: Envoy supports pluggable external trace visualization - providers. Currently Envoy supports `LightStep `_, `Zipkin `_ - or any Zipkin compatible backends (e.g. `Jaeger `_), and - `Datadog `_. - However, support for other tracing providers would not be difficult to add. * **Client trace ID joining**: The :ref:`config_http_conn_man_headers_x-client-trace-id` header can be used to join untrusted request IDs to the trusted internal :ref:`config_http_conn_man_headers_x-request-id`. +* **External trace service integration**: Envoy supports pluggable external trace visualization + providers, that are divided into two subgroups: + + - External tracers which are part of the Envoy code base, like `LightStep `_, + `Zipkin `_ or any Zipkin compatible backends (e.g. `Jaeger `_), and + `Datadog `_. + - External tracers which come as a third party plugin, like `Instana `_. + +Support for other tracing providers would not be difficult to add. 
How to initiate a trace ----------------------- From af26857cead7501d622d1a94d15dfa2d6dc99e3b Mon Sep 17 00:00:00 2001 From: Fred Douglas <43351173+fredlas@users.noreply.github.com> Date: Fri, 29 Mar 2019 09:33:10 -0400 Subject: [PATCH 035/165] config: de-templatize source/common/config (#6391) A refactor to remove a bunch of templating from the xDS implementation, all the way down to the Subscription and SubscriptionCallbacks interfaces. Until now, the subscription implementations have all been templated on resource proto type, but that template type is really only used to convert Any protobuf blobs to the appropriate type before handing them off to the RDS, CDS, etc code. This PR has those Any blobs remain unconverted until they reach e.g. rds_impl.cc, which knows to MessageUtil::anyConvert() them. Removing understanding of type from the xDS protocol implementation brings things more into line with discovery.proto, which view xDS resources as Any blobs, and generally knows nothing about resource type. Contributes to #5270. 
Risk Level: low Testing: existing tests Signed-off-by: Fred Douglas --- include/envoy/config/subscription.h | 17 +- .../common/config/delta_subscription_impl.h | 17 +- .../config/filesystem_subscription_impl.h | 14 +- .../config/grpc_mux_subscription_impl.h | 23 +- source/common/config/grpc_subscription_impl.h | 14 +- source/common/config/http_subscription_impl.h | 10 +- source/common/config/subscription_factory.h | 34 +- source/common/router/rds_impl.cc | 14 +- source/common/router/rds_impl.h | 12 +- source/common/secret/sds_api.cc | 11 +- source/common/secret/sds_api.h | 7 +- source/common/upstream/cds_api_impl.cc | 19 +- source/common/upstream/cds_api_impl.h | 7 +- source/common/upstream/eds.cc | 13 +- source/common/upstream/eds.h | 8 +- source/server/lds_api.cc | 30 +- source/server/lds_api.h | 7 +- .../config/config_provider_impl_test.cc | 24 +- .../config/delta_subscription_test_harness.h | 13 +- .../filesystem_subscription_impl_test.cc | 2 +- .../filesystem_subscription_test_harness.h | 12 +- .../config/grpc_subscription_test_harness.h | 13 +- .../config/http_subscription_test_harness.h | 13 +- .../config/subscription_factory_test.cc | 7 +- test/common/router/rds_impl_test.cc | 15 +- test/common/secret/sds_api_test.cc | 43 +- .../common/secret/secret_manager_impl_test.cc | 7 +- test/common/upstream/cds_api_impl_test.cc | 49 ++- test/common/upstream/eds_test.cc | 378 ++++++++---------- test/mocks/config/mocks.h | 15 +- test/server/lds_api_test.cc | 40 +- 31 files changed, 420 insertions(+), 468 deletions(-) diff --git a/include/envoy/config/subscription.h b/include/envoy/config/subscription.h index b81d769585fb7..2897e9798befc 100644 --- a/include/envoy/config/subscription.h +++ b/include/envoy/config/subscription.h @@ -13,11 +13,9 @@ namespace Envoy { namespace Config { -template class SubscriptionCallbacks { +class SubscriptionCallbacks { public: - typedef Protobuf::RepeatedPtrField ResourceVector; - - virtual ~SubscriptionCallbacks() {} + virtual 
~SubscriptionCallbacks() = default; /** * Called when a configuration update is received. @@ -27,7 +25,7 @@ template class SubscriptionCallbacks { * is accepted. Accepted configurations have their version_info reflected in subsequent * requests. */ - virtual void onConfigUpdate(const ResourceVector& resources, + virtual void onConfigUpdate(const Protobuf::RepeatedPtrField& resources, const std::string& version_info) PURE; // TODO(fredlas) it is a HACK that there are two of these. After delta CDS is merged, @@ -62,12 +60,11 @@ template class SubscriptionCallbacks { /** * Common abstraction for subscribing to versioned config updates. This may be implemented via bidi - * gRPC streams, periodic/long polling REST or inotify filesystem updates. ResourceType is expected - * to be a protobuf serializable object. + * gRPC streams, periodic/long polling REST or inotify filesystem updates. */ -template class Subscription { +class Subscription { public: - virtual ~Subscription() {} + virtual ~Subscription() = default; /** * Start a configuration subscription asynchronously. This should be called once and will continue @@ -77,7 +74,7 @@ template class Subscription { * result in the deletion of the Subscription object. */ virtual void start(const std::vector& resources, - SubscriptionCallbacks& callbacks) PURE; + SubscriptionCallbacks& callbacks) PURE; /** * Update the resources to fetch. diff --git a/source/common/config/delta_subscription_impl.h b/source/common/config/delta_subscription_impl.h index deeceec73d7c8..5cd583f357a31 100644 --- a/source/common/config/delta_subscription_impl.h +++ b/source/common/config/delta_subscription_impl.h @@ -28,23 +28,21 @@ struct ResourceNameDiff { * Manages the logic of a (non-aggregated) delta xDS subscription. * TODO(fredlas) add aggregation support. 
*/ -template class DeltaSubscriptionImpl - : public Subscription, + : public Subscription, public GrpcStream { public: DeltaSubscriptionImpl(const LocalInfo::LocalInfo& local_info, Grpc::AsyncClientPtr async_client, Event::Dispatcher& dispatcher, const Protobuf::MethodDescriptor& service_method, - Runtime::RandomGenerator& random, Stats::Scope& scope, - const RateLimitSettings& rate_limit_settings, SubscriptionStats stats, - std::chrono::milliseconds init_fetch_timeout) + absl::string_view type_url, Runtime::RandomGenerator& random, + Stats::Scope& scope, const RateLimitSettings& rate_limit_settings, + SubscriptionStats stats, std::chrono::milliseconds init_fetch_timeout) : GrpcStream(std::move(async_client), service_method, random, dispatcher, scope, rate_limit_settings), - type_url_(Grpc::Common::typeUrl(ResourceType().GetDescriptor()->full_name())), - local_info_(local_info), stats_(stats), dispatcher_(dispatcher), + type_url_(type_url), local_info_(local_info), stats_(stats), dispatcher_(dispatcher), init_fetch_timeout_(init_fetch_timeout) { request_.set_type_url(type_url_); request_.mutable_node()->MergeFrom(local_info_.node()); @@ -193,8 +191,7 @@ class DeltaSubscriptionImpl } // Config::DeltaSubscription - void start(const std::vector& resources, - SubscriptionCallbacks& callbacks) override { + void start(const std::vector& resources, SubscriptionCallbacks& callbacks) override { callbacks_ = &callbacks; if (init_fetch_timeout_.count() > 0) { @@ -271,7 +268,7 @@ class DeltaSubscriptionImpl std::unordered_set resource_names_; const std::string type_url_; - SubscriptionCallbacks* callbacks_{}; + SubscriptionCallbacks* callbacks_{}; // In-flight or previously sent request. envoy::api::v2::DeltaDiscoveryRequest request_; // Paused via pause()? 
diff --git a/source/common/config/filesystem_subscription_impl.h b/source/common/config/filesystem_subscription_impl.h index 27b7540d27f3c..5a9344d994876 100644 --- a/source/common/config/filesystem_subscription_impl.h +++ b/source/common/config/filesystem_subscription_impl.h @@ -18,13 +18,12 @@ namespace Config { /** * Filesystem inotify implementation of the API Subscription interface. This allows the API to be * consumed on filesystem changes to files containing the JSON canonical representation of - * lists of ResourceType. + * lists of xDS resources. */ -template -class FilesystemSubscriptionImpl : public Config::Subscription, +class FilesystemSubscriptionImpl : public Config::Subscription, Logger::Loggable { public: - FilesystemSubscriptionImpl(Event::Dispatcher& dispatcher, const std::string& path, + FilesystemSubscriptionImpl(Event::Dispatcher& dispatcher, absl::string_view path, SubscriptionStats stats, Api::Api& api) : path_(path), watcher_(dispatcher.createFilesystemWatcher()), stats_(stats), api_(api) { watcher_->addWatch(path_, Filesystem::Watcher::Events::MovedTo, [this](uint32_t events) { @@ -37,7 +36,7 @@ class FilesystemSubscriptionImpl : public Config::Subscription, // Config::Subscription void start(const std::vector& resources, - Config::SubscriptionCallbacks& callbacks) override { + Config::SubscriptionCallbacks& callbacks) override { // We report all discovered resources in the watched file. 
UNREFERENCED_PARAMETER(resources); callbacks_ = &callbacks; @@ -61,9 +60,8 @@ class FilesystemSubscriptionImpl : public Config::Subscription, try { envoy::api::v2::DiscoveryResponse message; MessageUtil::loadFromFile(path_, message, api_); - const auto typed_resources = Config::Utility::getTypedResources(message); config_update_available = true; - callbacks_->onConfigUpdate(typed_resources, message.version_info()); + callbacks_->onConfigUpdate(message.resources(), message.version_info()); stats_.version_.set(HashUtil::xxHash64(message.version_info())); stats_.update_success_.inc(); ENVOY_LOG(debug, "Filesystem config update accepted for {}: {}", path_, @@ -83,7 +81,7 @@ class FilesystemSubscriptionImpl : public Config::Subscription, bool started_{}; const std::string path_; std::unique_ptr watcher_; - SubscriptionCallbacks* callbacks_{}; + SubscriptionCallbacks* callbacks_{}; SubscriptionStats stats_; Api::Api& api_; }; diff --git a/source/common/config/grpc_mux_subscription_impl.h b/source/common/config/grpc_mux_subscription_impl.h index 1da9a2f6a891d..5526651ae7f87 100644 --- a/source/common/config/grpc_mux_subscription_impl.h +++ b/source/common/config/grpc_mux_subscription_impl.h @@ -16,20 +16,18 @@ namespace Config { /** * Adapter from typed Subscription to untyped GrpcMux. Also handles per-xDS API stats/logging. 
*/ -template -class GrpcMuxSubscriptionImpl : public Subscription, +class GrpcMuxSubscriptionImpl : public Subscription, GrpcMuxCallbacks, Logger::Loggable { public: - GrpcMuxSubscriptionImpl(GrpcMux& grpc_mux, SubscriptionStats stats, Event::Dispatcher& dispatcher, + GrpcMuxSubscriptionImpl(GrpcMux& grpc_mux, SubscriptionStats stats, absl::string_view type_url, + Event::Dispatcher& dispatcher, std::chrono::milliseconds init_fetch_timeout) - : grpc_mux_(grpc_mux), stats_(stats), - type_url_(Grpc::Common::typeUrl(ResourceType().GetDescriptor()->full_name())), - dispatcher_(dispatcher), init_fetch_timeout_(init_fetch_timeout) {} + : grpc_mux_(grpc_mux), stats_(stats), type_url_(type_url), dispatcher_(dispatcher), + init_fetch_timeout_(init_fetch_timeout) {} // Config::Subscription - void start(const std::vector& resources, - SubscriptionCallbacks& callbacks) override { + void start(const std::vector& resources, SubscriptionCallbacks& callbacks) override { callbacks_ = &callbacks; if (init_fetch_timeout_.count() > 0) { @@ -56,21 +54,16 @@ class GrpcMuxSubscriptionImpl : public Subscription, void onConfigUpdate(const Protobuf::RepeatedPtrField& resources, const std::string& version_info) override { disableInitFetchTimeoutTimer(); - Protobuf::RepeatedPtrField typed_resources; - std::transform(resources.cbegin(), resources.cend(), - Protobuf::RepeatedPtrFieldBackInserter(&typed_resources), - MessageUtil::anyConvert); // TODO(mattklein123): In the future if we start tracking per-resource versions, we need to // supply those versions to onConfigUpdate() along with the xDS response ("system") // version_info. This way, both types of versions can be tracked and exposed for debugging by // the configuration update targets. 
- callbacks_->onConfigUpdate(typed_resources, version_info); + callbacks_->onConfigUpdate(resources, version_info); stats_.update_success_.inc(); stats_.update_attempt_.inc(); stats_.version_.set(HashUtil::xxHash64(version_info)); ENVOY_LOG(debug, "gRPC config for {} accepted with {} resources with version {}", type_url_, resources.size(), version_info); - ENVOY_LOG(trace, "resources: {}", RepeatedPtrUtil::debugString(typed_resources)); } void onConfigUpdateFailed(const EnvoyException* e) override { @@ -102,7 +95,7 @@ class GrpcMuxSubscriptionImpl : public Subscription, GrpcMux& grpc_mux_; SubscriptionStats stats_; const std::string type_url_; - SubscriptionCallbacks* callbacks_{}; + SubscriptionCallbacks* callbacks_{}; GrpcMuxWatchPtr watch_{}; Event::Dispatcher& dispatcher_; std::chrono::milliseconds init_fetch_timeout_; diff --git a/source/common/config/grpc_subscription_impl.h b/source/common/config/grpc_subscription_impl.h index 1a0d289bd1871..04b1b2aa6981f 100644 --- a/source/common/config/grpc_subscription_impl.h +++ b/source/common/config/grpc_subscription_impl.h @@ -12,21 +12,21 @@ namespace Envoy { namespace Config { -template -class GrpcSubscriptionImpl : public Config::Subscription { +class GrpcSubscriptionImpl : public Config::Subscription { public: GrpcSubscriptionImpl(const LocalInfo::LocalInfo& local_info, Grpc::AsyncClientPtr async_client, Event::Dispatcher& dispatcher, Runtime::RandomGenerator& random, - const Protobuf::MethodDescriptor& service_method, SubscriptionStats stats, - Stats::Scope& scope, const RateLimitSettings& rate_limit_settings, + const Protobuf::MethodDescriptor& service_method, absl::string_view type_url, + SubscriptionStats stats, Stats::Scope& scope, + const RateLimitSettings& rate_limit_settings, std::chrono::milliseconds init_fetch_timeout) : grpc_mux_(local_info, std::move(async_client), dispatcher, service_method, random, scope, rate_limit_settings), - grpc_mux_subscription_(grpc_mux_, stats, dispatcher, 
init_fetch_timeout) {} + grpc_mux_subscription_(grpc_mux_, stats, type_url, dispatcher, init_fetch_timeout) {} // Config::Subscription void start(const std::vector& resources, - Config::SubscriptionCallbacks& callbacks) override { + Config::SubscriptionCallbacks& callbacks) override { // Subscribe first, so we get failure callbacks if grpc_mux_.start() fails. grpc_mux_subscription_.start(resources, callbacks); grpc_mux_.start(); @@ -40,7 +40,7 @@ class GrpcSubscriptionImpl : public Config::Subscription { private: GrpcMuxImpl grpc_mux_; - GrpcMuxSubscriptionImpl grpc_mux_subscription_; + GrpcMuxSubscriptionImpl grpc_mux_subscription_; }; } // namespace Config diff --git a/source/common/config/http_subscription_impl.h b/source/common/config/http_subscription_impl.h index ccce1a91deaf1..0ae9e6a2287e0 100644 --- a/source/common/config/http_subscription_impl.h +++ b/source/common/config/http_subscription_impl.h @@ -27,9 +27,8 @@ namespace Config { * canonical representation of DiscoveryResponse. This implementation is responsible for translating * between the proto serializable objects in the Subscription API and the REST JSON representation. 
*/ -template class HttpSubscriptionImpl : public Http::RestApiFetcher, - public Config::Subscription, + public Config::Subscription, Logger::Loggable { public: HttpSubscriptionImpl(const LocalInfo::LocalInfo& local_info, Upstream::ClusterManager& cm, @@ -50,7 +49,7 @@ class HttpSubscriptionImpl : public Http::RestApiFetcher, // Config::Subscription void start(const std::vector& resources, - Config::SubscriptionCallbacks& callbacks) override { + Config::SubscriptionCallbacks& callbacks) override { ASSERT(callbacks_ == nullptr); if (init_fetch_timeout_.count() > 0) { @@ -97,9 +96,8 @@ class HttpSubscriptionImpl : public Http::RestApiFetcher, handleFailure(nullptr); return; } - const auto typed_resources = Config::Utility::getTypedResources(message); try { - callbacks_->onConfigUpdate(typed_resources, message.version_info()); + callbacks_->onConfigUpdate(message.resources(), message.version_info()); request_.set_version_info(message.version_info()); stats_.version_.set(HashUtil::xxHash64(request_.version_info())); stats_.update_success_.inc(); @@ -133,7 +131,7 @@ class HttpSubscriptionImpl : public Http::RestApiFetcher, std::string path_; Protobuf::RepeatedPtrField resources_; - Config::SubscriptionCallbacks* callbacks_{}; + Config::SubscriptionCallbacks* callbacks_{}; envoy::api::v2::DiscoveryRequest request_; SubscriptionStats stats_; Event::Dispatcher& dispatcher_; diff --git a/source/common/config/subscription_factory.h b/source/common/config/subscription_factory.h index 36857024f5309..2228ee36c37b8 100644 --- a/source/common/config/subscription_factory.h +++ b/source/common/config/subscription_factory.h @@ -37,19 +37,18 @@ class SubscriptionFactory { * service description). 
* @param api reference to the Api object */ - template - static std::unique_ptr> subscriptionFromConfigSource( + static std::unique_ptr subscriptionFromConfigSource( const envoy::api::v2::core::ConfigSource& config, const LocalInfo::LocalInfo& local_info, Event::Dispatcher& dispatcher, Upstream::ClusterManager& cm, Runtime::RandomGenerator& random, Stats::Scope& scope, const std::string& rest_method, const std::string& grpc_method, - Api::Api& api) { - std::unique_ptr> result; + absl::string_view type_url, Api::Api& api) { + std::unique_ptr result; SubscriptionStats stats = Utility::generateStats(scope); switch (config.config_source_specifier_case()) { case envoy::api::v2::core::ConfigSource::kPath: { Utility::checkFilesystemSubscriptionBackingPath(config.path(), api); - result.reset(new Config::FilesystemSubscriptionImpl(dispatcher, config.path(), - stats, api)); + result = std::make_unique(dispatcher, config.path(), + stats, api); break; } case envoy::api::v2::core::ConfigSource::kApiConfigSource: { @@ -62,34 +61,34 @@ class SubscriptionFactory { "Please specify an explicit supported api_type in the following config:\n" + config.DebugString()); case envoy::api::v2::core::ApiConfigSource::REST: - result.reset(new HttpSubscriptionImpl( + result = std::make_unique( local_info, cm, api_config_source.cluster_names()[0], dispatcher, random, Utility::apiConfigSourceRefreshDelay(api_config_source), Utility::apiConfigSourceRequestTimeout(api_config_source), *Protobuf::DescriptorPool::generated_pool()->FindMethodByName(rest_method), stats, - Utility::configSourceInitialFetchTimeout(config))); + Utility::configSourceInitialFetchTimeout(config)); break; case envoy::api::v2::core::ApiConfigSource::GRPC: - result.reset(new GrpcSubscriptionImpl( + result = std::make_unique( local_info, Config::Utility::factoryForGrpcApiConfigSource(cm.grpcAsyncClientManager(), api_config_source, scope) ->create(), dispatcher, random, - 
*Protobuf::DescriptorPool::generated_pool()->FindMethodByName(grpc_method), stats, - scope, Utility::parseRateLimitSettings(api_config_source), - Utility::configSourceInitialFetchTimeout(config))); + *Protobuf::DescriptorPool::generated_pool()->FindMethodByName(grpc_method), type_url, + stats, scope, Utility::parseRateLimitSettings(api_config_source), + Utility::configSourceInitialFetchTimeout(config)); break; case envoy::api::v2::core::ApiConfigSource::DELTA_GRPC: { Utility::checkApiConfigSourceSubscriptionBackingCluster(cm.clusters(), api_config_source); - result.reset(new DeltaSubscriptionImpl( + result = std::make_unique( local_info, Config::Utility::factoryForGrpcApiConfigSource(cm.grpcAsyncClientManager(), api_config_source, scope) ->create(), dispatcher, *Protobuf::DescriptorPool::generated_pool()->FindMethodByName(grpc_method), - random, scope, Utility::parseRateLimitSettings(api_config_source), stats, - Utility::configSourceInitialFetchTimeout(config))); + type_url, random, scope, Utility::parseRateLimitSettings(api_config_source), stats, + Utility::configSourceInitialFetchTimeout(config)); break; } default: @@ -98,8 +97,9 @@ class SubscriptionFactory { break; } case envoy::api::v2::core::ConfigSource::kAds: { - result.reset(new GrpcMuxSubscriptionImpl( - cm.adsMux(), stats, dispatcher, Utility::configSourceInitialFetchTimeout(config))); + result = std::make_unique( + cm.adsMux(), stats, type_url, dispatcher, + Utility::configSourceInitialFetchTimeout(config)); break; } default: diff --git a/source/common/router/rds_impl.cc b/source/common/router/rds_impl.cc index 9393b7190f77e..2aa821ad838ee 100644 --- a/source/common/router/rds_impl.cc +++ b/source/common/router/rds_impl.cc @@ -69,12 +69,13 @@ RdsRouteConfigSubscription::RdsRouteConfigSubscription( last_updated_(factory_context.timeSource().systemTime()) { Envoy::Config::Utility::checkLocalInfo("rds", factory_context.localInfo()); - subscription_ = 
Envoy::Config::SubscriptionFactory::subscriptionFromConfigSource< - envoy::api::v2::RouteConfiguration>( + subscription_ = Envoy::Config::SubscriptionFactory::subscriptionFromConfigSource( rds.config_source(), factory_context.localInfo(), factory_context.dispatcher(), factory_context.clusterManager(), factory_context.random(), *scope_, "envoy.api.v2.RouteDiscoveryService.FetchRoutes", - "envoy.api.v2.RouteDiscoveryService.StreamRoutes", factory_context.api()); + "envoy.api.v2.RouteDiscoveryService.StreamRoutes", + Grpc::Common::typeUrl(envoy::api::v2::RouteConfiguration().GetDescriptor()->full_name()), + factory_context.api()); } RdsRouteConfigSubscription::~RdsRouteConfigSubscription() { @@ -88,8 +89,9 @@ RdsRouteConfigSubscription::~RdsRouteConfigSubscription() { route_config_provider_manager_.route_config_subscriptions_.erase(manager_identifier_); } -void RdsRouteConfigSubscription::onConfigUpdate(const ResourceVector& resources, - const std::string& version_info) { +void RdsRouteConfigSubscription::onConfigUpdate( + const Protobuf::RepeatedPtrField& resources, + const std::string& version_info) { last_updated_ = time_source_.systemTime(); if (resources.empty()) { @@ -101,7 +103,7 @@ void RdsRouteConfigSubscription::onConfigUpdate(const ResourceVector& resources, if (resources.size() != 1) { throw EnvoyException(fmt::format("Unexpected RDS resource length: {}", resources.size())); } - const auto& route_config = resources[0]; + auto route_config = MessageUtil::anyConvert(resources[0]); MessageUtil::validate(route_config); // TODO(PiotrSikora): Remove this hack once fixed internally. 
if (!(route_config.name() == route_config_name_)) { diff --git a/source/common/router/rds_impl.h b/source/common/router/rds_impl.h index d665a3aed8303..a435e564bc5cf 100644 --- a/source/common/router/rds_impl.h +++ b/source/common/router/rds_impl.h @@ -93,15 +93,15 @@ class RdsRouteConfigProviderImpl; * A class that fetches the route configuration dynamically using the RDS API and updates them to * RDS config providers. */ -class RdsRouteConfigSubscription - : Envoy::Config::SubscriptionCallbacks, - Logger::Loggable { +class RdsRouteConfigSubscription : Envoy::Config::SubscriptionCallbacks, + Logger::Loggable { public: - ~RdsRouteConfigSubscription(); + ~RdsRouteConfigSubscription() override; // Config::SubscriptionCallbacks // TODO(fredlas) deduplicate - void onConfigUpdate(const ResourceVector& resources, const std::string& version_info) override; + void onConfigUpdate(const Protobuf::RepeatedPtrField& resources, + const std::string& version_info) override; void onConfigUpdate(const Protobuf::RepeatedPtrField&, const Protobuf::RepeatedPtrField&, const std::string&) override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; @@ -123,9 +123,9 @@ class RdsRouteConfigSubscription const std::string& stat_prefix, RouteConfigProviderManagerImpl& route_config_provider_manager); + std::unique_ptr subscription_; const std::string route_config_name_; Init::TargetImpl init_target_; - std::unique_ptr> subscription_; Stats::ScopePtr scope_; RdsStats stats_; RouteConfigProviderManagerImpl& route_config_provider_manager_; diff --git a/source/common/secret/sds_api.cc b/source/common/secret/sds_api.cc index 10f04d5ef6281..de62381c29f74 100644 --- a/source/common/secret/sds_api.cc +++ b/source/common/secret/sds_api.cc @@ -29,7 +29,8 @@ SdsApi::SdsApi(const LocalInfo::LocalInfo& local_info, Event::Dispatcher& dispat init_manager.add(init_target_); } -void SdsApi::onConfigUpdate(const ResourceVector& resources, const std::string&) { +void SdsApi::onConfigUpdate(const Protobuf::RepeatedPtrField& 
resources, + const std::string&) { if (resources.empty()) { throw EnvoyException( fmt::format("Missing SDS resources for {} in onConfigUpdate()", sds_config_name_)); @@ -37,7 +38,7 @@ void SdsApi::onConfigUpdate(const ResourceVector& resources, const std::string&) if (resources.size() != 1) { throw EnvoyException(fmt::format("Unexpected SDS secrets length: {}", resources.size())); } - const auto& secret = resources[0]; + auto secret = MessageUtil::anyConvert(resources[0]); MessageUtil::validate(secret); // Wrap sds_config_name_ in string_view to deal with proto string/std::string incompatibility @@ -64,11 +65,11 @@ void SdsApi::onConfigUpdateFailed(const EnvoyException*) { } void SdsApi::initialize() { - subscription_ = Envoy::Config::SubscriptionFactory::subscriptionFromConfigSource< - envoy::api::v2::auth::Secret>( + subscription_ = Envoy::Config::SubscriptionFactory::subscriptionFromConfigSource( sds_config_, local_info_, dispatcher_, cluster_manager_, random_, stats_, "envoy.service.discovery.v2.SecretDiscoveryService.FetchSecrets", - "envoy.service.discovery.v2.SecretDiscoveryService.StreamSecrets", api_); + "envoy.service.discovery.v2.SecretDiscoveryService.StreamSecrets", + Grpc::Common::typeUrl(envoy::api::v2::auth::Secret().GetDescriptor()->full_name()), api_); subscription_->start({sds_config_name_}, *this); } diff --git a/source/common/secret/sds_api.h b/source/common/secret/sds_api.h index bb6132febd181..13d7790ed157c 100644 --- a/source/common/secret/sds_api.h +++ b/source/common/secret/sds_api.h @@ -28,7 +28,7 @@ namespace Secret { /** * SDS API implementation that fetches secrets from SDS server via Subscription. 
*/ -class SdsApi : public Config::SubscriptionCallbacks { +class SdsApi : public Config::SubscriptionCallbacks { public: SdsApi(const LocalInfo::LocalInfo& local_info, Event::Dispatcher& dispatcher, Runtime::RandomGenerator& random, Stats::Store& stats, @@ -38,7 +38,8 @@ class SdsApi : public Config::SubscriptionCallbacks& resources, + const std::string& version_info) override; void onConfigUpdate(const Protobuf::RepeatedPtrField&, const Protobuf::RepeatedPtrField&, const std::string&) override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; @@ -64,7 +65,7 @@ class SdsApi : public Config::SubscriptionCallbacks> subscription_; + std::unique_ptr subscription_; const std::string sds_config_name_; uint64_t secret_hash_; diff --git a/source/common/upstream/cds_api_impl.cc b/source/common/upstream/cds_api_impl.cc index d62b938bb4d88..79a888a26dc6a 100644 --- a/source/common/upstream/cds_api_impl.cc +++ b/source/common/upstream/cds_api_impl.cc @@ -34,23 +34,26 @@ CdsApiImpl::CdsApiImpl(const envoy::api::v2::core::ConfigSource& cds_config, Clu envoy::api::v2::core::ApiConfigSource::DELTA_GRPC); const std::string grpc_method = is_delta ? 
"envoy.api.v2.ClusterDiscoveryService.DeltaClusters" : "envoy.api.v2.ClusterDiscoveryService.StreamClusters"; - subscription_ = - Config::SubscriptionFactory::subscriptionFromConfigSource( - cds_config, local_info, dispatcher, cm, random, *scope_, - "envoy.api.v2.ClusterDiscoveryService.FetchClusters", grpc_method, api); + subscription_ = Config::SubscriptionFactory::subscriptionFromConfigSource( + cds_config, local_info, dispatcher, cm, random, *scope_, + "envoy.api.v2.ClusterDiscoveryService.FetchClusters", grpc_method, + Grpc::Common::typeUrl(envoy::api::v2::Cluster().GetDescriptor()->full_name()), api); } -void CdsApiImpl::onConfigUpdate(const ResourceVector& resources, const std::string& version_info) { +void CdsApiImpl::onConfigUpdate(const Protobuf::RepeatedPtrField& resources, + const std::string& version_info) { ClusterManager::ClusterInfoMap clusters_to_remove = cm_.clusters(); - for (const auto& cluster : resources) { - clusters_to_remove.erase(cluster.name()); + std::vector clusters; + for (const auto& cluster_blob : resources) { + clusters.push_back(MessageUtil::anyConvert(cluster_blob)); + clusters_to_remove.erase(clusters.back().name()); } Protobuf::RepeatedPtrField to_remove_repeated; for (const auto& cluster : clusters_to_remove) { *to_remove_repeated.Add() = cluster.first; } Protobuf::RepeatedPtrField to_add_repeated; - for (const auto& cluster : resources) { + for (const auto& cluster : clusters) { envoy::api::v2::Resource* to_add = to_add_repeated.Add(); to_add->set_name(cluster.name()); to_add->set_version(version_info); diff --git a/source/common/upstream/cds_api_impl.h b/source/common/upstream/cds_api_impl.h index 77e215259fd75..2550577880e57 100644 --- a/source/common/upstream/cds_api_impl.h +++ b/source/common/upstream/cds_api_impl.h @@ -19,7 +19,7 @@ namespace Upstream { * CDS API implementation that fetches via Subscription. 
*/ class CdsApiImpl : public CdsApi, - Config::SubscriptionCallbacks, + Config::SubscriptionCallbacks, Logger::Loggable { public: static CdsApiPtr create(const envoy::api::v2::core::ConfigSource& cds_config, ClusterManager& cm, @@ -36,7 +36,8 @@ class CdsApiImpl : public CdsApi, // Config::SubscriptionCallbacks // TODO(fredlas) deduplicate - void onConfigUpdate(const ResourceVector& resources, const std::string& version_info) override; + void onConfigUpdate(const Protobuf::RepeatedPtrField& resources, + const std::string& version_info) override; void onConfigUpdate(const Protobuf::RepeatedPtrField&, const Protobuf::RepeatedPtrField&, const std::string&) override; void onConfigUpdateFailed(const EnvoyException* e) override; @@ -51,7 +52,7 @@ class CdsApiImpl : public CdsApi, void runInitializeCallbackIfAny(); ClusterManager& cm_; - std::unique_ptr> subscription_; + std::unique_ptr subscription_; std::string system_version_info_; std::function initialize_callback_; Stats::ScopePtr scope_; diff --git a/source/common/upstream/eds.cc b/source/common/upstream/eds.cc index 91c12b9695d15..7735114e50665 100644 --- a/source/common/upstream/eds.cc +++ b/source/common/upstream/eds.cc @@ -23,11 +23,12 @@ EdsClusterImpl::EdsClusterImpl( Event::Dispatcher& dispatcher = factory_context.dispatcher(); Runtime::RandomGenerator& random = factory_context.random(); Upstream::ClusterManager& cm = factory_context.clusterManager(); - subscription_ = Config::SubscriptionFactory::subscriptionFromConfigSource< - envoy::api::v2::ClusterLoadAssignment>( + subscription_ = Config::SubscriptionFactory::subscriptionFromConfigSource( eds_config, local_info_, dispatcher, cm, random, info_->statsScope(), "envoy.api.v2.EndpointDiscoveryService.FetchEndpoints", - "envoy.api.v2.EndpointDiscoveryService.StreamEndpoints", factory_context.api()); + "envoy.api.v2.EndpointDiscoveryService.StreamEndpoints", + Grpc::Common::typeUrl(envoy::api::v2::ClusterLoadAssignment().GetDescriptor()->full_name()), + 
factory_context.api()); } void EdsClusterImpl::startPreInit() { subscription_->start({cluster_name_}, *this); } @@ -97,7 +98,8 @@ void EdsClusterImpl::BatchUpdateHelper::batchUpdate(PrioritySet::HostUpdateCb& h parent_.onPreInitComplete(); } -void EdsClusterImpl::onConfigUpdate(const ResourceVector& resources, const std::string&) { +void EdsClusterImpl::onConfigUpdate(const Protobuf::RepeatedPtrField& resources, + const std::string&) { if (resources.empty()) { ENVOY_LOG(debug, "Missing ClusterLoadAssignment for {} in onConfigUpdate()", cluster_name_); info_->stats().update_empty_.inc(); @@ -107,7 +109,8 @@ void EdsClusterImpl::onConfigUpdate(const ResourceVector& resources, const std:: if (resources.size() != 1) { throw EnvoyException(fmt::format("Unexpected EDS resource length: {}", resources.size())); } - const auto& cluster_load_assignment = resources[0]; + auto cluster_load_assignment = + MessageUtil::anyConvert(resources[0]); MessageUtil::validate(cluster_load_assignment); // TODO(PiotrSikora): Remove this hack once fixed internally. if (!(cluster_load_assignment.cluster_name() == cluster_name_)) { diff --git a/source/common/upstream/eds.h b/source/common/upstream/eds.h index 197d654a57815..2194ef2d22344 100644 --- a/source/common/upstream/eds.h +++ b/source/common/upstream/eds.h @@ -19,8 +19,7 @@ namespace Upstream { /** * Cluster implementation that reads host information from the Endpoint Discovery Service. 
*/ -class EdsClusterImpl : public BaseDynamicClusterImpl, - Config::SubscriptionCallbacks { +class EdsClusterImpl : public BaseDynamicClusterImpl, Config::SubscriptionCallbacks { public: EdsClusterImpl(const envoy::api::v2::Cluster& cluster, Runtime::Loader& runtime, @@ -32,7 +31,8 @@ class EdsClusterImpl : public BaseDynamicClusterImpl, // Config::SubscriptionCallbacks // TODO(fredlas) deduplicate - void onConfigUpdate(const ResourceVector& resources, const std::string& version_info) override; + void onConfigUpdate(const Protobuf::RepeatedPtrField& resources, + const std::string& version_info) override; void onConfigUpdate(const Protobuf::RepeatedPtrField&, const Protobuf::RepeatedPtrField&, const std::string&) override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; @@ -69,7 +69,7 @@ class EdsClusterImpl : public BaseDynamicClusterImpl, }; const ClusterManager& cm_; - std::unique_ptr> subscription_; + std::unique_ptr subscription_; const LocalInfo::LocalInfo& local_info_; const std::string cluster_name_; std::vector locality_weights_map_; diff --git a/source/server/lds_api.cc b/source/server/lds_api.cc index 10aa4b611d121..92c4ec6824278 100644 --- a/source/server/lds_api.cc +++ b/source/server/lds_api.cc @@ -22,28 +22,32 @@ LdsApiImpl::LdsApiImpl(const envoy::api::v2::core::ConfigSource& lds_config, ListenerManager& lm, Api::Api& api) : listener_manager_(lm), scope_(scope.createScope("listener_manager.lds.")), cm_(cm), init_target_("LDS", [this]() { subscription_->start({}, *this); }) { - subscription_ = - Envoy::Config::SubscriptionFactory::subscriptionFromConfigSource( - lds_config, local_info, dispatcher, cm, random, *scope_, - "envoy.api.v2.ListenerDiscoveryService.FetchListeners", - "envoy.api.v2.ListenerDiscoveryService.StreamListeners", api); + subscription_ = Envoy::Config::SubscriptionFactory::subscriptionFromConfigSource( + lds_config, local_info, dispatcher, cm, random, *scope_, + "envoy.api.v2.ListenerDiscoveryService.FetchListeners", + 
"envoy.api.v2.ListenerDiscoveryService.StreamListeners", + Grpc::Common::typeUrl(envoy::api::v2::Listener().GetDescriptor()->full_name()), api); Config::Utility::checkLocalInfo("lds", local_info); init_manager.add(init_target_); } -void LdsApiImpl::onConfigUpdate(const ResourceVector& resources, const std::string& version_info) { +void LdsApiImpl::onConfigUpdate(const Protobuf::RepeatedPtrField& resources, + const std::string& version_info) { cm_.adsMux().pause(Config::TypeUrl::get().RouteConfiguration); Cleanup rds_resume([this] { cm_.adsMux().resume(Config::TypeUrl::get().RouteConfiguration); }); + + std::vector listeners; + for (const auto& listener_blob : resources) { + listeners.push_back(MessageUtil::anyConvert(listener_blob)); + MessageUtil::validate(listeners.back()); + } std::vector exception_msgs; std::unordered_set listener_names; - for (const auto& listener : resources) { + for (const auto& listener : listeners) { if (!listener_names.insert(listener.name()).second) { throw EnvoyException(fmt::format("duplicate listener {} found", listener.name())); } } - for (const auto& listener : resources) { - MessageUtil::validate(listener); - } // We need to keep track of which listeners we might need to remove. 
std::unordered_map> listeners_to_remove; @@ -54,7 +58,7 @@ void LdsApiImpl::onConfigUpdate(const ResourceVector& resources, const std::stri for (const auto& listener : listener_manager_.listeners()) { listeners_to_remove.emplace(listener.get().name(), listener); } - for (const auto& listener : resources) { + for (const auto& listener : listeners) { listeners_to_remove.erase(listener.name()); } for (const auto& listener : listeners_to_remove) { @@ -63,8 +67,8 @@ void LdsApiImpl::onConfigUpdate(const ResourceVector& resources, const std::stri } } - for (const auto& listener : resources) { - const std::string listener_name = listener.name(); + for (const auto& listener : listeners) { + const std::string& listener_name = listener.name(); try { if (listener_manager_.addOrUpdateListener(listener, version_info, true)) { ENVOY_LOG(info, "lds: add/update listener '{}'", listener_name); diff --git a/source/server/lds_api.h b/source/server/lds_api.h index 647e5664ea240..859d26a641b71 100644 --- a/source/server/lds_api.h +++ b/source/server/lds_api.h @@ -19,7 +19,7 @@ namespace Server { * LDS API implementation that fetches via Subscription. 
*/ class LdsApiImpl : public LdsApi, - Config::SubscriptionCallbacks, + Config::SubscriptionCallbacks, Logger::Loggable { public: LdsApiImpl(const envoy::api::v2::core::ConfigSource& lds_config, Upstream::ClusterManager& cm, @@ -32,7 +32,8 @@ class LdsApiImpl : public LdsApi, // Config::SubscriptionCallbacks // TODO(fredlas) deduplicate - void onConfigUpdate(const ResourceVector& resources, const std::string& version_info) override; + void onConfigUpdate(const Protobuf::RepeatedPtrField& resources, + const std::string& version_info) override; void onConfigUpdate(const Protobuf::RepeatedPtrField&, const Protobuf::RepeatedPtrField&, const std::string&) override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; @@ -43,7 +44,7 @@ class LdsApiImpl : public LdsApi, } private: - std::unique_ptr> subscription_; + std::unique_ptr subscription_; std::string version_info_; ListenerManager& listener_manager_; Stats::ScopePtr scope_; diff --git a/test/common/config/config_provider_impl_test.cc b/test/common/config/config_provider_impl_test.cc index 56d6ab008304f..306c09c015a79 100644 --- a/test/common/config/config_provider_impl_test.cc +++ b/test/common/config/config_provider_impl_test.cc @@ -38,9 +38,8 @@ class StaticDummyConfigProvider : public ImmutableConfigProviderImplBase { test::common::config::DummyConfig config_proto_; }; -class DummyConfigSubscription - : public ConfigSubscriptionInstanceBase, - Envoy::Config::SubscriptionCallbacks { +class DummyConfigSubscription : public ConfigSubscriptionInstanceBase, + Envoy::Config::SubscriptionCallbacks { public: DummyConfigSubscription(const uint64_t manager_identifier, Server::Configuration::FactoryContext& factory_context, @@ -53,8 +52,9 @@ class DummyConfigSubscription // Envoy::Config::SubscriptionCallbacks // TODO(fredlas) deduplicate - void onConfigUpdate(const ResourceVector& resources, const std::string& version_info) override { - const auto& config = resources[0]; + void onConfigUpdate(const Protobuf::RepeatedPtrField& resources, + 
const std::string& version_info) override { + auto config = MessageUtil::anyConvert(resources[0]); if (checkAndApplyConfig(config, "dummy_config", version_info)) { config_proto_ = config; } @@ -246,12 +246,12 @@ TEST_F(ConfigProviderImplTest, SharedOwnership) { // No config protos have been received via the subscription yet. EXPECT_FALSE(provider1->configProtoInfo().has_value()); - Protobuf::RepeatedPtrField dummy_configs; - dummy_configs.Add()->MergeFrom(parseDummyConfigFromYaml("a: a dummy config")); + Protobuf::RepeatedPtrField untyped_dummy_configs; + untyped_dummy_configs.Add()->PackFrom(parseDummyConfigFromYaml("a: a dummy config")); DummyConfigSubscription& subscription = dynamic_cast(*provider1).subscription(); - subscription.onConfigUpdate(dummy_configs, "1"); + subscription.onConfigUpdate(untyped_dummy_configs, "1"); // Check that a newly created provider with the same config source will share // the subscription, config proto and resulting ConfigProvider::Config. @@ -278,7 +278,7 @@ TEST_F(ConfigProviderImplTest, SharedOwnership) { dynamic_cast(*provider3) .subscription() - .onConfigUpdate(dummy_configs, "provider3"); + .onConfigUpdate(untyped_dummy_configs, "provider3"); EXPECT_EQ(2UL, static_cast( provider_manager_->dumpConfigs().get()) @@ -346,13 +346,13 @@ TEST_F(ConfigProviderImplTest, ConfigDump) { config_source_proto, factory_context_, "dummy_prefix"); // Static + dynamic config dump. 
- Protobuf::RepeatedPtrField dummy_configs; - dummy_configs.Add()->MergeFrom(parseDummyConfigFromYaml("a: a dynamic dummy config")); + Protobuf::RepeatedPtrField untyped_dummy_configs; + untyped_dummy_configs.Add()->PackFrom(parseDummyConfigFromYaml("a: a dynamic dummy config")); timeSystem().setSystemTime(std::chrono::milliseconds(1234567891567)); DummyConfigSubscription& subscription = dynamic_cast(*dynamic_provider).subscription(); - subscription.onConfigUpdate(dummy_configs, "v1"); + subscription.onConfigUpdate(untyped_dummy_configs, "v1"); message_ptr = factory_context_.admin_.config_tracker_.config_tracker_callbacks_["dummy"](); const auto& dummy_config_dump3 = diff --git a/test/common/config/delta_subscription_test_harness.h b/test/common/config/delta_subscription_test_harness.h index 04ed8d553e4cf..1db65172761e3 100644 --- a/test/common/config/delta_subscription_test_harness.h +++ b/test/common/config/delta_subscription_test_harness.h @@ -22,8 +22,6 @@ namespace Envoy { namespace Config { namespace { -typedef DeltaSubscriptionImpl DeltaEdsSubscriptionImpl; - class DeltaSubscriptionTestHarness : public SubscriptionTestHarness { public: DeltaSubscriptionTestHarness() : DeltaSubscriptionTestHarness(std::chrono::milliseconds(0)) {} @@ -34,10 +32,10 @@ class DeltaSubscriptionTestHarness : public SubscriptionTestHarness { node_.set_id("fo0"); EXPECT_CALL(local_info_, node()).WillRepeatedly(testing::ReturnRef(node_)); EXPECT_CALL(dispatcher_, createTimer_(_)); - subscription_ = std::make_unique( + subscription_ = std::make_unique( local_info_, std::unique_ptr(async_client_), dispatcher_, - *method_descriptor_, random_, stats_store_, rate_limit_settings_, stats_, - init_fetch_timeout); + *method_descriptor_, Config::TypeUrl::get().ClusterLoadAssignment, random_, stats_store_, + rate_limit_settings_, stats_, init_fetch_timeout); } void startSubscription(const std::vector& cluster_names) override { @@ -73,6 +71,7 @@ class DeltaSubscriptionTestHarness : public 
SubscriptionTestHarness { error_detail->set_code(error_code); error_detail->set_message(error_message); } + std::cerr << "EXPECTING DiscoveryRequest: " << expected_request.DebugString() << std::endl; EXPECT_CALL(async_stream_, sendMessage(ProtoEq(expected_request), false)); } @@ -139,7 +138,7 @@ class DeltaSubscriptionTestHarness : public SubscriptionTestHarness { Runtime::MockRandomGenerator random_; NiceMock local_info_; Grpc::MockAsyncStream async_stream_; - std::unique_ptr subscription_; + std::unique_ptr subscription_; std::string last_response_nonce_; std::vector last_cluster_names_; Envoy::Config::RateLimitSettings rate_limit_settings_; @@ -150,4 +149,4 @@ class DeltaSubscriptionTestHarness : public SubscriptionTestHarness { } // namespace } // namespace Config -} // namespace Envoy \ No newline at end of file +} // namespace Envoy diff --git a/test/common/config/filesystem_subscription_impl_test.cc b/test/common/config/filesystem_subscription_impl_test.cc index 1c4a28baeeeb1..2feabde6921fd 100644 --- a/test/common/config/filesystem_subscription_impl_test.cc +++ b/test/common/config/filesystem_subscription_impl_test.cc @@ -42,7 +42,7 @@ TEST(MiscFilesystemSubscriptionImplTest, BadWatch) { auto* watcher = new Filesystem::MockWatcher(); EXPECT_CALL(dispatcher, createFilesystemWatcher_()).WillOnce(Return(watcher)); EXPECT_CALL(*watcher, addWatch(_, _, _)).WillOnce(Throw(EnvoyException("bad path"))); - EXPECT_THROW_WITH_MESSAGE(FilesystemEdsSubscriptionImpl(dispatcher, "##!@/dev/null", stats, *api), + EXPECT_THROW_WITH_MESSAGE(FilesystemSubscriptionImpl(dispatcher, "##!@/dev/null", stats, *api), EnvoyException, "bad path"); } diff --git a/test/common/config/filesystem_subscription_test_harness.h b/test/common/config/filesystem_subscription_test_harness.h index 15bcb6fcd7502..e5a8e85e8f237 100644 --- a/test/common/config/filesystem_subscription_test_harness.h +++ b/test/common/config/filesystem_subscription_test_harness.h @@ -24,9 +24,6 @@ using testing::Return; 
namespace Envoy { namespace Config { -typedef FilesystemSubscriptionImpl - FilesystemEdsSubscriptionImpl; - class FilesystemSubscriptionTestHarness : public SubscriptionTestHarness { public: FilesystemSubscriptionTestHarness() @@ -78,12 +75,7 @@ class FilesystemSubscriptionTestHarness : public SubscriptionTestHarness { file_json += "]}"; envoy::api::v2::DiscoveryResponse response_pb; MessageUtil::loadFromJson(file_json, response_pb); - EXPECT_CALL(callbacks_, - onConfigUpdate( - RepeatedProtoEq( - Config::Utility::getTypedResources( - response_pb)), - version)) + EXPECT_CALL(callbacks_, onConfigUpdate(RepeatedProtoEq(response_pb.resources()), version)) .WillOnce(ThrowOnRejectedConfig(accept)); if (accept) { version_ = version; @@ -123,7 +115,7 @@ class FilesystemSubscriptionTestHarness : public SubscriptionTestHarness { Api::ApiPtr api_; Event::DispatcherPtr dispatcher_; NiceMock> callbacks_; - FilesystemEdsSubscriptionImpl subscription_; + FilesystemSubscriptionImpl subscription_; bool file_at_start_{false}; }; diff --git a/test/common/config/grpc_subscription_test_harness.h b/test/common/config/grpc_subscription_test_harness.h index a01b4588be397..11b5a44208ae1 100644 --- a/test/common/config/grpc_subscription_test_harness.h +++ b/test/common/config/grpc_subscription_test_harness.h @@ -28,8 +28,6 @@ using testing::Return; namespace Envoy { namespace Config { -typedef GrpcSubscriptionImpl GrpcEdsSubscriptionImpl; - class GrpcSubscriptionTestHarness : public SubscriptionTestHarness { public: GrpcSubscriptionTestHarness() : GrpcSubscriptionTestHarness(std::chrono::milliseconds(0)) {} @@ -44,12 +42,13 @@ class GrpcSubscriptionTestHarness : public SubscriptionTestHarness { timer_cb_ = timer_cb; return timer_; })); - subscription_ = std::make_unique( + subscription_ = std::make_unique( local_info_, std::unique_ptr(async_client_), dispatcher_, random_, - *method_descriptor_, stats_, stats_store_, rate_limit_settings_, init_fetch_timeout); + *method_descriptor_, 
Config::TypeUrl::get().ClusterLoadAssignment, stats_, stats_store_, + rate_limit_settings_, init_fetch_timeout); } - ~GrpcSubscriptionTestHarness() { EXPECT_CALL(async_stream_, sendMessage(_, false)); } + ~GrpcSubscriptionTestHarness() override { EXPECT_CALL(async_stream_, sendMessage(_, false)); } void expectSendMessage(const std::vector& cluster_names, const std::string& version) override { @@ -106,7 +105,7 @@ class GrpcSubscriptionTestHarness : public SubscriptionTestHarness { response->add_resources()->PackFrom(*load_assignment); } } - EXPECT_CALL(callbacks_, onConfigUpdate(RepeatedProtoEq(typed_resources), version)) + EXPECT_CALL(callbacks_, onConfigUpdate(RepeatedProtoEq(response->resources()), version)) .WillOnce(ThrowOnRejectedConfig(accept)); if (accept) { expectSendMessage(last_cluster_names_, version); @@ -156,7 +155,7 @@ class GrpcSubscriptionTestHarness : public SubscriptionTestHarness { envoy::api::v2::core::Node node_; NiceMock> callbacks_; Grpc::MockAsyncStream async_stream_; - std::unique_ptr subscription_; + std::unique_ptr subscription_; std::string last_response_nonce_; std::vector last_cluster_names_; NiceMock local_info_; diff --git a/test/common/config/http_subscription_test_harness.h b/test/common/config/http_subscription_test_harness.h index ea660caada9f3..3eec9b65e749f 100644 --- a/test/common/config/http_subscription_test_harness.h +++ b/test/common/config/http_subscription_test_harness.h @@ -30,8 +30,6 @@ using testing::Return; namespace Envoy { namespace Config { -typedef HttpSubscriptionImpl HttpEdsSubscriptionImpl; - class HttpSubscriptionTestHarness : public SubscriptionTestHarness { public: HttpSubscriptionTestHarness() : HttpSubscriptionTestHarness(std::chrono::milliseconds(0)) {} @@ -46,7 +44,7 @@ class HttpSubscriptionTestHarness : public SubscriptionTestHarness { timer_cb_ = timer_cb; return timer_; })); - subscription_ = std::make_unique( + subscription_ = std::make_unique( local_info_, cm_, "eds_cluster", dispatcher_, 
random_gen_, std::chrono::milliseconds(1), std::chrono::milliseconds(1000), *method_descriptor_, stats_, init_fetch_timeout); } @@ -119,12 +117,7 @@ class HttpSubscriptionTestHarness : public SubscriptionTestHarness { Http::HeaderMapPtr response_headers{new Http::TestHeaderMapImpl{{":status", "200"}}}; Http::MessagePtr message{new Http::ResponseMessageImpl(std::move(response_headers))}; message->body() = std::make_unique(response_json); - EXPECT_CALL(callbacks_, - onConfigUpdate( - RepeatedProtoEq( - Config::Utility::getTypedResources( - response_pb)), - version)) + EXPECT_CALL(callbacks_, onConfigUpdate(RepeatedProtoEq(response_pb.resources()), version)) .WillOnce(ThrowOnRejectedConfig(accept)); if (!accept) { EXPECT_CALL(callbacks_, onConfigUpdateFailed(_)); @@ -172,7 +165,7 @@ class HttpSubscriptionTestHarness : public SubscriptionTestHarness { Http::MockAsyncClientRequest http_request_; Http::AsyncClient::Callbacks* http_callbacks_; Config::MockSubscriptionCallbacks callbacks_; - std::unique_ptr subscription_; + std::unique_ptr subscription_; NiceMock local_info_; Event::MockTimer* init_timeout_timer_; }; diff --git a/test/common/config/subscription_factory_test.cc b/test/common/config/subscription_factory_test.cc index 81babecf65c48..6c673162ed332 100644 --- a/test/common/config/subscription_factory_test.cc +++ b/test/common/config/subscription_factory_test.cc @@ -32,12 +32,13 @@ class SubscriptionFactoryTest : public testing::Test { SubscriptionFactoryTest() : http_request_(&cm_.async_client_), api_(Api::createApiForTest(stats_store_)) {} - std::unique_ptr> + std::unique_ptr subscriptionFromConfigSource(const envoy::api::v2::core::ConfigSource& config) { - return SubscriptionFactory::subscriptionFromConfigSource( + return SubscriptionFactory::subscriptionFromConfigSource( config, local_info_, dispatcher_, cm_, random_, stats_store_, "envoy.api.v2.EndpointDiscoveryService.FetchEndpoints", - "envoy.api.v2.EndpointDiscoveryService.StreamEndpoints", *api_); + 
"envoy.api.v2.EndpointDiscoveryService.StreamEndpoints", + Config::TypeUrl::get().ClusterLoadAssignment, *api_); } Upstream::MockClusterManager cm_; diff --git a/test/common/router/rds_impl_test.cc b/test/common/router/rds_impl_test.cc index 5aa1ac1c96e69..9ac30b18b9079 100644 --- a/test/common/router/rds_impl_test.cc +++ b/test/common/router/rds_impl_test.cc @@ -534,8 +534,8 @@ TEST_F(RouteConfigProviderManagerImplTest, Basic) { EXPECT_FALSE(provider_->configInfo().has_value()); - Protobuf::RepeatedPtrField route_configs; - route_configs.Add()->MergeFrom(parseRouteConfigurationFromV2Yaml(R"EOF( + Protobuf::RepeatedPtrField route_configs; + route_configs.Add()->PackFrom(parseRouteConfigurationFromV2Yaml(R"EOF( name: foo_route_config virtual_hosts: - name: bar @@ -614,10 +614,11 @@ name: foo_route_config TEST_F(RouteConfigProviderManagerImplTest, ValidateFail) { setup(); auto& provider_impl = dynamic_cast(*provider_.get()); - Protobuf::RepeatedPtrField route_configs; - auto* route_config = route_configs.Add(); - route_config->set_name("foo_route_config"); - route_config->mutable_virtual_hosts()->Add(); + Protobuf::RepeatedPtrField route_configs; + envoy::api::v2::RouteConfiguration route_config; + route_config.set_name("foo_route_config"); + route_config.mutable_virtual_hosts()->Add(); + route_configs.Add()->PackFrom(route_config); EXPECT_THROW(provider_impl.subscription().onConfigUpdate(route_configs, ""), ProtoValidationException); } @@ -636,7 +637,7 @@ TEST_F(RouteConfigProviderManagerImplTest, onConfigUpdateWrongSize) { setup(); factory_context_.init_manager_.initialize(init_watcher_); auto& provider_impl = dynamic_cast(*provider_.get()); - Protobuf::RepeatedPtrField route_configs; + Protobuf::RepeatedPtrField route_configs; route_configs.Add(); route_configs.Add(); EXPECT_CALL(init_watcher_, ready()); diff --git a/test/common/secret/sds_api_test.cc b/test/common/secret/sds_api_test.cc index 91108c6c07c0f..e324702f214b1 100644 --- 
a/test/common/secret/sds_api_test.cc +++ b/test/common/secret/sds_api_test.cc @@ -93,10 +93,11 @@ TEST_F(SdsApiTest, DynamicTlsCertificateUpdateSuccess) { private_key: filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_key.pem" )EOF"; + envoy::api::v2::auth::Secret typed_secret; + MessageUtil::loadFromYaml(TestEnvironment::substitute(yaml), typed_secret); + Protobuf::RepeatedPtrField secret_resources; + secret_resources.Add()->PackFrom(typed_secret); - Protobuf::RepeatedPtrField secret_resources; - auto secret_config = secret_resources.Add(); - MessageUtil::loadFromYaml(TestEnvironment::substitute(yaml), *secret_config); EXPECT_CALL(secret_callback, onAddOrUpdateSecret()); sds_api.onConfigUpdate(secret_resources, ""); @@ -136,9 +137,10 @@ TEST_F(SdsApiTest, DynamicCertificateValidationContextUpdateSuccess) { allow_expired_certificate: true )EOF"; - Protobuf::RepeatedPtrField secret_resources; - auto secret_config = secret_resources.Add(); - MessageUtil::loadFromYaml(TestEnvironment::substitute(yaml), *secret_config); + envoy::api::v2::auth::Secret typed_secret; + MessageUtil::loadFromYaml(TestEnvironment::substitute(yaml), typed_secret); + Protobuf::RepeatedPtrField secret_resources; + secret_resources.Add()->PackFrom(typed_secret); EXPECT_CALL(secret_callback, onAddOrUpdateSecret()); sds_api.onConfigUpdate(secret_resources, ""); @@ -184,10 +186,9 @@ TEST_F(SdsApiTest, DefaultCertificateValidationContextTest) { validation_callback.validateCvc(cvc); }); - Protobuf::RepeatedPtrField secret_resources; - auto* secret_config = secret_resources.Add(); - secret_config->set_name("abc.com"); - auto* dynamic_cvc = secret_config->mutable_validation_context(); + envoy::api::v2::auth::Secret typed_secret; + typed_secret.set_name("abc.com"); + auto* dynamic_cvc = typed_secret.mutable_validation_context(); dynamic_cvc->set_allow_expired_certificate(false); dynamic_cvc->mutable_trusted_ca()->set_filename(TestEnvironment::substitute( "{{ 
test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.pem")); @@ -197,6 +198,9 @@ TEST_F(SdsApiTest, DefaultCertificateValidationContextTest) { dynamic_cvc->add_verify_certificate_spki(dynamic_verify_certificate_spki); EXPECT_CALL(secret_callback, onAddOrUpdateSecret()); EXPECT_CALL(validation_callback, validateCvc(_)); + + Protobuf::RepeatedPtrField secret_resources; + secret_resources.Add()->PackFrom(typed_secret); sds_api.onConfigUpdate(secret_resources, ""); const std::string default_verify_certificate_hash = @@ -243,7 +247,7 @@ TEST_F(SdsApiTest, EmptyResource) { server.stats(), server.clusterManager(), init_manager, config_source, "abc.com", []() {}, *api_); - Protobuf::RepeatedPtrField secret_resources; + Protobuf::RepeatedPtrField secret_resources; EXPECT_THROW_WITH_MESSAGE(sds_api.onConfigUpdate(secret_resources, ""), EnvoyException, "Missing SDS resources for abc.com in onConfigUpdate()"); @@ -268,11 +272,11 @@ TEST_F(SdsApiTest, SecretUpdateWrongSize) { filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_key.pem" )EOF"; - Protobuf::RepeatedPtrField secret_resources; - auto secret_config_1 = secret_resources.Add(); - MessageUtil::loadFromYaml(TestEnvironment::substitute(yaml), *secret_config_1); - auto secret_config_2 = secret_resources.Add(); - MessageUtil::loadFromYaml(TestEnvironment::substitute(yaml), *secret_config_2); + envoy::api::v2::auth::Secret typed_secret; + MessageUtil::loadFromYaml(TestEnvironment::substitute(yaml), typed_secret); + Protobuf::RepeatedPtrField secret_resources; + secret_resources.Add()->PackFrom(typed_secret); + secret_resources.Add()->PackFrom(typed_secret); EXPECT_THROW_WITH_MESSAGE(sds_api.onConfigUpdate(secret_resources, ""), EnvoyException, "Unexpected SDS secrets length: 2"); @@ -298,9 +302,10 @@ TEST_F(SdsApiTest, SecretUpdateWrongSecretName) { filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_key.pem" )EOF"; - 
Protobuf::RepeatedPtrField secret_resources; - auto secret_config = secret_resources.Add(); - MessageUtil::loadFromYaml(TestEnvironment::substitute(yaml), *secret_config); + envoy::api::v2::auth::Secret typed_secret; + MessageUtil::loadFromYaml(TestEnvironment::substitute(yaml), typed_secret); + Protobuf::RepeatedPtrField secret_resources; + secret_resources.Add()->PackFrom(typed_secret); EXPECT_THROW_WITH_MESSAGE(sds_api.onConfigUpdate(secret_resources, ""), EnvoyException, "Unexpected SDS secret (expecting abc.com): wrong.name.com"); diff --git a/test/common/secret/secret_manager_impl_test.cc b/test/common/secret/secret_manager_impl_test.cc index 18204d0f085d2..33ce6095967be 100644 --- a/test/common/secret/secret_manager_impl_test.cc +++ b/test/common/secret/secret_manager_impl_test.cc @@ -179,9 +179,10 @@ name: "abc.com" private_key: filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_key.pem" )EOF"; - Protobuf::RepeatedPtrField secret_resources; - auto secret_config = secret_resources.Add(); - MessageUtil::loadFromYaml(TestEnvironment::substitute(yaml), *secret_config); + envoy::api::v2::auth::Secret typed_secret; + MessageUtil::loadFromYaml(TestEnvironment::substitute(yaml), typed_secret); + Protobuf::RepeatedPtrField secret_resources; + secret_resources.Add()->PackFrom(typed_secret); dynamic_cast(*secret_provider).onConfigUpdate(secret_resources, ""); Ssl::TlsCertificateConfigImpl tls_config(*secret_provider->secret(), *api_); const std::string cert_pem = diff --git a/test/common/upstream/cds_api_impl_test.cc b/test/common/upstream/cds_api_impl_test.cc index d18aee1f7042d..917e261b50731 100644 --- a/test/common/upstream/cds_api_impl_test.cc +++ b/test/common/upstream/cds_api_impl_test.cc @@ -186,8 +186,9 @@ TEST_F(CdsApiImplTest, ValidateFail) { setup(); - Protobuf::RepeatedPtrField clusters; - clusters.Add(); + Protobuf::RepeatedPtrField clusters; + envoy::api::v2::Cluster cluster; + clusters.Add()->PackFrom(cluster); 
EXPECT_CALL(cm_, clusters()).WillRepeatedly(Return(cluster_map_)); EXPECT_CALL(initialized_, ready()); @@ -201,12 +202,11 @@ TEST_F(CdsApiImplTest, ValidateDuplicateClusters) { setup(); - Protobuf::RepeatedPtrField clusters; - auto* cluster_1 = clusters.Add(); - cluster_1->set_name("duplicate_cluster"); - - auto* cluster_2 = clusters.Add(); - cluster_2->set_name("duplicate_cluster"); + Protobuf::RepeatedPtrField clusters; + envoy::api::v2::Cluster cluster_1; + cluster_1.set_name("duplicate_cluster"); + clusters.Add()->PackFrom(cluster_1); + clusters.Add()->PackFrom(cluster_1); EXPECT_CALL(cm_, clusters()).WillRepeatedly(Return(cluster_map_)); EXPECT_CALL(initialized_, ready()); @@ -226,7 +226,7 @@ TEST_F(CdsApiImplTest, EmptyConfigUpdate) { EXPECT_CALL(initialized_, ready()); EXPECT_CALL(request_, cancel()); - Protobuf::RepeatedPtrField clusters; + Protobuf::RepeatedPtrField clusters; dynamic_cast(cds_.get())->onConfigUpdate(clusters, ""); } @@ -240,13 +240,16 @@ TEST_F(CdsApiImplTest, ConfigUpdateWith2ValidClusters) { EXPECT_CALL(initialized_, ready()); EXPECT_CALL(request_, cancel()); - Protobuf::RepeatedPtrField clusters; - auto* cluster_1 = clusters.Add(); - cluster_1->set_name("cluster_1"); + Protobuf::RepeatedPtrField clusters; + + envoy::api::v2::Cluster cluster_1; + cluster_1.set_name("cluster_1"); + clusters.Add()->PackFrom(cluster_1); cm_.expectAdd("cluster_1"); - auto* cluster_2 = clusters.Add(); - cluster_2->set_name("cluster_2"); + envoy::api::v2::Cluster cluster_2; + cluster_2.set_name("cluster_2"); + clusters.Add()->PackFrom(cluster_2); cm_.expectAdd("cluster_2"); dynamic_cast(cds_.get())->onConfigUpdate(clusters, ""); @@ -262,17 +265,21 @@ TEST_F(CdsApiImplTest, ConfigUpdateAddsSecondClusterEvenIfFirstThrows) { EXPECT_CALL(initialized_, ready()); EXPECT_CALL(request_, cancel()); - Protobuf::RepeatedPtrField clusters; - auto* cluster_1 = clusters.Add(); - cluster_1->set_name("cluster_1"); + Protobuf::RepeatedPtrField clusters; + + 
envoy::api::v2::Cluster cluster_1; + cluster_1.set_name("cluster_1"); + clusters.Add()->PackFrom(cluster_1); cm_.expectAddToThrow("cluster_1", "An exception"); - auto* cluster_2 = clusters.Add(); - cluster_2->set_name("cluster_2"); + envoy::api::v2::Cluster cluster_2; + cluster_2.set_name("cluster_2"); + clusters.Add()->PackFrom(cluster_2); cm_.expectAdd("cluster_2"); - auto* cluster_3 = clusters.Add(); - cluster_3->set_name("cluster_3"); + envoy::api::v2::Cluster cluster_3; + cluster_3.set_name("cluster_3"); + clusters.Add()->PackFrom(cluster_3); cm_.expectAddToThrow("cluster_3", "Another exception"); EXPECT_THROW_WITH_MESSAGE( diff --git a/test/common/upstream/eds_test.cc b/test/common/upstream/eds_test.cc index c3e640e637144..abd69b656e4ef 100644 --- a/test/common/upstream/eds_test.cc +++ b/test/common/upstream/eds_test.cc @@ -49,6 +49,24 @@ class EdsTest : public testing::Test { )EOF"); } + void resetClusterDrainOnHostRemoval() { + resetCluster(R"EOF( + name: name + connect_timeout: 0.25s + type: EDS + lb_policy: ROUND_ROBIN + drain_connections_on_host_removal: true + eds_cluster_config: + service_name: fare + eds_config: + api_config_source: + api_type: REST + cluster_names: + - eds + refresh_delay: 1s + )EOF"); + } + void resetCluster(const std::string& yaml_config) { local_info_.node_.mutable_locality()->set_zone("us-east-1a"); eds_cluster_ = parseClusterFromV2Yaml(yaml_config); @@ -69,6 +87,13 @@ class EdsTest : public testing::Test { EXPECT_EQ(Cluster::InitializePhase::Secondary, cluster_->initializePhase()); } + void doOnConfigUpdateVerifyNoThrow( + const envoy::api::v2::ClusterLoadAssignment& cluster_load_assignment) { + Protobuf::RepeatedPtrField resources; + resources.Add()->PackFrom(cluster_load_assignment); + VERBOSE_EXPECT_NO_THROW(cluster_->onConfigUpdate(resources, "")); + } + Stats::IsolatedStoreImpl stats_; Ssl::MockContextManager ssl_context_manager_; envoy::api::v2::Cluster eds_cluster_; @@ -98,14 +123,13 @@ class EdsWithHealthCheckUpdateTest 
: public EdsTest { EXPECT_CALL(*health_checker, addHostCheckCompleteCb(_)).Times(2); cluster_->setHealthChecker(health_checker); - cluster_load_assignment_ = resources_.Add(); - cluster_load_assignment_->set_cluster_name("fare"); + cluster_load_assignment_.set_cluster_name("fare"); for (const auto& port : endpoint_ports) { addEndpoint(port); } - VERBOSE_EXPECT_NO_THROW(cluster_->onConfigUpdate(resources_, "")); + doOnConfigUpdateVerifyNoThrow(cluster_load_assignment_); // Make sure the cluster is rebuilt. EXPECT_EQ(0UL, stats_.counter("cluster.name.update_no_rebuild").value()); @@ -142,7 +166,7 @@ class EdsWithHealthCheckUpdateTest : public EdsTest { } void addEndpoint(const uint32_t port) { - auto* endpoints = cluster_load_assignment_->add_endpoints(); + auto* endpoints = cluster_load_assignment_.add_endpoints(); auto* socket_address = endpoints->add_lb_endpoints() ->mutable_endpoint() ->mutable_address() @@ -152,34 +176,35 @@ class EdsWithHealthCheckUpdateTest : public EdsTest { } void updateEndpointHealthCheckPortAtIndex(const uint32_t index, const uint32_t port) { - cluster_load_assignment_->mutable_endpoints(index) + cluster_load_assignment_.mutable_endpoints(index) ->mutable_lb_endpoints(0) ->mutable_endpoint() ->mutable_health_check_config() ->set_port_value(port); - VERBOSE_EXPECT_NO_THROW(cluster_->onConfigUpdate(resources_, "")); + doOnConfigUpdateVerifyNoThrow(cluster_load_assignment_); // Always rebuild if health check config is changed. EXPECT_EQ(0UL, stats_.counter("cluster.name.update_no_rebuild").value()); } - Protobuf::RepeatedPtrField resources_; - envoy::api::v2::ClusterLoadAssignment* cluster_load_assignment_; + envoy::api::v2::ClusterLoadAssignment cluster_load_assignment_; }; // Negative test for protoc-gen-validate constraints. 
TEST_F(EdsTest, ValidateFail) { - Protobuf::RepeatedPtrField resources; - resources.Add(); + envoy::api::v2::ClusterLoadAssignment resource; + Protobuf::RepeatedPtrField resources; + resources.Add()->PackFrom(resource); EXPECT_THROW(cluster_->onConfigUpdate(resources, ""), ProtoValidationException); } // Validate that onConfigUpdate() with unexpected cluster names rejects config. TEST_F(EdsTest, OnConfigUpdateWrongName) { - Protobuf::RepeatedPtrField resources; - auto* cluster_load_assignment = resources.Add(); - cluster_load_assignment->set_cluster_name("wrong name"); + envoy::api::v2::ClusterLoadAssignment cluster_load_assignment; + cluster_load_assignment.set_cluster_name("wrong name"); + Protobuf::RepeatedPtrField resources; + resources.Add()->PackFrom(cluster_load_assignment); bool initialized = false; cluster_->initialize([&initialized] { initialized = true; }); EXPECT_THROW(cluster_->onConfigUpdate(resources, ""), EnvoyException); @@ -198,13 +223,13 @@ TEST_F(EdsTest, OnConfigUpdateEmpty) { // Validate that onConfigUpdate() with unexpected cluster vector size rejects config. 
TEST_F(EdsTest, OnConfigUpdateWrongSize) { - Protobuf::RepeatedPtrField resources; bool initialized = false; cluster_->initialize([&initialized] { initialized = true; }); - auto* cluster_load_assignment = resources.Add(); - cluster_load_assignment->set_cluster_name("fare"); - cluster_load_assignment = resources.Add(); - cluster_load_assignment->set_cluster_name("fare"); + envoy::api::v2::ClusterLoadAssignment cluster_load_assignment; + cluster_load_assignment.set_cluster_name("fare"); + Protobuf::RepeatedPtrField resources; + resources.Add()->PackFrom(cluster_load_assignment); + resources.Add()->PackFrom(cluster_load_assignment); EXPECT_THROW(cluster_->onConfigUpdate(resources, ""), EnvoyException); cluster_->onConfigUpdateFailed(nullptr); EXPECT_TRUE(initialized); @@ -212,12 +237,11 @@ TEST_F(EdsTest, OnConfigUpdateWrongSize) { // Validate that onConfigUpdate() with the expected cluster accepts config. TEST_F(EdsTest, OnConfigUpdateSuccess) { - Protobuf::RepeatedPtrField resources; - auto* cluster_load_assignment = resources.Add(); - cluster_load_assignment->set_cluster_name("fare"); + envoy::api::v2::ClusterLoadAssignment cluster_load_assignment; + cluster_load_assignment.set_cluster_name("fare"); bool initialized = false; cluster_->initialize([&initialized] { initialized = true; }); - VERBOSE_EXPECT_NO_THROW(cluster_->onConfigUpdate(resources, "")); + doOnConfigUpdateVerifyNoThrow(cluster_load_assignment); EXPECT_TRUE(initialized); EXPECT_EQ(1UL, stats_.counter("cluster.name.update_no_rebuild").value()); } @@ -237,23 +261,22 @@ TEST_F(EdsTest, NoServiceNameOnSuccessConfigUpdate) { - eds refresh_delay: 1s )EOF"); - Protobuf::RepeatedPtrField resources; - auto* cluster_load_assignment = resources.Add(); - cluster_load_assignment->set_cluster_name("name"); + envoy::api::v2::ClusterLoadAssignment cluster_load_assignment; + cluster_load_assignment.set_cluster_name("name"); bool initialized = false; cluster_->initialize([&initialized] { initialized = true; }); - 
VERBOSE_EXPECT_NO_THROW(cluster_->onConfigUpdate(resources, "")); + doOnConfigUpdateVerifyNoThrow(cluster_load_assignment); EXPECT_TRUE(initialized); } // Validate that onConfigUpdate() updates the endpoint metadata. TEST_F(EdsTest, EndpointMetadata) { - Protobuf::RepeatedPtrField resources; - auto* cluster_load_assignment = resources.Add(); - cluster_load_assignment->set_cluster_name("fare"); - auto* endpoints = cluster_load_assignment->add_endpoints(); - + envoy::api::v2::ClusterLoadAssignment cluster_load_assignment; + cluster_load_assignment.set_cluster_name("fare"); + auto* endpoints = cluster_load_assignment.add_endpoints(); auto* endpoint = endpoints->add_lb_endpoints(); + auto* canary = endpoints->add_lb_endpoints(); + endpoint->mutable_endpoint()->mutable_address()->mutable_socket_address()->set_address("1.2.3.4"); endpoint->mutable_endpoint()->mutable_address()->mutable_socket_address()->set_port_value(80); Config::Metadata::mutableMetadataValue(*endpoint->mutable_metadata(), @@ -263,7 +286,6 @@ TEST_F(EdsTest, EndpointMetadata) { "num_key") .set_number_value(1.1); - auto* canary = endpoints->add_lb_endpoints(); canary->mutable_endpoint()->mutable_address()->mutable_socket_address()->set_address("2.3.4.5"); canary->mutable_endpoint()->mutable_address()->mutable_socket_address()->set_port_value(80); Config::Metadata::mutableMetadataValue(*canary->mutable_metadata(), @@ -276,7 +298,7 @@ TEST_F(EdsTest, EndpointMetadata) { bool initialized = false; cluster_->initialize([&initialized] { initialized = true; }); - VERBOSE_EXPECT_NO_THROW(cluster_->onConfigUpdate(resources, "")); + doOnConfigUpdateVerifyNoThrow(cluster_load_assignment); EXPECT_TRUE(initialized); EXPECT_EQ(0UL, stats_.counter("cluster.name.update_no_rebuild").value()); @@ -308,14 +330,14 @@ TEST_F(EdsTest, EndpointMetadata) { "v1"); // We don't rebuild with the exact same config. 
- VERBOSE_EXPECT_NO_THROW(cluster_->onConfigUpdate(resources, "")); + doOnConfigUpdateVerifyNoThrow(cluster_load_assignment); EXPECT_EQ(1UL, stats_.counter("cluster.name.update_no_rebuild").value()); // New resources with Metadata updated. Config::Metadata::mutableMetadataValue(*canary->mutable_metadata(), Config::MetadataFilters::get().ENVOY_LB, "version") .set_string_value("v2"); - VERBOSE_EXPECT_NO_THROW(cluster_->onConfigUpdate(resources, "")); + doOnConfigUpdateVerifyNoThrow(cluster_load_assignment); auto& nhosts = cluster_->prioritySet().hostSetsPerPriority()[0]->hosts(); EXPECT_EQ(nhosts.size(), 2); EXPECT_EQ(Config::Metadata::metadataValue(*nhosts[1]->metadata(), @@ -326,10 +348,9 @@ TEST_F(EdsTest, EndpointMetadata) { // Validate that onConfigUpdate() updates endpoint health status. TEST_F(EdsTest, EndpointHealthStatus) { - Protobuf::RepeatedPtrField resources; - auto* cluster_load_assignment = resources.Add(); - cluster_load_assignment->set_cluster_name("fare"); - auto* endpoints = cluster_load_assignment->add_endpoints(); + envoy::api::v2::ClusterLoadAssignment cluster_load_assignment; + cluster_load_assignment.set_cluster_name("fare"); + auto* endpoints = cluster_load_assignment.add_endpoints(); // First check that EDS is correctly mapping // envoy::api::v2::core::HealthStatus values to the expected health() status. @@ -355,7 +376,7 @@ TEST_F(EdsTest, EndpointHealthStatus) { bool initialized = false; cluster_->initialize([&initialized] { initialized = true; }); - VERBOSE_EXPECT_NO_THROW(cluster_->onConfigUpdate(resources, "")); + doOnConfigUpdateVerifyNoThrow(cluster_load_assignment); EXPECT_TRUE(initialized); { auto& hosts = cluster_->prioritySet().hostSetsPerPriority()[0]->hosts(); @@ -370,7 +391,7 @@ TEST_F(EdsTest, EndpointHealthStatus) { // to unhealthy, check we have the expected change in status. 
endpoints->mutable_lb_endpoints(0)->set_health_status( envoy::api::v2::core::HealthStatus::UNHEALTHY); - VERBOSE_EXPECT_NO_THROW(cluster_->onConfigUpdate(resources, "")); + doOnConfigUpdateVerifyNoThrow(cluster_load_assignment); { auto& hosts = cluster_->prioritySet().hostSetsPerPriority()[0]->hosts(); EXPECT_EQ(hosts.size(), health_status_expected.size()); @@ -385,7 +406,7 @@ TEST_F(EdsTest, EndpointHealthStatus) { // to healthy, check we have the expected change in status. endpoints->mutable_lb_endpoints(health_status_expected.size() - 1) ->set_health_status(envoy::api::v2::core::HealthStatus::HEALTHY); - VERBOSE_EXPECT_NO_THROW(cluster_->onConfigUpdate(resources, "")); + doOnConfigUpdateVerifyNoThrow(cluster_load_assignment); { auto& hosts = cluster_->prioritySet().hostSetsPerPriority()[0]->hosts(); EXPECT_EQ(hosts.size(), health_status_expected.size()); @@ -402,7 +423,7 @@ TEST_F(EdsTest, EndpointHealthStatus) { auto& hosts = cluster_->prioritySet().hostSetsPerPriority()[0]->hosts(); hosts[0]->healthFlagSet(Host::HealthFlag::FAILED_ACTIVE_HC); } - VERBOSE_EXPECT_NO_THROW(cluster_->onConfigUpdate(resources, "")); + doOnConfigUpdateVerifyNoThrow(cluster_load_assignment); { auto& hosts = cluster_->prioritySet().hostSetsPerPriority()[0]->hosts(); EXPECT_EQ(Host::Health::Unhealthy, hosts[0]->health()); @@ -412,7 +433,7 @@ TEST_F(EdsTest, EndpointHealthStatus) { // active health check failure. endpoints->mutable_lb_endpoints(0)->set_health_status( envoy::api::v2::core::HealthStatus::HEALTHY); - VERBOSE_EXPECT_NO_THROW(cluster_->onConfigUpdate(resources, "")); + doOnConfigUpdateVerifyNoThrow(cluster_load_assignment); { auto& hosts = cluster_->prioritySet().hostSetsPerPriority()[0]->hosts(); EXPECT_EQ(Host::Health::Unhealthy, hosts[0]->health()); @@ -430,7 +451,7 @@ TEST_F(EdsTest, EndpointHealthStatus) { // Now mark host 0 degraded via EDS, it should be degraded. 
endpoints->mutable_lb_endpoints(0)->set_health_status( envoy::api::v2::core::HealthStatus::DEGRADED); - VERBOSE_EXPECT_NO_THROW(cluster_->onConfigUpdate(resources, "")); + doOnConfigUpdateVerifyNoThrow(cluster_load_assignment); { auto& hosts = cluster_->prioritySet().hostSetsPerPriority()[0]->hosts(); EXPECT_EQ(Host::Health::Degraded, hosts[0]->health()); @@ -446,7 +467,7 @@ TEST_F(EdsTest, EndpointHealthStatus) { // Now mark host 0 healthy via EDS, it should still be degraded. endpoints->mutable_lb_endpoints(0)->set_health_status( envoy::api::v2::core::HealthStatus::HEALTHY); - VERBOSE_EXPECT_NO_THROW(cluster_->onConfigUpdate(resources, "")); + doOnConfigUpdateVerifyNoThrow(cluster_load_assignment); { auto& hosts = cluster_->prioritySet().hostSetsPerPriority()[0]->hosts(); EXPECT_EQ(Host::Health::Degraded, hosts[0]->health()); @@ -459,33 +480,17 @@ TEST_F(EdsTest, EndpointHealthStatus) { // Validate that onConfigUpdate() removes endpoints that are marked as healthy // when configured to do so. 
TEST_F(EdsTest, EndpointRemoval) { - resetCluster(R"EOF( - name: name - connect_timeout: 0.25s - type: EDS - lb_policy: ROUND_ROBIN - drain_connections_on_host_removal: true - eds_cluster_config: - service_name: fare - eds_config: - api_config_source: - api_type: REST - cluster_names: - - eds - refresh_delay: 1s - )EOF"); + envoy::api::v2::ClusterLoadAssignment cluster_load_assignment; + cluster_load_assignment.set_cluster_name("fare"); + resetClusterDrainOnHostRemoval(); auto health_checker = std::make_shared(); EXPECT_CALL(*health_checker, start()); EXPECT_CALL(*health_checker, addHostCheckCompleteCb(_)).Times(2); cluster_->setHealthChecker(health_checker); - Protobuf::RepeatedPtrField resources; - auto* cluster_load_assignment = resources.Add(); - cluster_load_assignment->set_cluster_name("fare"); - - auto add_endpoint = [cluster_load_assignment](int port) { - auto* endpoints = cluster_load_assignment->add_endpoints(); + auto add_endpoint = [&cluster_load_assignment](int port) { + auto* endpoints = cluster_load_assignment.add_endpoints(); auto* socket_address = endpoints->add_lb_endpoints() ->mutable_endpoint() @@ -497,8 +502,7 @@ TEST_F(EdsTest, EndpointRemoval) { add_endpoint(80); add_endpoint(81); - - VERBOSE_EXPECT_NO_THROW(cluster_->onConfigUpdate(resources, "")); + doOnConfigUpdateVerifyNoThrow(cluster_load_assignment); { auto& hosts = cluster_->prioritySet().hostSetsPerPriority()[0]->hosts(); @@ -512,10 +516,9 @@ TEST_F(EdsTest, EndpointRemoval) { } // Remove endpoints and add back the port 80 one - cluster_load_assignment->clear_endpoints(); + cluster_load_assignment.clear_endpoints(); add_endpoint(80); - - VERBOSE_EXPECT_NO_THROW(cluster_->onConfigUpdate(resources, "")); + doOnConfigUpdateVerifyNoThrow(cluster_load_assignment); { auto& hosts = cluster_->prioritySet().hostSetsPerPriority()[0]->hosts(); @@ -525,33 +528,17 @@ TEST_F(EdsTest, EndpointRemoval) { // Verifies that if an endpoint is moved to a new priority, the active hc status is preserved. 
TEST_F(EdsTest, EndpointMovedToNewPriority) { - resetCluster(R"EOF( - name: name - connect_timeout: 0.25s - type: EDS - lb_policy: ROUND_ROBIN - drain_connections_on_host_removal: true - eds_cluster_config: - service_name: fare - eds_config: - api_config_source: - api_type: REST - cluster_names: - - eds - refresh_delay: 1s - )EOF"); + envoy::api::v2::ClusterLoadAssignment cluster_load_assignment; + cluster_load_assignment.set_cluster_name("fare"); + resetClusterDrainOnHostRemoval(); auto health_checker = std::make_shared(); EXPECT_CALL(*health_checker, start()); EXPECT_CALL(*health_checker, addHostCheckCompleteCb(_)).Times(2); cluster_->setHealthChecker(health_checker); - Protobuf::RepeatedPtrField resources; - auto* cluster_load_assignment = resources.Add(); - cluster_load_assignment->set_cluster_name("fare"); - - auto add_endpoint = [cluster_load_assignment](int port, int priority) { - auto* endpoints = cluster_load_assignment->add_endpoints(); + auto add_endpoint = [&cluster_load_assignment](int port, int priority) { + auto* endpoints = cluster_load_assignment.add_endpoints(); endpoints->set_priority(priority); auto* socket_address = endpoints->add_lb_endpoints() @@ -565,7 +552,7 @@ TEST_F(EdsTest, EndpointMovedToNewPriority) { add_endpoint(80, 0); add_endpoint(81, 0); - VERBOSE_EXPECT_NO_THROW(cluster_->onConfigUpdate(resources, "")); + doOnConfigUpdateVerifyNoThrow(cluster_load_assignment); { auto& hosts = cluster_->prioritySet().hostSetsPerPriority()[0]->hosts(); @@ -579,11 +566,10 @@ TEST_F(EdsTest, EndpointMovedToNewPriority) { } // Moves the endpoints between priorities - cluster_load_assignment->clear_endpoints(); + cluster_load_assignment.clear_endpoints(); add_endpoint(81, 0); add_endpoint(80, 1); - - VERBOSE_EXPECT_NO_THROW(cluster_->onConfigUpdate(resources, "")); + doOnConfigUpdateVerifyNoThrow(cluster_load_assignment); { auto& hosts = cluster_->prioritySet().hostSetsPerPriority()[0]->hosts(); @@ -613,33 +599,17 @@ TEST_F(EdsTest, 
EndpointMovedToNewPriority) { // Verifies that if an endpoint is moved between priorities, the health check value // of the host is preserved TEST_F(EdsTest, EndpointMoved) { - resetCluster(R"EOF( - name: name - connect_timeout: 0.25s - type: EDS - lb_policy: ROUND_ROBIN - drain_connections_on_host_removal: true - eds_cluster_config: - service_name: fare - eds_config: - api_config_source: - api_type: REST - cluster_names: - - eds - refresh_delay: 1s - )EOF"); + envoy::api::v2::ClusterLoadAssignment cluster_load_assignment; + cluster_load_assignment.set_cluster_name("fare"); + resetClusterDrainOnHostRemoval(); auto health_checker = std::make_shared(); EXPECT_CALL(*health_checker, start()); EXPECT_CALL(*health_checker, addHostCheckCompleteCb(_)).Times(2); cluster_->setHealthChecker(health_checker); - Protobuf::RepeatedPtrField resources; - auto* cluster_load_assignment = resources.Add(); - cluster_load_assignment->set_cluster_name("fare"); - - auto add_endpoint = [cluster_load_assignment](int port, int priority) { - auto* endpoints = cluster_load_assignment->add_endpoints(); + auto add_endpoint = [&cluster_load_assignment](int port, int priority) { + auto* endpoints = cluster_load_assignment.add_endpoints(); endpoints->set_priority(priority); auto* socket_address = endpoints->add_lb_endpoints() @@ -652,8 +622,7 @@ TEST_F(EdsTest, EndpointMoved) { add_endpoint(80, 0); add_endpoint(81, 1); - - VERBOSE_EXPECT_NO_THROW(cluster_->onConfigUpdate(resources, "")); + doOnConfigUpdateVerifyNoThrow(cluster_load_assignment); { auto& hosts = cluster_->prioritySet().hostSetsPerPriority()[0]->hosts(); @@ -676,17 +645,15 @@ TEST_F(EdsTest, EndpointMoved) { } // Moves the endpoints between priorities - cluster_load_assignment->clear_endpoints(); + cluster_load_assignment.clear_endpoints(); add_endpoint(81, 0); add_endpoint(80, 1); - // Verify that no hosts gets added or removed to/from the PrioritySet. 
cluster_->prioritySet().addMemberUpdateCb([&](const auto& added, const auto& removed) { EXPECT_TRUE(added.empty()); EXPECT_TRUE(removed.empty()); }); - - VERBOSE_EXPECT_NO_THROW(cluster_->onConfigUpdate(resources, "")); + doOnConfigUpdateVerifyNoThrow(cluster_load_assignment); { auto& hosts = cluster_->prioritySet().hostSetsPerPriority()[0]->hosts(); @@ -717,10 +684,9 @@ TEST_F(EdsTest, EndpointMoved) { // Validate that onConfigUpdate() updates the endpoint locality. TEST_F(EdsTest, EndpointLocality) { - Protobuf::RepeatedPtrField resources; - auto* cluster_load_assignment = resources.Add(); - cluster_load_assignment->set_cluster_name("fare"); - auto* endpoints = cluster_load_assignment->add_endpoints(); + envoy::api::v2::ClusterLoadAssignment cluster_load_assignment; + cluster_load_assignment.set_cluster_name("fare"); + auto* endpoints = cluster_load_assignment.add_endpoints(); auto* locality = endpoints->mutable_locality(); locality->set_region("oceania"); locality->set_zone("hello"); @@ -745,7 +711,7 @@ TEST_F(EdsTest, EndpointLocality) { bool initialized = false; cluster_->initialize([&initialized] { initialized = true; }); - VERBOSE_EXPECT_NO_THROW(cluster_->onConfigUpdate(resources, "")); + doOnConfigUpdateVerifyNoThrow(cluster_load_assignment); EXPECT_TRUE(initialized); auto& hosts = cluster_->prioritySet().hostSetsPerPriority()[0]->hosts(); @@ -763,12 +729,11 @@ TEST_F(EdsTest, EndpointLocality) { // Validate that onConfigUpdate() does not propagate locality weights to the host set when // locality weighted balancing isn't configured. 
TEST_F(EdsTest, EndpointLocalityWeightsIgnored) { - Protobuf::RepeatedPtrField resources; - auto* cluster_load_assignment = resources.Add(); - cluster_load_assignment->set_cluster_name("fare"); + envoy::api::v2::ClusterLoadAssignment cluster_load_assignment; + cluster_load_assignment.set_cluster_name("fare"); { - auto* endpoints = cluster_load_assignment->add_endpoints(); + auto* endpoints = cluster_load_assignment.add_endpoints(); auto* locality = endpoints->mutable_locality(); locality->set_region("oceania"); locality->set_zone("hello"); @@ -785,7 +750,7 @@ TEST_F(EdsTest, EndpointLocalityWeightsIgnored) { bool initialized = false; cluster_->initialize([&initialized] { initialized = true; }); - VERBOSE_EXPECT_NO_THROW(cluster_->onConfigUpdate(resources, "")); + doOnConfigUpdateVerifyNoThrow(cluster_load_assignment); EXPECT_TRUE(initialized); EXPECT_EQ(nullptr, cluster_->prioritySet().hostSetsPerPriority()[0]->localityWeights()); @@ -794,6 +759,8 @@ TEST_F(EdsTest, EndpointLocalityWeightsIgnored) { // Validate that onConfigUpdate() propagates locality weights to the host set when locality // weighted balancing is configured. 
TEST_F(EdsTest, EndpointLocalityWeights) { + envoy::api::v2::ClusterLoadAssignment cluster_load_assignment; + cluster_load_assignment.set_cluster_name("fare"); resetCluster(R"EOF( name: name connect_timeout: 0.25s @@ -810,12 +777,9 @@ TEST_F(EdsTest, EndpointLocalityWeights) { - eds refresh_delay: 1s )EOF"); - Protobuf::RepeatedPtrField resources; - auto* cluster_load_assignment = resources.Add(); - cluster_load_assignment->set_cluster_name("fare"); { - auto* endpoints = cluster_load_assignment->add_endpoints(); + auto* endpoints = cluster_load_assignment.add_endpoints(); auto* locality = endpoints->mutable_locality(); locality->set_region("oceania"); locality->set_zone("hello"); @@ -831,7 +795,7 @@ TEST_F(EdsTest, EndpointLocalityWeights) { } { - auto* endpoints = cluster_load_assignment->add_endpoints(); + auto* endpoints = cluster_load_assignment.add_endpoints(); auto* locality = endpoints->mutable_locality(); locality->set_region("space"); locality->set_zone("station"); @@ -846,7 +810,7 @@ TEST_F(EdsTest, EndpointLocalityWeights) { } { - auto* endpoints = cluster_load_assignment->add_endpoints(); + auto* endpoints = cluster_load_assignment.add_endpoints(); auto* locality = endpoints->mutable_locality(); locality->set_region("sugar"); locality->set_zone("candy"); @@ -863,7 +827,7 @@ TEST_F(EdsTest, EndpointLocalityWeights) { bool initialized = false; cluster_->initialize([&initialized] { initialized = true; }); - VERBOSE_EXPECT_NO_THROW(cluster_->onConfigUpdate(resources, "")); + doOnConfigUpdateVerifyNoThrow(cluster_load_assignment); EXPECT_TRUE(initialized); const auto& locality_weights = @@ -877,14 +841,13 @@ TEST_F(EdsTest, EndpointLocalityWeights) { // Validate that onConfigUpdate() removes any locality not referenced in the // config update in each priority. 
TEST_F(EdsTest, RemoveUnreferencedLocalities) { - Protobuf::RepeatedPtrField resources; - auto* cluster_load_assignment = resources.Add(); - cluster_load_assignment->set_cluster_name("fare"); + envoy::api::v2::ClusterLoadAssignment cluster_load_assignment; + cluster_load_assignment.set_cluster_name("fare"); uint32_t port = 1000; - auto add_hosts_to_locality = [cluster_load_assignment, + auto add_hosts_to_locality = [&cluster_load_assignment, &port](const std::string& region, const std::string& zone, const std::string& sub_zone, uint32_t n, uint32_t priority) { - auto* endpoints = cluster_load_assignment->add_endpoints(); + auto* endpoints = cluster_load_assignment.add_endpoints(); endpoints->set_priority(priority); auto* locality = endpoints->mutable_locality(); locality->set_region(region); @@ -910,7 +873,7 @@ TEST_F(EdsTest, RemoveUnreferencedLocalities) { bool initialized = false; cluster_->initialize([&initialized] { initialized = true; }); - VERBOSE_EXPECT_NO_THROW(cluster_->onConfigUpdate(resources, "")); + doOnConfigUpdateVerifyNoThrow(cluster_load_assignment); EXPECT_TRUE(initialized); { @@ -927,11 +890,10 @@ TEST_F(EdsTest, RemoveUnreferencedLocalities) { // Reset the ClusterLoadAssignment to only contain one of the locality per priority. // This should leave us with only one locality. - cluster_load_assignment->clear_endpoints(); + cluster_load_assignment.clear_endpoints(); add_hosts_to_locality("oceania", "koala", "ingsoc", 4, 0); add_hosts_to_locality("oceania", "bear", "best", 2, 1); - - VERBOSE_EXPECT_NO_THROW(cluster_->onConfigUpdate(resources, "")); + doOnConfigUpdateVerifyNoThrow(cluster_load_assignment); { auto& hosts_per_locality = @@ -946,8 +908,8 @@ TEST_F(EdsTest, RemoveUnreferencedLocalities) { } // Clear out the new ClusterLoadAssignment. This should leave us with 0 localities per priority. 
- cluster_load_assignment->clear_endpoints(); - VERBOSE_EXPECT_NO_THROW(cluster_->onConfigUpdate(resources, "")); + cluster_load_assignment.clear_endpoints(); + doOnConfigUpdateVerifyNoThrow(cluster_load_assignment); { auto& hosts_per_locality = @@ -964,14 +926,13 @@ TEST_F(EdsTest, RemoveUnreferencedLocalities) { // Validate that onConfigUpdate() updates bins hosts per locality as expected. TEST_F(EdsTest, EndpointHostsPerLocality) { - Protobuf::RepeatedPtrField resources; - auto* cluster_load_assignment = resources.Add(); - cluster_load_assignment->set_cluster_name("fare"); + envoy::api::v2::ClusterLoadAssignment cluster_load_assignment; + cluster_load_assignment.set_cluster_name("fare"); uint32_t port = 1000; - auto add_hosts_to_locality = [cluster_load_assignment, + auto add_hosts_to_locality = [&cluster_load_assignment, &port](const std::string& region, const std::string& zone, const std::string& sub_zone, uint32_t n) { - auto* endpoints = cluster_load_assignment->add_endpoints(); + auto* endpoints = cluster_load_assignment.add_endpoints(); auto* locality = endpoints->mutable_locality(); locality->set_region(region); locality->set_zone(zone); @@ -992,7 +953,7 @@ TEST_F(EdsTest, EndpointHostsPerLocality) { bool initialized = false; cluster_->initialize([&initialized] { initialized = true; }); - VERBOSE_EXPECT_NO_THROW(cluster_->onConfigUpdate(resources, "")); + doOnConfigUpdateVerifyNoThrow(cluster_load_assignment); EXPECT_TRUE(initialized); { @@ -1010,8 +971,7 @@ TEST_F(EdsTest, EndpointHostsPerLocality) { add_hosts_to_locality("oceania", "koala", "eucalyptus", 3); add_hosts_to_locality("general", "koala", "ingsoc", 5); - - VERBOSE_EXPECT_NO_THROW(cluster_->onConfigUpdate(resources, "")); + doOnConfigUpdateVerifyNoThrow(cluster_load_assignment); { auto& hosts_per_locality = cluster_->prioritySet().hostSetsPerPriority()[0]->hostsPerLocality(); @@ -1033,14 +993,13 @@ TEST_F(EdsTest, EndpointHostsPerLocality) { // Validate that onConfigUpdate() updates all 
priorities in the prioritySet TEST_F(EdsTest, EndpointHostPerPriority) { - Protobuf::RepeatedPtrField resources; - auto* cluster_load_assignment = resources.Add(); - cluster_load_assignment->set_cluster_name("fare"); + envoy::api::v2::ClusterLoadAssignment cluster_load_assignment; + cluster_load_assignment.set_cluster_name("fare"); uint32_t port = 1000; - auto add_hosts_to_locality = [cluster_load_assignment, + auto add_hosts_to_locality = [&cluster_load_assignment, &port](const std::string& region, const std::string& zone, const std::string& sub_zone, uint32_t n, uint32_t priority) { - auto* endpoints = cluster_load_assignment->add_endpoints(); + auto* endpoints = cluster_load_assignment.add_endpoints(); endpoints->set_priority(priority); auto* locality = endpoints->mutable_locality(); locality->set_region(region); @@ -1062,7 +1021,7 @@ TEST_F(EdsTest, EndpointHostPerPriority) { bool initialized = false; cluster_->initialize([&initialized] { initialized = true; }); - VERBOSE_EXPECT_NO_THROW(cluster_->onConfigUpdate(resources, "")); + doOnConfigUpdateVerifyNoThrow(cluster_load_assignment); EXPECT_TRUE(initialized); { @@ -1075,11 +1034,10 @@ TEST_F(EdsTest, EndpointHostPerPriority) { EXPECT_EQ(1, hosts.size()); } - cluster_load_assignment->clear_endpoints(); + cluster_load_assignment.clear_endpoints(); add_hosts_to_locality("oceania", "koala", "ingsoc", 4, 0); - - VERBOSE_EXPECT_NO_THROW(cluster_->onConfigUpdate(resources, "")); + doOnConfigUpdateVerifyNoThrow(cluster_load_assignment); { auto& hosts = cluster_->prioritySet().hostSetsPerPriority()[0]->hosts(); @@ -1094,12 +1052,11 @@ TEST_F(EdsTest, EndpointHostPerPriority) { // Validate that onConfigUpdate() updates bins hosts per priority as expected. 
TEST_F(EdsTest, EndpointHostsPerPriority) { - Protobuf::RepeatedPtrField resources; - auto* cluster_load_assignment = resources.Add(); - cluster_load_assignment->set_cluster_name("fare"); + envoy::api::v2::ClusterLoadAssignment cluster_load_assignment; + cluster_load_assignment.set_cluster_name("fare"); uint32_t port = 1000; - auto add_hosts_to_priority = [cluster_load_assignment, &port](uint32_t priority, uint32_t n) { - auto* endpoints = cluster_load_assignment->add_endpoints(); + auto add_hosts_to_priority = [&cluster_load_assignment, &port](uint32_t priority, uint32_t n) { + auto* endpoints = cluster_load_assignment.add_endpoints(); endpoints->set_priority(priority); for (uint32_t i = 0; i < n; ++i) { @@ -1118,7 +1075,7 @@ TEST_F(EdsTest, EndpointHostsPerPriority) { bool initialized = false; cluster_->initialize([&initialized] { initialized = true; }); - VERBOSE_EXPECT_NO_THROW(cluster_->onConfigUpdate(resources, "")); + doOnConfigUpdateVerifyNoThrow(cluster_load_assignment); EXPECT_TRUE(initialized); ASSERT_EQ(2, cluster_->prioritySet().hostSetsPerPriority().size()); @@ -1130,8 +1087,7 @@ TEST_F(EdsTest, EndpointHostsPerPriority) { // make sure bad config does no harm. add_hosts_to_priority(0, 2); add_hosts_to_priority(3, 5); - - VERBOSE_EXPECT_NO_THROW(cluster_->onConfigUpdate(resources, "")); + doOnConfigUpdateVerifyNoThrow(cluster_load_assignment); ASSERT_EQ(4, cluster_->prioritySet().hostSetsPerPriority().size()); EXPECT_EQ(4, cluster_->prioritySet().hostSetsPerPriority()[0]->hosts().size()); @@ -1141,9 +1097,9 @@ TEST_F(EdsTest, EndpointHostsPerPriority) { // Update the number of hosts in priority #4. Make sure no other priority // levels are affected. 
- cluster_load_assignment->clear_endpoints(); + cluster_load_assignment.clear_endpoints(); add_hosts_to_priority(3, 4); - VERBOSE_EXPECT_NO_THROW(cluster_->onConfigUpdate(resources, "")); + doOnConfigUpdateVerifyNoThrow(cluster_load_assignment); ASSERT_EQ(4, cluster_->prioritySet().hostSetsPerPriority().size()); EXPECT_EQ(4, cluster_->prioritySet().hostSetsPerPriority()[0]->hosts().size()); EXPECT_EQ(1, cluster_->prioritySet().hostSetsPerPriority()[1]->hosts().size()); @@ -1153,13 +1109,12 @@ TEST_F(EdsTest, EndpointHostsPerPriority) { // Make sure config updates with P!=0 are rejected for the local cluster. TEST_F(EdsTest, NoPriorityForLocalCluster) { + envoy::api::v2::ClusterLoadAssignment cluster_load_assignment; + cluster_load_assignment.set_cluster_name("fare"); cm_.local_cluster_name_ = "fare"; - Protobuf::RepeatedPtrField resources; - auto* cluster_load_assignment = resources.Add(); - cluster_load_assignment->set_cluster_name("fare"); uint32_t port = 1000; - auto add_hosts_to_priority = [cluster_load_assignment, &port](uint32_t priority, uint32_t n) { - auto* endpoints = cluster_load_assignment->add_endpoints(); + auto add_hosts_to_priority = [&cluster_load_assignment, &port](uint32_t priority, uint32_t n) { + auto* endpoints = cluster_load_assignment.add_endpoints(); endpoints->set_priority(priority); for (uint32_t i = 0; i < n; ++i) { @@ -1178,26 +1133,28 @@ TEST_F(EdsTest, NoPriorityForLocalCluster) { add_hosts_to_priority(1, 1); bool initialized = false; cluster_->initialize([&initialized] { initialized = true; }); + Protobuf::RepeatedPtrField resources; + resources.Add()->PackFrom(cluster_load_assignment); EXPECT_THROW_WITH_MESSAGE(cluster_->onConfigUpdate(resources, ""), EnvoyException, "Unexpected non-zero priority for local cluster 'fare'."); // Try an update which only has endpoints with P=0. This should go through. 
- cluster_load_assignment->clear_endpoints(); + cluster_load_assignment.clear_endpoints(); add_hosts_to_priority(0, 2); - VERBOSE_EXPECT_NO_THROW(cluster_->onConfigUpdate(resources, "")); + doOnConfigUpdateVerifyNoThrow(cluster_load_assignment); } // Set up an EDS config with multiple priorities and localities and make sure // they are loaded and reloaded as expected. TEST_F(EdsTest, PriorityAndLocality) { - Protobuf::RepeatedPtrField resources; - auto* cluster_load_assignment = resources.Add(); - cluster_load_assignment->set_cluster_name("fare"); + envoy::api::v2::ClusterLoadAssignment cluster_load_assignment; + cluster_load_assignment.set_cluster_name("fare"); uint32_t port = 1000; auto add_hosts_to_locality_and_priority = - [cluster_load_assignment, &port](const std::string& region, const std::string& zone, - const std::string& sub_zone, uint32_t priority, uint32_t n) { - auto* endpoints = cluster_load_assignment->add_endpoints(); + [&cluster_load_assignment, &port](const std::string& region, const std::string& zone, + const std::string& sub_zone, uint32_t priority, + uint32_t n) { + auto* endpoints = cluster_load_assignment.add_endpoints(); endpoints->set_priority(priority); auto* locality = endpoints->mutable_locality(); locality->set_region(region); @@ -1222,7 +1179,7 @@ TEST_F(EdsTest, PriorityAndLocality) { bool initialized = false; cluster_->initialize([&initialized] { initialized = true; }); - VERBOSE_EXPECT_NO_THROW(cluster_->onConfigUpdate(resources, "")); + doOnConfigUpdateVerifyNoThrow(cluster_load_assignment); EXPECT_TRUE(initialized); { @@ -1248,8 +1205,7 @@ TEST_F(EdsTest, PriorityAndLocality) { // Add one more locality to both priority 0 and priority 1. 
add_hosts_to_locality_and_priority("oceania", "koala", "eucalyptus", 0, 3); add_hosts_to_locality_and_priority("general", "koala", "ingsoc", 1, 5); - - VERBOSE_EXPECT_NO_THROW(cluster_->onConfigUpdate(resources, "")); + doOnConfigUpdateVerifyNoThrow(cluster_load_assignment); { auto& first_hosts_per_locality = @@ -1283,6 +1239,8 @@ TEST_F(EdsTest, PriorityAndLocality) { // Set up an EDS config with multiple priorities, localities, weights and make sure // they are loaded and reloaded as expected. TEST_F(EdsTest, PriorityAndLocalityWeighted) { + envoy::api::v2::ClusterLoadAssignment cluster_load_assignment; + cluster_load_assignment.set_cluster_name("fare"); resetCluster(R"EOF( name: name connect_timeout: 0.25s @@ -1300,15 +1258,12 @@ TEST_F(EdsTest, PriorityAndLocalityWeighted) { refresh_delay: 1s )EOF"); - Protobuf::RepeatedPtrField resources; - auto* cluster_load_assignment = resources.Add(); - cluster_load_assignment->set_cluster_name("fare"); uint32_t port = 1000; auto add_hosts_to_locality_and_priority = - [cluster_load_assignment, &port](const std::string& region, const std::string& zone, - const std::string& sub_zone, uint32_t priority, uint32_t n, - uint32_t weight) { - auto* endpoints = cluster_load_assignment->add_endpoints(); + [&cluster_load_assignment, &port](const std::string& region, const std::string& zone, + const std::string& sub_zone, uint32_t priority, uint32_t n, + uint32_t weight) { + auto* endpoints = cluster_load_assignment.add_endpoints(); endpoints->set_priority(priority); auto* locality = endpoints->mutable_locality(); locality->set_region(region); @@ -1334,7 +1289,7 @@ TEST_F(EdsTest, PriorityAndLocalityWeighted) { bool initialized = false; cluster_->initialize([&initialized] { initialized = true; }); - VERBOSE_EXPECT_NO_THROW(cluster_->onConfigUpdate(resources, "")); + doOnConfigUpdateVerifyNoThrow(cluster_load_assignment); EXPECT_TRUE(initialized); EXPECT_EQ(0UL, stats_.counter("cluster.name.update_no_rebuild").value()); @@ -1368,13 
+1323,13 @@ TEST_F(EdsTest, PriorityAndLocalityWeighted) { // This should noop (regression test for earlier bug where we would still // rebuild). - VERBOSE_EXPECT_NO_THROW(cluster_->onConfigUpdate(resources, "")); + doOnConfigUpdateVerifyNoThrow(cluster_load_assignment); EXPECT_EQ(1UL, stats_.counter("cluster.name.update_no_rebuild").value()); // Adjust locality weights, validate that we observe an update. - cluster_load_assignment->mutable_endpoints(0)->mutable_load_balancing_weight()->set_value(60); - cluster_load_assignment->mutable_endpoints(1)->mutable_load_balancing_weight()->set_value(40); - VERBOSE_EXPECT_NO_THROW(cluster_->onConfigUpdate(resources, "")); + cluster_load_assignment.mutable_endpoints(0)->mutable_load_balancing_weight()->set_value(60); + cluster_load_assignment.mutable_endpoints(1)->mutable_load_balancing_weight()->set_value(40); + doOnConfigUpdateVerifyNoThrow(cluster_load_assignment); EXPECT_EQ(1UL, stats_.counter("cluster.name.update_no_rebuild").value()); } @@ -1458,10 +1413,9 @@ TEST_F(EdsWithHealthCheckUpdateTest, EndpointUpdateHealthCheckConfigWithDrainCon // Throw on adding a new resource with an invalid endpoint (since the given address is invalid). 
TEST_F(EdsTest, MalformedIP) { - Protobuf::RepeatedPtrField resources; - auto* cluster_load_assignment = resources.Add(); - cluster_load_assignment->set_cluster_name("fare"); - auto* endpoints = cluster_load_assignment->add_endpoints(); + envoy::api::v2::ClusterLoadAssignment cluster_load_assignment; + cluster_load_assignment.set_cluster_name("fare"); + auto* endpoints = cluster_load_assignment.add_endpoints(); auto* endpoint = endpoints->add_lb_endpoints(); endpoint->mutable_endpoint()->mutable_address()->mutable_socket_address()->set_address( @@ -1469,6 +1423,8 @@ TEST_F(EdsTest, MalformedIP) { endpoint->mutable_endpoint()->mutable_address()->mutable_socket_address()->set_port_value(80); cluster_->initialize([] {}); + Protobuf::RepeatedPtrField resources; + resources.Add()->PackFrom(cluster_load_assignment); EXPECT_THROW_WITH_MESSAGE(cluster_->onConfigUpdate(resources, ""), EnvoyException, "malformed IP address: foo.bar.com. Consider setting resolver_name or " "setting cluster type to 'STRICT_DNS' or 'LOGICAL_DNS'"); diff --git a/test/mocks/config/mocks.h b/test/mocks/config/mocks.h index f6d8677642766..f40ae034ae919 100644 --- a/test/mocks/config/mocks.h +++ b/test/mocks/config/mocks.h @@ -12,8 +12,7 @@ namespace Envoy { namespace Config { -template -class MockSubscriptionCallbacks : public SubscriptionCallbacks { +template class MockSubscriptionCallbacks : public SubscriptionCallbacks { public: MockSubscriptionCallbacks() { ON_CALL(*this, resourceName(testing::_)) @@ -21,15 +20,15 @@ class MockSubscriptionCallbacks : public SubscriptionCallbacks { return resourceName_(MessageUtil::anyConvert(resource)); })); } + ~MockSubscriptionCallbacks() override {} static std::string resourceName_(const envoy::api::v2::ClusterLoadAssignment& resource) { return resource.cluster_name(); } template static std::string resourceName_(const T& resource) { return resource.name(); } // TODO(fredlas) deduplicate - MOCK_METHOD2_T(onConfigUpdate, - void(const typename 
SubscriptionCallbacks::ResourceVector& resources, - const std::string& version_info)); + MOCK_METHOD2_T(onConfigUpdate, void(const Protobuf::RepeatedPtrField& resources, + const std::string& version_info)); MOCK_METHOD3_T(onConfigUpdate, void(const Protobuf::RepeatedPtrField& added_resources, const Protobuf::RepeatedPtrField& removed_resources, @@ -38,10 +37,10 @@ class MockSubscriptionCallbacks : public SubscriptionCallbacks { MOCK_METHOD1_T(resourceName, std::string(const ProtobufWkt::Any& resource)); }; -template class MockSubscription : public Subscription { +class MockSubscription : public Subscription { public: - MOCK_METHOD2_T(start, void(const std::vector& resources, - SubscriptionCallbacks& callbacks)); + MOCK_METHOD2_T(start, + void(const std::vector& resources, SubscriptionCallbacks& callbacks)); MOCK_METHOD1_T(updateResources, void(const std::vector& resources)); }; diff --git a/test/server/lds_api_test.cc b/test/server/lds_api_test.cc index 87e5ff11c921f..9e2c561ba0722 100644 --- a/test/server/lds_api_test.cc +++ b/test/server/lds_api_test.cc @@ -113,14 +113,15 @@ class LdsApiTest : public testing::Test { EXPECT_CALL(listener_manager_, listeners()).WillOnce(Return(refs)); } - void addListener(Protobuf::RepeatedPtrField& listeners, + void addListener(Protobuf::RepeatedPtrField& listeners, const std::string& listener_name) { - auto listener = listeners.Add(); - listener->set_name(listener_name); - auto socket_address = listener->mutable_address()->mutable_socket_address(); + envoy::api::v2::Listener listener; + listener.set_name(listener_name); + auto socket_address = listener.mutable_address()->mutable_socket_address(); socket_address->set_address(listener_name); socket_address->set_port_value(1); - listener->add_filter_chains(); + listener.add_filter_chains(); + listeners.Add()->PackFrom(listener); } NiceMock cluster_manager_; @@ -148,8 +149,9 @@ TEST_F(LdsApiTest, ValidateFail) { setup(); - Protobuf::RepeatedPtrField listeners; - listeners.Add(); + 
Protobuf::RepeatedPtrField listeners; + envoy::api::v2::Listener listener; + listeners.Add()->PackFrom(listener); EXPECT_THROW(lds_->onConfigUpdate(listeners, ""), ProtoValidationException); EXPECT_CALL(request_, cancel()); @@ -183,16 +185,16 @@ TEST_F(LdsApiTest, MisconfiguredListenerNameIsPresentInException) { setup(); - Protobuf::RepeatedPtrField listeners; + Protobuf::RepeatedPtrField listeners; std::vector> existing_listeners; // Construct a minimal listener that would pass proto validation. - auto listener = listeners.Add(); - listener->set_name("invalid-listener"); - auto socket_address = listener->mutable_address()->mutable_socket_address(); + envoy::api::v2::Listener listener; + listener.set_name("invalid-listener"); + auto socket_address = listener.mutable_address()->mutable_socket_address(); socket_address->set_address("invalid-address"); socket_address->set_port_value(1); - listener->add_filter_chains(); + listener.add_filter_chains(); EXPECT_CALL(listener_manager_, listeners()).WillOnce(Return(existing_listeners)); @@ -200,6 +202,7 @@ TEST_F(LdsApiTest, MisconfiguredListenerNameIsPresentInException) { .WillOnce(Throw(EnvoyException("something is wrong"))); EXPECT_CALL(init_watcher_, ready()); + listeners.Add()->PackFrom(listener); EXPECT_THROW_WITH_MESSAGE( lds_->onConfigUpdate(listeners, ""), EnvoyException, "Error adding/updating listener(s) invalid-listener: something is wrong"); @@ -211,7 +214,7 @@ TEST_F(LdsApiTest, EmptyListenersUpdate) { setup(); - Protobuf::RepeatedPtrField listeners; + Protobuf::RepeatedPtrField listeners; std::vector> existing_listeners; EXPECT_CALL(listener_manager_, listeners()).WillOnce(Return(existing_listeners)); @@ -227,7 +230,7 @@ TEST_F(LdsApiTest, ListenerCreationContinuesEvenAfterException) { setup(); - Protobuf::RepeatedPtrField listeners; + Protobuf::RepeatedPtrField listeners; std::vector> existing_listeners; // Add 4 listeners - 2 valid and 2 invalid. 
@@ -258,12 +261,9 @@ TEST_F(LdsApiTest, ValidateDuplicateListeners) { setup(); - Protobuf::RepeatedPtrField listeners; - auto* listener_1 = listeners.Add(); - listener_1->set_name("duplicate_listener"); - - auto* listener_2 = listeners.Add(); - listener_2->set_name("duplicate_listener"); + Protobuf::RepeatedPtrField listeners; + addListener(listeners, "duplicate_listener"); + addListener(listeners, "duplicate_listener"); EXPECT_THROW_WITH_MESSAGE(lds_->onConfigUpdate(listeners, ""), EnvoyException, "duplicate listener duplicate_listener found"); From f822a7774477fb8d069e275d648cf09ac144647d Mon Sep 17 00:00:00 2001 From: Neil Date: Fri, 29 Mar 2019 22:29:37 +0800 Subject: [PATCH 036/165] Examples: Update gen script of grpc example service (#6372) Signed-off-by: neilhong --- examples/grpc-bridge/service/script/gen | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/grpc-bridge/service/script/gen b/examples/grpc-bridge/service/script/gen index 5813e58d20556..bed331f80c1de 100755 --- a/examples/grpc-bridge/service/script/gen +++ b/examples/grpc-bridge/service/script/gen @@ -4,7 +4,7 @@ set -e cd $(dirname $0)/.. -rm -rf generated/* +rm -rf gen/* # generate the protobufs protoc --go_out=plugins=grpc:./gen \ From 43ffbeee88749f9c4fd4c49b364c5d26e9eb16ce Mon Sep 17 00:00:00 2001 From: htuch Date: Fri, 29 Mar 2019 10:39:33 -0400 Subject: [PATCH 037/165] security: update distributor application example to include e-mail. (#6425) This will reduce one of the manual steps we keep repeating. Signed-off-by: Harvey Tuch --- SECURITY_RELEASE_PROCESS.md | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/SECURITY_RELEASE_PROCESS.md b/SECURITY_RELEASE_PROCESS.md index 8a2c569da83c8..28c552d5673f7 100644 --- a/SECURITY_RELEASE_PROCESS.md +++ b/SECURITY_RELEASE_PROCESS.md @@ -256,6 +256,11 @@ We are definitely willing to help! > 8. Have someone already on the list vouch for the person requesting membership on behalf of your distribution. 
-CrashOverride will vouch for Acidburn joining the list on behalf of the "Seven" -distribution. +CrashOverride will vouch for the "Seven" distribution joining the distribution list. + +> 9. Nominate an e-mail alias or list for your organization to receive updates. This should not be + an individual user address, but instead a list that can be maintained by your organization as + individuals come and go. A good example is envoy-security@seven.com, a bad example is + acidburn@seven.com. You must accept the invite sent to this address or you will not receive any + e-mail updates. ``` From af7c845fc5e37ce5b271e1a7b4566f2d1e8ec290 Mon Sep 17 00:00:00 2001 From: Bartosz Borkowski Date: Fri, 29 Mar 2019 18:11:00 +0100 Subject: [PATCH 038/165] router: support prefix wildcards in virtual hosts domains (#6303) Adds prefix wildcard support (foo.*) in virtual host domains Risk Level: Low (does not change current behavior) Testing: Unit tests Docs Changes: updated domains field documentation in proto file Release Notes: updated Fixes #1269 Signed-off-by: Bartosz Borkowski --- api/envoy/api/v2/route/route.proto | 16 ++-- docs/root/intro/version_history.rst | 1 + source/common/router/config_impl.cc | 51 ++++++++---- source/common/router/config_impl.h | 14 +++- test/common/router/config_impl_test.cc | 107 +++++++++++++++++++++++++ 5 files changed, 164 insertions(+), 25 deletions(-) diff --git a/api/envoy/api/v2/route/route.proto b/api/envoy/api/v2/route/route.proto index 0c84cfbcf35cc..af984991bccad 100644 --- a/api/envoy/api/v2/route/route.proto +++ b/api/envoy/api/v2/route/route.proto @@ -39,17 +39,21 @@ message VirtualHost { string name = 1 [(validate.rules).string.min_bytes = 1]; // A list of domains (host/authority header) that will be matched to this - // virtual host. Wildcard hosts are supported in the form of ``*.foo.com`` or - // ``*-bar.foo.com``. + // virtual host. Wildcard hosts are supported in the suffix or prefix form. + // + // Domain search order: + // 1. 
Exact domain names: ``www.foo.com``. + // 2. Suffix domain wildcards: ``*.foo.com`` or ``*-bar.foo.com``. + // 3. Prefix domain wildcards: ``foo.*`` or ``foo-*``. + // 4. Special wildcard ``*`` matching any domain. // // .. note:: // // The wildcard will not match the empty string. // e.g. ``*-bar.foo.com`` will match ``baz-bar.foo.com`` but not ``-bar.foo.com``. - // Additionally, a special entry ``*`` is allowed which will match any - // host/authority header. Only a single virtual host in the entire route - // configuration can match on ``*``. A domain must be unique across all virtual - // hosts or the config will fail to load. + // The longest wildcards match first. + // Only a single virtual host in the entire route configuration can match on ``*``. A domain + // must be unique across all virtual hosts or the config will fail to load. repeated string domains = 2 [(validate.rules).repeated .min_items = 1]; // The list of routes that will be matched, in order, for incoming requests. diff --git a/docs/root/intro/version_history.rst b/docs/root/intro/version_history.rst index b601e6f1dad62..f7a360894180f 100644 --- a/docs/root/intro/version_history.rst +++ b/docs/root/intro/version_history.rst @@ -61,6 +61,7 @@ Version history * router: added reset reason to response body when upstream reset happens. After this change, the response body will be of the form `upstream connect error or disconnect/reset before headers. reset reason:` * router: added :ref:`rq_reset_after_downstream_response_started ` counter stat to router stats. * router: added per-route configuration of :ref:`internal redirects `. +* router: added support for prefix wildcards in :ref:`virtual host domains` * stats: added support for histograms in prometheus * stats: added usedonly flag to prometheus stats to only output metrics which have been updated at least once. 
diff --git a/source/common/router/config_impl.cc b/source/common/router/config_impl.cc index 3821bec3e7e42..12a5edffdfe2a 100644 --- a/source/common/router/config_impl.cc +++ b/source/common/router/config_impl.cc @@ -941,19 +941,21 @@ const RouteSpecificFilterConfig* VirtualHostImpl::perFilterConfig(const std::str return per_filter_configs_.get(name); } -const VirtualHostImpl* RouteMatcher::findWildcardVirtualHost(const std::string& host) const { - // We do a longest wildcard suffix match against the host that's passed in. - // (e.g. foo-bar.baz.com should match *-bar.baz.com before matching *.baz.com) - // This is done by scanning the length => wildcards map looking for every - // wildcard whose size is < length. - for (const auto& iter : wildcard_virtual_host_suffixes_) { +const VirtualHostImpl* RouteMatcher::findWildcardVirtualHost( + const std::string& host, const RouteMatcher::WildcardVirtualHosts& wildcard_virtual_hosts, + RouteMatcher::SubstringFunction substring_function) const { + // We do a longest wildcard match against the host that's passed in + // (e.g. foo-bar.baz.com should match *-bar.baz.com before matching *.baz.com for suffix + // wildcards). This is done by scanning the length => wildcards map looking for every wildcard + // whose size is < length. + for (const auto& iter : wildcard_virtual_hosts) { const uint32_t wildcard_length = iter.first; const auto& wildcard_map = iter.second; // >= because *.foo.com shouldn't match .foo.com. 
if (wildcard_length >= host.size()) { continue; } - const auto& match = wildcard_map.find(host.substr(host.size() - wildcard_length)); + const auto& match = wildcard_map.find(substring_function(host, wildcard_length)); if (match != wildcard_map.end()) { return match->second.get(); } @@ -970,20 +972,26 @@ RouteMatcher::RouteMatcher(const envoy::api::v2::RouteConfiguration& route_confi factory_context, validate_clusters)); for (const std::string& domain_name : virtual_host_config.domains()) { const std::string domain = Http::LowerCaseString(domain_name).get(); + bool duplicate_found = false; if ("*" == domain) { if (default_virtual_host_) { throw EnvoyException(fmt::format("Only a single wildcard domain is permitted")); } default_virtual_host_ = virtual_host; } else if (domain.size() > 0 && '*' == domain[0]) { - wildcard_virtual_host_suffixes_[domain.size() - 1].emplace(domain.substr(1), virtual_host); + duplicate_found = !wildcard_virtual_host_suffixes_[domain.size() - 1] + .emplace(domain.substr(1), virtual_host) + .second; + } else if (domain.size() > 0 && '*' == domain[domain.size() - 1]) { + duplicate_found = !wildcard_virtual_host_prefixes_[domain.size() - 1] + .emplace(domain.substr(0, domain.size() - 1), virtual_host) + .second; } else { - if (virtual_hosts_.find(domain) != virtual_hosts_.end()) { - throw EnvoyException(fmt::format( - "Only unique values for domains are permitted. Duplicate entry of domain {}", - domain)); - } - virtual_hosts_.emplace(domain, virtual_host); + duplicate_found = !virtual_hosts_.emplace(domain, virtual_host).second; + } + if (duplicate_found) { + throw EnvoyException(fmt::format( + "Only unique values for domains are permitted. Duplicate entry of domain {}", domain)); } } } @@ -1012,7 +1020,8 @@ RouteConstSharedPtr VirtualHostImpl::getRouteFromEntries(const Http::HeaderMap& const VirtualHostImpl* RouteMatcher::findVirtualHost(const Http::HeaderMap& headers) const { // Fast path the case where we only have a default virtual host. 
- if (virtual_hosts_.empty() && wildcard_virtual_host_suffixes_.empty() && default_virtual_host_) { + if (virtual_hosts_.empty() && wildcard_virtual_host_suffixes_.empty() && + wildcard_virtual_host_prefixes_.empty()) { return default_virtual_host_.get(); } @@ -1024,7 +1033,17 @@ const VirtualHostImpl* RouteMatcher::findVirtualHost(const Http::HeaderMap& head return iter->second.get(); } if (!wildcard_virtual_host_suffixes_.empty()) { - const VirtualHostImpl* vhost = findWildcardVirtualHost(host); + const VirtualHostImpl* vhost = findWildcardVirtualHost( + host, wildcard_virtual_host_suffixes_, + [](const std::string& h, int l) -> std::string { return h.substr(h.size() - l); }); + if (vhost != nullptr) { + return vhost; + } + } + if (!wildcard_virtual_host_prefixes_.empty()) { + const VirtualHostImpl* vhost = findWildcardVirtualHost( + host, wildcard_virtual_host_prefixes_, + [](const std::string& h, int l) -> std::string { return h.substr(0, l); }); if (vhost != nullptr) { return vhost; } diff --git a/source/common/router/config_impl.h b/source/common/router/config_impl.h index 6a010d8818da7..a3285b722c43b 100644 --- a/source/common/router/config_impl.h +++ b/source/common/router/config_impl.h @@ -722,7 +722,14 @@ class RouteMatcher { private: const VirtualHostImpl* findVirtualHost(const Http::HeaderMap& headers) const; - const VirtualHostImpl* findWildcardVirtualHost(const std::string& host) const; + + typedef std::map, + std::greater> + WildcardVirtualHosts; + typedef std::function SubstringFunction; + const VirtualHostImpl* findWildcardVirtualHost(const std::string& host, + const WildcardVirtualHosts& wildcard_virtual_hosts, + SubstringFunction substring_function) const; std::unordered_map virtual_hosts_; // std::greater as a minor optimization to iterate from more to less specific @@ -734,8 +741,9 @@ class RouteMatcher { // and climbs to about 110ns once there are any entries. // // The break-even is 4 entries. 
- std::map, std::greater> - wildcard_virtual_host_suffixes_; + WildcardVirtualHosts wildcard_virtual_host_suffixes_; + WildcardVirtualHosts wildcard_virtual_host_prefixes_; + VirtualHostSharedPtr default_virtual_host_; }; diff --git a/test/common/router/config_impl_test.cc b/test/common/router/config_impl_test.cc index 8bf39550ff09b..0c1734faf4991 100644 --- a/test/common/router/config_impl_test.cc +++ b/test/common/router/config_impl_test.cc @@ -2645,6 +2645,113 @@ TEST_F(RouteMatcherTest, TestCaseSensitiveDomainConfig) { "Only unique values for domains are permitted. Duplicate entry of domain www.lyft.com"); } +TEST_F(RouteMatcherTest, TestDuplicateWildcardDomainConfig) { + const std::string yaml = R"EOF( +virtual_hosts: +- name: www2 + domains: ["*"] + routes: + - match: { prefix: "/" } + route: { cluster: www2 } +- name: www2_staging + domains: ["*"] + routes: + - match: { prefix: "/" } + route: { cluster: www2_staging } + )EOF"; + + EXPECT_THROW_WITH_MESSAGE( + TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), + EnvoyException, "Only a single wildcard domain is permitted"); +} + +TEST_F(RouteMatcherTest, TestDuplicateSuffixWildcardDomainConfig) { + const std::string yaml = R"EOF( +virtual_hosts: +- name: www2 + domains: ["*.lyft.com"] + routes: + - match: { prefix: "/" } + route: { cluster: www2 } +- name: www2_staging + domains: ["*.LYFT.COM"] + routes: + - match: { prefix: "/" } + route: { cluster: www2_staging } + )EOF"; + + EXPECT_THROW_WITH_MESSAGE( + TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), + EnvoyException, + "Only unique values for domains are permitted. 
Duplicate entry of domain *.lyft.com"); +} + +TEST_F(RouteMatcherTest, TestDuplicatePrefixWildcardDomainConfig) { + const std::string yaml = R"EOF( +virtual_hosts: +- name: www2 + domains: ["bar.*"] + routes: + - match: { prefix: "/" } + route: { cluster: www2 } +- name: www2_staging + domains: ["BAR.*"] + routes: + - match: { prefix: "/" } + route: { cluster: www2_staging } + )EOF"; + + EXPECT_THROW_WITH_MESSAGE( + TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), + EnvoyException, + "Only unique values for domains are permitted. Duplicate entry of domain bar.*"); +} + +TEST_F(RouteMatcherTest, TestDomainMatchOrderConfig) { + const std::string yaml = R"EOF( +virtual_hosts: +- name: exact + domains: ["www.example.com", "www.example.cc", "wwww.example.com" ] + routes: + - match: { prefix: "/" } + route: { cluster: exact } +- name: suffix + domains: ["*w.example.com" ] + routes: + - match: { prefix: "/" } + route: { cluster: suffix } +- name: prefix + domains: ["www.example.c*", "ww.example.c*"] + routes: + - match: { prefix: "/" } + route: { cluster: prefix } +- name: default + domains: ["*"] + routes: + - match: { prefix: "/" } + route: { cluster: default } + )EOF"; + + TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + + EXPECT_EQ( + "exact", + config.route(genHeaders("www.example.com", "/", "GET"), 0)->routeEntry()->clusterName()); + EXPECT_EQ( + "exact", + config.route(genHeaders("wwww.example.com", "/", "GET"), 0)->routeEntry()->clusterName()); + EXPECT_EQ("exact", + config.route(genHeaders("www.example.cc", "/", "GET"), 0)->routeEntry()->clusterName()); + EXPECT_EQ("suffix", + config.route(genHeaders("ww.example.com", "/", "GET"), 0)->routeEntry()->clusterName()); + EXPECT_EQ("prefix", + config.route(genHeaders("www.example.co", "/", "GET"), 0)->routeEntry()->clusterName()); + EXPECT_EQ("default", + config.route(genHeaders("w.example.com", "/", "GET"), 0)->routeEntry()->clusterName()); + 
EXPECT_EQ("default", + config.route(genHeaders("www.example.c", "/", "GET"), 0)->routeEntry()->clusterName()); +} + static Http::TestHeaderMapImpl genRedirectHeaders(const std::string& host, const std::string& path, bool ssl, bool internal) { Http::TestHeaderMapImpl headers{ From 33182973a18ef66d24323f50da1bc1c922b6322d Mon Sep 17 00:00:00 2001 From: Joshua Marantz Date: Fri, 29 Mar 2019 15:09:08 -0400 Subject: [PATCH 039/165] test: Capture alarm->time_system_.mutex_ in a temp prior to running libevent timers, which can delete the alarm. (#6429) * When relocking after activating the libevent timer, don't rely on this->time_system_ to find the mutex as it may be deleted by the time we get control back. Signed-off-by: Joshua Marantz --- test/test_common/simulated_time_system.cc | 43 ++++++++++++++++++++--- 1 file changed, 39 insertions(+), 4 deletions(-) diff --git a/test/test_common/simulated_time_system.cc b/test/test_common/simulated_time_system.cc index 6727634b5f8d5..204c47943a4c0 100644 --- a/test/test_common/simulated_time_system.cc +++ b/test/test_common/simulated_time_system.cc @@ -12,6 +12,40 @@ namespace Envoy { namespace Event { +namespace { +class UnlockGuard { +public: + /** + * Establishes a scoped mutex-lock; the mutex is unlocked upon construction. + * The main motivation for setting up a class to manage this, rather than + * simply { mutex.unlock(); operation(); mutex.lock(); } is that in method + * Alarm::activateLockHeld(), the mutex is owned by the time-system, which + * lives long enough. However the Alarm may be destructed while the lock is + * dropped, so there can be a tsan error when re-taking time_system_.mutex_. + * + * It's also easy to make a temp mutex reference, however this confuses + * clang's thread-annotation analysis, whereas this unlock-guard seems to work + * with thread annotation. + * + * Another reason to use this Guard class is so that the mutex is re-taken + * even if there is an exception thrown while the lock is dropped. 
That is + * not likely to happen at this call-site as the functions being called don't + * throw. + * + * @param lock the mutex. + */ + explicit UnlockGuard(Thread::BasicLockable& lock) : lock_(lock) { lock_.unlock(); } + + /** + * Destruction of the UnlockGuard re-locks the lock. + */ + ~UnlockGuard() { lock_.lock(); } + +private: + Thread::BasicLockable& lock_; +}; +} // namespace + // Our simulated alarm inherits from TimerImpl so that the same dispatching // mechanism used in RealTimeSystem timers is employed for simulated alarms. class SimulatedTimeSystemHelper::Alarm : public Timer { @@ -45,13 +79,14 @@ class SimulatedTimeSystemHelper::Alarm : public Timer { armed_ = false; time_system_.incPending(); - // We don't want to activate the alarm under lock, as it will make a libevent call, - // and libevent itself uses locks: + // We don't want to activate the alarm under lock, as it will make a + // libevent call, and libevent itself uses locks: // https://github.com/libevent/libevent/blob/29cc8386a2f7911eaa9336692a2c5544d8b4734f/event.c#L1917 - time_system_.mutex_.unlock(); + // See class comment for UnlockGuard for details on saving + // time_system_.mutex_ prior to running libevent, which may delete this. + UnlockGuard unlocker(time_system_.mutex_); std::chrono::milliseconds duration = std::chrono::milliseconds::zero(); base_timer_->enableTimer(duration); - time_system_.mutex_.lock(); } MonotonicTime time() const EXCLUSIVE_LOCKS_REQUIRED(time_system_.mutex_) { From 4ed3afebe9801e40095a68fa135ccf10a474818a Mon Sep 17 00:00:00 2001 From: htuch Date: Fri, 29 Mar 2019 16:14:47 -0400 Subject: [PATCH 040/165] docs/api: hide represent_ipv4_remote_address_as_ipv4_mapped_ipv6. (#6432) This is not implemented yet. Fixes #6405. 
Signed-off-by: Harvey Tuch --- .../v2/http_connection_manager.proto | 1 + docs/root/configuration/http_conn_man/headers.rst | 6 ------ docs/root/configuration/http_conn_man/runtime.rst | 11 ----------- 3 files changed, 1 insertion(+), 17 deletions(-) diff --git a/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto b/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto index 3f7620d2bc3df..71f29474a1c96 100644 --- a/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto +++ b/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto @@ -345,6 +345,7 @@ message HttpConnectionManager { // :ref:`http_connection_manager.represent_ipv4_remote_address_as_ipv4_mapped_ipv6 // ` for runtime // control. + // [#not-implemented-hide:] bool represent_ipv4_remote_address_as_ipv4_mapped_ipv6 = 20; // The configuration for HTTP upgrades. diff --git a/docs/root/configuration/http_conn_man/headers.rst b/docs/root/configuration/http_conn_man/headers.rst index c29eeaa6f7e9d..d48bac34152f6 100644 --- a/docs/root/configuration/http_conn_man/headers.rst +++ b/docs/root/configuration/http_conn_man/headers.rst @@ -325,12 +325,6 @@ A few very important notes about XFF: Envoy will not consider it internal. This is a known "bug" due to the simplification of how XFF is parsed to determine if a request is internal. In this scenario, do not forward XFF and allow Envoy to generate a new one with a single internal origin IP. -3. Testing IPv6 in a large multi-hop system can be difficult from a change management perspective. - For testing IPv6 compatibility of upstream services which parse XFF header values, - :ref:`represent_ipv4_remote_address_as_ipv4_mapped_ipv6 ` - can be enabled in the v2 API. Envoy will append an IPv4 address in mapped IPv6 format, e.g. - ::FFFF:50.0.0.1. 
This change will also apply to - :ref:`config_http_conn_man_headers_x-envoy-external-address`. .. _config_http_conn_man_headers_x-forwarded-proto: diff --git a/docs/root/configuration/http_conn_man/runtime.rst b/docs/root/configuration/http_conn_man/runtime.rst index 9b5286bd02b68..22fc453b3ad93 100644 --- a/docs/root/configuration/http_conn_man/runtime.rst +++ b/docs/root/configuration/http_conn_man/runtime.rst @@ -5,17 +5,6 @@ Runtime The HTTP connection manager supports the following runtime settings: -.. _config_http_conn_man_runtime_represent_ipv4_remote_address_as_ipv4_mapped_ipv6: - -http_connection_manager.represent_ipv4_remote_address_as_ipv4_mapped_ipv6 - % of requests with a remote address that will have their IPv4 address mapped to IPv6. Defaults to - 0. - :ref:`use_remote_address ` - must also be enabled. See - :ref:`represent_ipv4_remote_address_as_ipv4_mapped_ipv6 - ` - for more details. - .. _config_http_conn_man_runtime_client_enabled: tracing.client_enabled From 48082bcd22fe9165eb73bed6d27857f578df63b5 Mon Sep 17 00:00:00 2001 From: Spencer Lewis Date: Fri, 29 Mar 2019 13:16:05 -0700 Subject: [PATCH 041/165] router: max retries header takes precedence over retry policies (#6421) The number of retries can be set in the virtual host retry config, the route retry config, or with the max retries header. This change makes the retries header overrule the other configs. 
Signed-off-by: Spencer Lewis --- .../http_filters/router_filter.rst | 8 ++++---- docs/root/intro/version_history.rst | 1 + source/common/router/retry_state_impl.cc | 19 ++++++++++--------- test/common/router/retry_state_impl_test.cc | 16 ++++++++++++++++ 4 files changed, 31 insertions(+), 13 deletions(-) diff --git a/docs/root/configuration/http_filters/router_filter.rst b/docs/root/configuration/http_filters/router_filter.rst index 143a438048ff2..cb5ab6a5941f6 100644 --- a/docs/root/configuration/http_filters/router_filter.rst +++ b/docs/root/configuration/http_filters/router_filter.rst @@ -28,10 +28,10 @@ x-envoy-max-retries ^^^^^^^^^^^^^^^^^^^ If a :ref:`route config retry policy ` or a :ref:`virtual host retry policy ` is in place, Envoy will default to retrying -one time unless explicitly specified. The number of retries can be explicitly set in either the virtual host retry config, -or the route retry config, or by using this header. If a retry policy is not configured and -:ref:`config_http_filters_router_x-envoy-retry-on` or :ref:`config_http_filters_router_x-envoy-retry-grpc-on` headers -are not specified, Envoy will not retry a failed request. +one time unless explicitly specified. The number of retries can be explicitly set in the virtual host retry config, +the route retry config, or by using this header. If this header is used, its value takes precedence over the number of +retries set in either retry policy. If a retry policy is not configured and :ref:`config_http_filters_router_x-envoy-retry-on` +or :ref:`config_http_filters_router_x-envoy-retry-grpc-on` headers are not specified, Envoy will not retry a failed request. 
A few notes on how Envoy does retries: diff --git a/docs/root/intro/version_history.rst b/docs/root/intro/version_history.rst index f7a360894180f..081a6695afba1 100644 --- a/docs/root/intro/version_history.rst +++ b/docs/root/intro/version_history.rst @@ -61,6 +61,7 @@ Version history * router: added reset reason to response body when upstream reset happens. After this change, the response body will be of the form `upstream connect error or disconnect/reset before headers. reset reason:` * router: added :ref:`rq_reset_after_downstream_response_started ` counter stat to router stats. * router: added per-route configuration of :ref:`internal redirects `. +* router: made :ref:`max retries header ` take precedence over the number of retries in route and virtual host retry policies. * router: added support for prefix wildcards in :ref:`virtual host domains` * stats: added support for histograms in prometheus * stats: added usedonly flag to prometheus stats to only output metrics which have been diff --git a/source/common/router/retry_state_impl.cc b/source/common/router/retry_state_impl.cc index d826ae82f341a..fa4a18c0ab122 100644 --- a/source/common/router/retry_state_impl.cc +++ b/source/common/router/retry_state_impl.cc @@ -57,8 +57,16 @@ RetryStateImpl::RetryStateImpl(const RetryPolicy& route_policy, Http::HeaderMap& retry_priority_(route_policy.retryPriority()), retriable_status_codes_(route_policy.retriableStatusCodes()) { + retry_on_ = route_policy.retryOn(); + retries_remaining_ = std::max(retries_remaining_, route_policy.numRetries()); + const uint32_t base = runtime_.snapshot().getInteger("upstream.base_retry_backoff_ms", 25); + // Cap the max interval to 10 times the base interval to ensure reasonable backoff intervals. + backoff_strategy_ = std::make_unique(base, base * 10, random_); + host_selection_max_attempts_ = route_policy.hostSelectionMaxAttempts(); + + // Merge in the headers.
if (request_headers.EnvoyRetryOn()) { - retry_on_ = parseRetryOn(request_headers.EnvoyRetryOn()->value().c_str()); + retry_on_ |= parseRetryOn(request_headers.EnvoyRetryOn()->value().c_str()); } if (request_headers.EnvoyRetryGrpcOn()) { retry_on_ |= parseRetryGrpcOn(request_headers.EnvoyRetryGrpcOn()->value().c_str()); @@ -67,6 +75,7 @@ RetryStateImpl::RetryStateImpl(const RetryPolicy& route_policy, Http::HeaderMap& const char* max_retries = request_headers.EnvoyMaxRetries()->value().c_str(); uint64_t temp; if (StringUtil::atoull(max_retries, temp)) { + // The max retries header takes precedence if set. retries_remaining_ = temp; } } @@ -79,14 +88,6 @@ RetryStateImpl::RetryStateImpl(const RetryPolicy& route_policy, Http::HeaderMap& } } } - - // Merge in the route policy. - retry_on_ |= route_policy.retryOn(); - retries_remaining_ = std::max(retries_remaining_, route_policy.numRetries()); - const uint32_t base = runtime_.snapshot().getInteger("upstream.base_retry_backoff_ms", 25); - // Cap the max interval to 10 times the base interval to ensure reasonable backoff intervals. 
- backoff_strategy_ = std::make_unique(base, base * 10, random_); - host_selection_max_attempts_ = route_policy.hostSelectionMaxAttempts(); } RetryStateImpl::~RetryStateImpl() { resetRetry(); } diff --git a/test/common/router/retry_state_impl_test.cc b/test/common/router/retry_state_impl_test.cc index 813daa4038b08..06f5f209ccd22 100644 --- a/test/common/router/retry_state_impl_test.cc +++ b/test/common/router/retry_state_impl_test.cc @@ -435,6 +435,8 @@ TEST_F(RouterRetryStateImplTest, NoAvailableRetries) { } TEST_F(RouterRetryStateImplTest, MaxRetriesHeader) { + // The max retries header will take precedence over the policy + policy_.num_retries_ = 4; Http::TestHeaderMapImpl request_headers{{"x-envoy-retry-on", "connect-failure"}, {"x-envoy-retry-grpc-on", "cancelled"}, {"x-envoy-max-retries", "3"}}; @@ -522,6 +524,20 @@ TEST_F(RouterRetryStateImplTest, Cancel) { EXPECT_EQ(RetryStatus::Yes, state_->shouldRetryReset(connect_failure_, callback_)); } +TEST_F(RouterRetryStateImplTest, ZeroMaxRetriesHeader) { + Http::TestHeaderMapImpl request_headers{{"x-envoy-retry-on", "connect-failure"}, + {"x-envoy-retry-grpc-on", "cancelled"}, + {"x-envoy-max-retries", "0"}}; + setup(request_headers); + EXPECT_FALSE(request_headers.has("x-envoy-retry-on")); + EXPECT_FALSE(request_headers.has("x-envoy-retry-grpc-on")); + EXPECT_FALSE(request_headers.has("x-envoy-max-retries")); + EXPECT_TRUE(state_->enabled()); + + EXPECT_EQ(RetryStatus::NoRetryLimitExceeded, + state_->shouldRetryReset(connect_failure_, callback_)); +} + } // namespace } // namespace Router } // namespace Envoy From 075edf802b9ef94bdf26fddc9e775f8bb89df92d Mon Sep 17 00:00:00 2001 From: Gabriel Sagula Date: Mon, 1 Apr 2019 02:09:28 -0700 Subject: [PATCH 042/165] ext_authz: support for buffering request body (#5824) This PR adds support to `ext_authz` filter for buffering the request data. This is useful when the authorization server needs to check the request body, e.g. HMAC validation. 
Fixes #5676 *Risk Level*: low *Testing*: unit *Docs Changes*: yes *Release Notes*: yes Signed-off-by: Gabriel --- .../filter/http/ext_authz/v2/ext_authz.proto | 17 ++ api/envoy/service/auth/v2/BUILD | 1 + .../service/auth/v2/attribute_context.proto | 4 + docs/root/intro/version_history.rst | 6 +- source/common/http/headers.h | 3 +- .../common/ext_authz/check_request_utils.cc | 19 +- .../common/ext_authz/check_request_utils.h | 10 +- .../common/ext_authz/ext_authz_http_impl.cc | 20 +- .../extensions/filters/http/ext_authz/BUILD | 1 + .../filters/http/ext_authz/ext_authz.cc | 80 ++++-- .../filters/http/ext_authz/ext_authz.h | 23 +- .../ext_authz/check_request_utils_test.cc | 75 +++++- .../filters/http/ext_authz/config_test.cc | 2 + .../filters/http/ext_authz/ext_authz_test.cc | 239 +++++++++++++++++- 14 files changed, 446 insertions(+), 54 deletions(-) diff --git a/api/envoy/config/filter/http/ext_authz/v2/ext_authz.proto b/api/envoy/config/filter/http/ext_authz/v2/ext_authz.proto index e79e59865c06a..abe1638b858e6 100644 --- a/api/envoy/config/filter/http/ext_authz/v2/ext_authz.proto +++ b/api/envoy/config/filter/http/ext_authz/v2/ext_authz.proto @@ -50,6 +50,23 @@ message ExtAuthz { // semantically compatible. Deprecation note: This field is deprecated and should only be used for // version upgrade. See release notes for more details. bool use_alpha = 4 [deprecated = true]; + + // Enables filter to buffer the client request body and send it within the authorization request. + BufferSettings with_request_body = 5; +} + +// Configuration for buffering the request data. +message BufferSettings { + // Sets the maximum size of a message body that the filter will hold in memory. Envoy will return + // *HTTP 413* and will *not* initiate the authorization process when buffer reaches the number + // set in this field. Note that this setting will have precedence over :ref:`failure_mode_allow + // `. 
+ uint32 max_request_bytes = 1 [(validate.rules).uint32.gt = 0]; + + // When this field is true, Envoy will buffer the message until *max_request_bytes* is reached. + // The authorization request will be dispatched and no 413 HTTP error will be returned by the + // filter. + bool allow_partial_message = 2; } // HttpService is used for raw HTTP communication between the filter and the authorization service. diff --git a/api/envoy/service/auth/v2/BUILD b/api/envoy/service/auth/v2/BUILD index 5cf93deb777cf..57041668ddc8e 100644 --- a/api/envoy/service/auth/v2/BUILD +++ b/api/envoy/service/auth/v2/BUILD @@ -9,6 +9,7 @@ api_proto_library_internal( ], deps = [ "//envoy/api/v2/core:address", + "//envoy/api/v2/core:base", ], ) diff --git a/api/envoy/service/auth/v2/attribute_context.proto b/api/envoy/service/auth/v2/attribute_context.proto index 7cf7e18eae4c0..b110cec50ed20 100644 --- a/api/envoy/service/auth/v2/attribute_context.proto +++ b/api/envoy/service/auth/v2/attribute_context.proto @@ -6,6 +6,7 @@ option java_outer_classname = "AttributeContextProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.service.auth.v2"; +import "envoy/api/v2/core/base.proto"; import "envoy/api/v2/core/address.proto"; import "google/protobuf/timestamp.proto"; @@ -112,6 +113,9 @@ message AttributeContext { // See :repo:`headers.h:ProtocolStrings ` for a list of all // possible values. string protocol = 10; + + // The HTTP request body. + string body = 11; } // The source of a network activity, such as starting a TCP connection. diff --git a/docs/root/intro/version_history.rst b/docs/root/intro/version_history.rst index 081a6695afba1..601c4af05e521 100644 --- a/docs/root/intro/version_history.rst +++ b/docs/root/intro/version_history.rst @@ -24,8 +24,10 @@ Version history * config: use Envoy cpuset size to set the default number or worker threads if :option:`--cpuset-threads` is enabled. * config: added support for :ref:`initial_fetch_timeout `. 
The timeout is disabled by default. * cors: added :ref:`filter_enabled & shadow_enabled RuntimeFractionalPercent flags ` to filter. -* ext_authz: added an configurable option to make the gRPC service cross-compatible with V2Alpha. Note that this feature is already deprecated. It should be used for a short time, and only when transitioning from alpha to V2 release version. -* ext_authz: migrated from V2alpha to V2 and improved the documentation. +* ext_authz: added support for buffering request body. +* ext_authz: migrated from v2alpha to v2 and improved docs. +* ext_authz: added a configurable option to make the gRPC service cross-compatible with V2Alpha. Note that this feature is already deprecated. It should be used for a short time, and only when transitioning from alpha to V2 release version. +* ext_authz: migrated from v2alpha to v2 and improved the documentation. * ext_authz: authorization request and response configuration has been separated into two distinct objects: :ref:`authorization request ` and :ref:`authorization response `. 
In addition, :ref:`client headers diff --git a/source/common/http/headers.h b/source/common/http/headers.h index 0f90238da3d70..79c0b1cd153c7 100644 --- a/source/common/http/headers.h +++ b/source/common/http/headers.h @@ -172,8 +172,9 @@ class HeaderValues { const std::string Connect{"CONNECT"}; const std::string Get{"GET"}; const std::string Head{"HEAD"}; - const std::string Post{"POST"}; const std::string Options{"OPTIONS"}; + const std::string Post{"POST"}; + const std::string Trace{"TRACE"}; } MethodValues; struct { diff --git a/source/extensions/filters/common/ext_authz/check_request_utils.cc b/source/extensions/filters/common/ext_authz/check_request_utils.cc index c887b4954eda1..44b87f00b8bd8 100644 --- a/source/extensions/filters/common/ext_authz/check_request_utils.cc +++ b/source/extensions/filters/common/ext_authz/check_request_utils.cc @@ -76,7 +76,7 @@ std::string CheckRequestUtils::getHeaderStr(const Envoy::Http::HeaderEntry* entr void CheckRequestUtils::setHttpRequest( ::envoy::service::auth::v2::AttributeContext_HttpRequest& httpreq, const Envoy::Http::StreamDecoderFilterCallbacks* callbacks, - const Envoy::Http::HeaderMap& headers) { + const Envoy::Http::HeaderMap& headers, uint64_t max_request_bytes) { // Set id // The streamId is not qualified as a const. Although it is as it does not modify the object. @@ -116,20 +116,29 @@ void CheckRequestUtils::setHttpRequest( return Envoy::Http::HeaderMap::Iterate::Continue; }, mutable_headers); + + // Set request body. 
+ const Buffer::Instance* buffer = sdfc->decodingBuffer(); + if (max_request_bytes > 0 && buffer != nullptr) { + const uint64_t length = std::min(buffer->length(), max_request_bytes); + std::string data(length, 0); + buffer->copyOut(0, length, &data[0]); + httpreq.set_body(std::move(data)); + } } void CheckRequestUtils::setAttrContextRequest( ::envoy::service::auth::v2::AttributeContext_Request& req, const Envoy::Http::StreamDecoderFilterCallbacks* callbacks, - const Envoy::Http::HeaderMap& headers) { - setHttpRequest(*req.mutable_http(), callbacks, headers); + const Envoy::Http::HeaderMap& headers, uint64_t max_request_bytes) { + setHttpRequest(*req.mutable_http(), callbacks, headers, max_request_bytes); } void CheckRequestUtils::createHttpCheck( const Envoy::Http::StreamDecoderFilterCallbacks* callbacks, const Envoy::Http::HeaderMap& headers, Protobuf::Map&& context_extensions, - envoy::service::auth::v2::CheckRequest& request) { + envoy::service::auth::v2::CheckRequest& request, uint64_t max_request_bytes) { auto attrs = request.mutable_attributes(); @@ -140,7 +149,7 @@ void CheckRequestUtils::createHttpCheck( setAttrContextPeer(*attrs->mutable_source(), *cb->connection(), service, false); setAttrContextPeer(*attrs->mutable_destination(), *cb->connection(), "", true); - setAttrContextRequest(*attrs->mutable_request(), callbacks, headers); + setAttrContextRequest(*attrs->mutable_request(), callbacks, headers, max_request_bytes); // Fill in the context extensions: (*attrs->mutable_context_extensions()) = std::move(context_extensions); diff --git a/source/extensions/filters/common/ext_authz/check_request_utils.h b/source/extensions/filters/common/ext_authz/check_request_utils.h index a3214d17b50d9..da89fe72fe4e7 100644 --- a/source/extensions/filters/common/ext_authz/check_request_utils.h +++ b/source/extensions/filters/common/ext_authz/check_request_utils.h @@ -43,20 +43,19 @@ class CheckRequestUtils { * @param headers supplies the header map with http headers 
that will be used to create the * check request. * @param request is the reference to the check request that will be filled up. - * + * @param max_request_bytes the maximum number of request body bytes to add to the check request; 0 means no body is added. */ static void createHttpCheck(const Envoy::Http::StreamDecoderFilterCallbacks* callbacks, const Envoy::Http::HeaderMap& headers, Protobuf::Map&& context_extensions, - envoy::service::auth::v2::CheckRequest& request); + envoy::service::auth::v2::CheckRequest& request, uint64_t max_request_bytes); /** * createTcpCheck is used to extract the attributes from the network layer and fill them up * in the CheckRequest proto message. * @param callbacks supplies the network layer context from which data can be extracted. * @param request is the reference to the check request that will be filled up. - * */ static void createTcpCheck(const Network::ReadFilterCallbacks* callbacks, envoy::service::auth::v2::CheckRequest& request); @@ -67,10 +66,11 @@ class CheckRequestUtils { const bool local); static void setHttpRequest(::envoy::service::auth::v2::AttributeContext_HttpRequest& httpreq, const Envoy::Http::StreamDecoderFilterCallbacks* callbacks, - const Envoy::Http::HeaderMap& headers); + const Envoy::Http::HeaderMap& headers, uint64_t max_request_bytes); static void setAttrContextRequest(::envoy::service::auth::v2::AttributeContext_Request& req, const Envoy::Http::StreamDecoderFilterCallbacks* callbacks, - const Envoy::Http::HeaderMap& headers); + const Envoy::Http::HeaderMap& headers, + uint64_t max_request_bytes); static std::string getHeaderStr(const Envoy::Http::HeaderEntry* entry); static Envoy::Http::HeaderMap::Iterate fillHttpHeaders(const Envoy::Http::HeaderEntry&, void*); }; diff --git a/source/extensions/filters/common/ext_authz/ext_authz_http_impl.cc b/source/extensions/filters/common/ext_authz/ext_authz_http_impl.cc index f288ca594736b..3d7760ece43d6 100644 --- a/source/extensions/filters/common/ext_authz/ext_authz_http_impl.cc +++
b/source/extensions/filters/common/ext_authz/ext_authz_http_impl.cc @@ -161,7 +161,17 @@ void RawHttpClientImpl::check(RequestCallbacks& callbacks, ASSERT(callbacks_ == nullptr); callbacks_ = &callbacks; - Http::HeaderMapPtr headers = std::make_unique(lengthZeroHeader()); + Http::HeaderMapPtr headers; + const uint64_t request_length = request.attributes().request().http().body().size(); + if (request_length > 0) { + headers = + std::make_unique>>( + {{Http::Headers::get().ContentLength, std::to_string(request_length)}}); + } else { + headers = std::make_unique(lengthZeroHeader()); + } + for (const auto& header : request.attributes().request().http().headers()) { const Http::LowerCaseString key{header.first}; if (config_->requestHeaderMatchers()->matches(key.get())) { @@ -179,8 +189,14 @@ void RawHttpClientImpl::check(RequestCallbacks& callbacks, headers->setReference(header_to_add.first, header_to_add.second); } + Http::MessagePtr message = std::make_unique(std::move(headers)); + if (request_length > 0) { + message->body() = + std::make_unique(request.attributes().request().http().body()); + } + request_ = cm_.httpAsyncClientForCluster(config_->cluster()) - .send(std::make_unique(std::move(headers)), *this, + .send(std::move(message), *this, Http::AsyncClient::RequestOptions().setTimeout(config_->timeout())); } diff --git a/source/extensions/filters/http/ext_authz/BUILD b/source/extensions/filters/http/ext_authz/BUILD index 423a8b51b338d..f5b4e7eacea12 100644 --- a/source/extensions/filters/http/ext_authz/BUILD +++ b/source/extensions/filters/http/ext_authz/BUILD @@ -24,6 +24,7 @@ envoy_cc_library( "//source/common/common:matchers_lib", "//source/common/common:minimal_logger_lib", "//source/common/http:codes_lib", + "//source/common/http:utility_lib", "//source/common/router:config_lib", "//source/extensions/filters/common/ext_authz:ext_authz_grpc_lib", "//source/extensions/filters/common/ext_authz:ext_authz_http_lib", diff --git 
a/source/extensions/filters/http/ext_authz/ext_authz.cc b/source/extensions/filters/http/ext_authz/ext_authz.cc index a4dac53b8f2ad..6a719bac889c3 100644 --- a/source/extensions/filters/http/ext_authz/ext_authz.cc +++ b/source/extensions/filters/http/ext_authz/ext_authz.cc @@ -3,6 +3,7 @@ #include "common/common/assert.h" #include "common/common/enum_to_int.h" #include "common/http/codes.h" +#include "common/http/utility.h" #include "common/router/config_impl.h" #include "extensions/filters/http/well_known_names.h" @@ -54,31 +55,57 @@ void Filter::initiateCall(const Http::HeaderMap& headers) { context_extensions = maybe_merged_per_route_config.value().takeContextExtensions(); } Filters::Common::ExtAuthz::CheckRequestUtils::createHttpCheck( - callbacks_, headers, std::move(context_extensions), check_request_); + callbacks_, headers, std::move(context_extensions), check_request_, + config_->maxRequestBytes()); + ENVOY_STREAM_LOG(trace, "ext_authz filter calling authorization server", *callbacks_); state_ = State::Calling; - // Don't let the filter chain continue as we are going to invoke check call. - filter_return_ = FilterReturn::StopDecoding; + filter_return_ = FilterReturn::StopDecoding; // Don't let the filter chain continue as we are + // going to invoke check call. 
initiating_call_ = true; - ENVOY_STREAM_LOG(trace, "ext_authz filter calling authorization server", *callbacks_); client_->check(*this, check_request_, callbacks_->activeSpan()); initiating_call_ = false; } -Http::FilterHeadersStatus Filter::decodeHeaders(Http::HeaderMap& headers, bool) { +Http::FilterHeadersStatus Filter::decodeHeaders(Http::HeaderMap& headers, bool end_stream) { request_headers_ = &headers; + buffer_data_ = config_->withRequestBody() && + !(end_stream || Http::Utility::isWebSocketUpgradeRequest(headers) || + Http::Utility::isH2UpgradeRequest(headers)); + if (buffer_data_) { + ENVOY_STREAM_LOG(debug, "ext_authz filter is buffering the request", *callbacks_); + if (!config_->allowPartialMessage()) { + callbacks_->setDecoderBufferLimit(config_->maxRequestBytes()); + } + return Http::FilterHeadersStatus::StopIteration; + } + initiateCall(headers); return filter_return_ == FilterReturn::StopDecoding ? Http::FilterHeadersStatus::StopIteration : Http::FilterHeadersStatus::Continue; } -Http::FilterDataStatus Filter::decodeData(Buffer::Instance&, bool) { +Http::FilterDataStatus Filter::decodeData(Buffer::Instance&, bool end_stream) { + if (buffer_data_) { + if (end_stream || isBufferFull()) { + ENVOY_STREAM_LOG(debug, "ext_authz filter finished buffering the request", *callbacks_); + initiateCall(*request_headers_); + } else { + return Http::FilterDataStatus::StopIterationAndBuffer; + } + } + return filter_return_ == FilterReturn::StopDecoding ? Http::FilterDataStatus::StopIterationAndWatermark : Http::FilterDataStatus::Continue; } Http::FilterTrailersStatus Filter::decodeTrailers(Http::HeaderMap&) { + if (buffer_data_ && filter_return_ != FilterReturn::StopDecoding) { + ENVOY_STREAM_LOG(debug, "ext_authz filter finished buffering the request", *callbacks_); + initiateCall(*request_headers_); + } + return filter_return_ == FilterReturn::StopDecoding ? 
Http::FilterTrailersStatus::StopIteration : Http::FilterTrailersStatus::Continue; } @@ -122,29 +149,30 @@ void Filter::onComplete(Filters::Common::ExtAuthz::ResponsePtr&& response) { break; } - ENVOY_STREAM_LOG(trace, "ext_authz received status code {}", *callbacks_, + ENVOY_STREAM_LOG(trace, "ext_authz filter received status code {}", *callbacks_, enumToInt(response->status_code)); // We fail open/fail close based on filter config // if there is an error contacting the service. if (response->status == CheckStatus::Denied || (response->status == CheckStatus::Error && !config_->failureModeAllow())) { - ENVOY_STREAM_LOG(debug, "ext_authz rejected the request", *callbacks_); - ENVOY_STREAM_LOG(trace, "ext_authz downstream header(s):", *callbacks_); - callbacks_->sendLocalReply(response->status_code, response->body, - [& headers = response->headers_to_add, &callbacks = *callbacks_]( - Http::HeaderMap& response_headers) -> void { - for (const auto& header : headers) { - response_headers.remove(header.first); - response_headers.addCopy(header.first, header.second); - ENVOY_STREAM_LOG(trace, " '{}':'{}'", callbacks, - header.first.get(), header.second); - } - }, - absl::nullopt); + ENVOY_STREAM_LOG(debug, "ext_authz filter rejected the request", *callbacks_); + callbacks_->sendLocalReply( + response->status_code, response->body, + [& headers = response->headers_to_add, + &callbacks = *callbacks_](Http::HeaderMap& response_headers) -> void { + ENVOY_STREAM_LOG(trace, + "ext_authz filter added header(s) to the local response:", callbacks); + for (const auto& header : headers) { + response_headers.remove(header.first); + response_headers.addCopy(header.first, header.second); + ENVOY_STREAM_LOG(trace, " '{}':'{}'", callbacks, header.first.get(), header.second); + } + }, + absl::nullopt); callbacks_->streamInfo().setResponseFlag(StreamInfo::ResponseFlag::UnauthorizedExternalService); } else {
ENVOY_STREAM_LOG(debug, "ext_authz filter accepted the request", *callbacks_); // Let the filter chain continue. filter_return_ = FilterReturn::ContinueDecoding; if (config_->failureModeAllow() && response->status == CheckStatus::Error) { @@ -153,7 +181,7 @@ void Filter::onComplete(Filters::Common::ExtAuthz::ResponsePtr&& response) { } // Only send headers if the response is ok. if (response->status == CheckStatus::OK) { - ENVOY_STREAM_LOG(trace, "ext_authz upstream header(s):", *callbacks_); + ENVOY_STREAM_LOG(trace, "ext_authz filter added header(s) to the request:", *callbacks_); for (const auto& header : response->headers_to_add) { Http::HeaderEntry* header_to_modify = request_headers_->get(header.first); if (header_to_modify) { @@ -179,6 +207,14 @@ void Filter::onComplete(Filters::Common::ExtAuthz::ResponsePtr&& response) { } } +bool Filter::isBufferFull() { + const auto* buffer = callbacks_->decodingBuffer(); + if (config_->allowPartialMessage() && buffer != nullptr) { + return buffer->length() >= config_->maxRequestBytes(); + } + return false; +} + } // namespace ExtAuthz } // namespace HttpFilters } // namespace Extensions diff --git a/source/extensions/filters/http/ext_authz/ext_authz.h b/source/extensions/filters/http/ext_authz/ext_authz.h index 1993ec641c18e..3cc8821509767 100644 --- a/source/extensions/filters/http/ext_authz/ext_authz.h +++ b/source/extensions/filters/http/ext_authz/ext_authz.h @@ -39,18 +39,31 @@ class FilterConfig { FilterConfig(const envoy::config::filter::http::ext_authz::v2::ExtAuthz& config, const LocalInfo::LocalInfo& local_info, Stats::Scope& scope, Runtime::Loader& runtime, Http::Context& http_context) - : failure_mode_allow_(config.failure_mode_allow()), local_info_(local_info), scope_(scope), - runtime_(runtime), http_context_(http_context) {} + : allow_partial_message_(config.with_request_body().allow_partial_message()), + failure_mode_allow_(config.failure_mode_allow()), + 
max_request_bytes_(config.with_request_body().max_request_bytes()), local_info_(local_info), + scope_(scope), runtime_(runtime), http_context_(http_context) {} + + bool allowPartialMessage() const { return allow_partial_message_; } + + bool withRequestBody() const { return max_request_bytes_ > 0; } bool failureModeAllow() const { return failure_mode_allow_; } + + uint32_t maxRequestBytes() const { return max_request_bytes_; } + const LocalInfo::LocalInfo& localInfo() const { return local_info_; } + Runtime::Loader& runtime() { return runtime_; } + Stats::Scope& scope() { return scope_; } Http::Context& httpContext() { return http_context_; } private: - bool failure_mode_allow_{}; + const bool allow_partial_message_; + const bool failure_mode_allow_; + const uint32_t max_request_bytes_; const LocalInfo::LocalInfo& local_info_; Stats::Scope& scope_; Runtime::Loader& runtime_; @@ -116,6 +129,8 @@ class Filter : public Logger::Loggable, private: void addResponseHeaders(Http::HeaderMap& header_map, const Http::HeaderVector& headers); + void initiateCall(const Http::HeaderMap& headers); + bool isBufferFull(); // State of this filter's communication with the external authorization service. // The filter has either not started calling the external service, in the middle of calling @@ -127,7 +142,6 @@ class Filter : public Logger::Loggable, // the filter chain should stop. Otherwise the filter chain can continue to the next filter. enum class FilterReturn { ContinueDecoding, StopDecoding }; - void initiateCall(const Http::HeaderMap& headers); Http::HeaderMapPtr getHeaderMap(const Filters::Common::ExtAuthz::ResponsePtr& response); FilterConfigSharedPtr config_; Filters::Common::ExtAuthz::ClientPtr client_; @@ -139,6 +153,7 @@ class Filter : public Logger::Loggable, // Used to identify if the callback to onComplete() is synchronous (on the stack) or asynchronous. 
bool initiating_call_{}; + bool buffer_data_{}; envoy::service::auth::v2::CheckRequest check_request_{}; }; diff --git a/test/extensions/filters/common/ext_authz/check_request_utils_test.cc b/test/extensions/filters/common/ext_authz/check_request_utils_test.cc index 338ea03c7278e..4adcde6276a33 100644 --- a/test/extensions/filters/common/ext_authz/check_request_utils_test.cc +++ b/test/extensions/filters/common/ext_authz/check_request_utils_test.cc @@ -27,8 +27,30 @@ class CheckRequestUtilsTest : public testing::Test { CheckRequestUtilsTest() { addr_ = std::make_shared("1.2.3.4", 1111); protocol_ = Envoy::Http::Protocol::Http10; + buffer_ = CheckRequestUtilsTest::newTestBuffer(8192); }; + void ExpectBasicHttp() { + EXPECT_CALL(callbacks_, connection()).Times(2).WillRepeatedly(Return(&connection_)); + EXPECT_CALL(connection_, remoteAddress()).WillOnce(ReturnRef(addr_)); + EXPECT_CALL(connection_, localAddress()).WillOnce(ReturnRef(addr_)); + EXPECT_CALL(Const(connection_), ssl()).Times(2).WillRepeatedly(Return(&ssl_)); + EXPECT_CALL(callbacks_, streamId()).Times(1).WillOnce(Return(0)); + EXPECT_CALL(callbacks_, decodingBuffer()).WillOnce(Return(buffer_.get())); + EXPECT_CALL(callbacks_, streamInfo()).Times(3).WillRepeatedly(ReturnRef(req_info_)); + EXPECT_CALL(req_info_, protocol()).Times(2).WillRepeatedly(ReturnPointee(&protocol_)); + } + + static Buffer::InstancePtr newTestBuffer(uint64_t size) { + auto buffer = std::make_unique(); + while (buffer->length() < size) { + auto new_buffer = + Buffer::OwnedImpl("Lorem ipsum dolor sit amet, consectetuer adipiscing elit."); + buffer->add(new_buffer); + } + return std::move(buffer); + } + Network::Address::InstanceConstSharedPtr addr_; absl::optional protocol_; CheckRequestUtils check_request_generator_; @@ -37,6 +59,7 @@ class CheckRequestUtilsTest : public testing::Test { NiceMock connection_; NiceMock ssl_; NiceMock req_info_; + Buffer::InstancePtr buffer_; }; // Verify that createTcpCheck's dependencies are invoked 
when it's called. @@ -51,19 +74,46 @@ TEST_F(CheckRequestUtilsTest, BasicTcp) { } // Verify that createHttpCheck's dependencies are invoked when it's called. +// Verify that check request object has no request data. TEST_F(CheckRequestUtilsTest, BasicHttp) { - Http::HeaderMapImpl headers; - envoy::service::auth::v2::CheckRequest request; - EXPECT_CALL(callbacks_, connection()).Times(2).WillRepeatedly(Return(&connection_)); - EXPECT_CALL(connection_, remoteAddress()).WillOnce(ReturnRef(addr_)); - EXPECT_CALL(connection_, localAddress()).WillOnce(ReturnRef(addr_)); - EXPECT_CALL(Const(connection_), ssl()).Times(2).WillRepeatedly(Return(&ssl_)); - EXPECT_CALL(callbacks_, streamId()).WillOnce(Return(0)); - EXPECT_CALL(callbacks_, streamInfo()).Times(3).WillRepeatedly(ReturnRef(req_info_)); - EXPECT_CALL(req_info_, protocol()).Times(2).WillRepeatedly(ReturnPointee(&protocol_)); - Protobuf::Map empty; + const uint64_t size = 0; + Http::HeaderMapImpl headers_; + envoy::service::auth::v2::CheckRequest request_; + + ExpectBasicHttp(); + CheckRequestUtils::createHttpCheck(&callbacks_, headers_, + Protobuf::Map(), + request_, size); + ASSERT_EQ(size, request_.attributes().request().http().body().size()); + EXPECT_EQ(buffer_->toString().substr(0, size), request_.attributes().request().http().body()); +} + +// Verify that check request object has only a portion of the request data. 
+TEST_F(CheckRequestUtilsTest, BasicHttpWithPartialBody) { + const uint64_t size = 4049; + Http::HeaderMapImpl headers_; + envoy::service::auth::v2::CheckRequest request_; + + ExpectBasicHttp(); + CheckRequestUtils::createHttpCheck(&callbacks_, headers_, + Protobuf::Map(), + request_, size); + ASSERT_EQ(size, request_.attributes().request().http().body().size()); + EXPECT_EQ(buffer_->toString().substr(0, size), request_.attributes().request().http().body()); +} - CheckRequestUtils::createHttpCheck(&callbacks_, headers, std::move(empty), request); +// Verify that check request object has all the request data. +TEST_F(CheckRequestUtilsTest, BasicHttpWithFullBody) { + Http::HeaderMapImpl headers_; + envoy::service::auth::v2::CheckRequest request_; + + ExpectBasicHttp(); + CheckRequestUtils::createHttpCheck(&callbacks_, headers_, + Protobuf::Map(), + request_, buffer_->length()); + ASSERT_EQ(buffer_->length(), request_.attributes().request().http().body().size()); + EXPECT_EQ(buffer_->toString().substr(0, buffer_->length()), + request_.attributes().request().http().body()); } // Verify that createHttpCheck extract the proper attributes from the http request into CheckRequest @@ -78,6 +128,7 @@ TEST_F(CheckRequestUtilsTest, CheckAttrContextPeer) { EXPECT_CALL(Const(connection_), ssl()).WillRepeatedly(Return(&ssl_)); EXPECT_CALL(callbacks_, streamId()).WillRepeatedly(Return(0)); EXPECT_CALL(callbacks_, streamInfo()).WillRepeatedly(ReturnRef(req_info_)); + EXPECT_CALL(callbacks_, decodingBuffer()).Times(1); EXPECT_CALL(req_info_, protocol()).WillRepeatedly(ReturnPointee(&protocol_)); EXPECT_CALL(ssl_, uriSanPeerCertificate()).WillOnce(Return(std::vector{"source"})); EXPECT_CALL(ssl_, uriSanLocalCertificate()) @@ -87,7 +138,7 @@ TEST_F(CheckRequestUtilsTest, CheckAttrContextPeer) { context_extensions["key"] = "value"; CheckRequestUtils::createHttpCheck(&callbacks_, request_headers, std::move(context_extensions), - request); + request, false); EXPECT_EQ("source", 
request.attributes().source().principal()); EXPECT_EQ("destination", request.attributes().destination().principal()); diff --git a/test/extensions/filters/http/ext_authz/config_test.cc b/test/extensions/filters/http/ext_authz/config_test.cc index 887e50ba776a4..2766bb70b71c3 100644 --- a/test/extensions/filters/http/ext_authz/config_test.cc +++ b/test/extensions/filters/http/ext_authz/config_test.cc @@ -77,6 +77,8 @@ TEST(HttpExtAuthzConfigTest, CorrectProtoHttp) { path_prefix: /extauth failure_mode_allow: true + with_request_body: + max_request_bytes: 100 )EOF"; ExtAuthzFilterConfig factory; diff --git a/test/extensions/filters/http/ext_authz/ext_authz_test.cc b/test/extensions/filters/http/ext_authz/ext_authz_test.cc index edb4f12d6e18a..64a2fbeffda0d 100644 --- a/test/extensions/filters/http/ext_authz/ext_authz_test.cc +++ b/test/extensions/filters/http/ext_authz/ext_authz_test.cc @@ -21,6 +21,7 @@ #include "test/mocks/http/mocks.h" #include "test/mocks/local_info/mocks.h" #include "test/mocks/network/mocks.h" +#include "test/mocks/router/mocks.h" #include "test/mocks/runtime/mocks.h" #include "test/mocks/tracing/mocks.h" #include "test/mocks/upstream/mocks.h" @@ -64,7 +65,7 @@ template class HttpFilterTestBase : public T { Filters::Common::ExtAuthz::MockClient* client_; std::unique_ptr filter_; NiceMock filter_callbacks_; - Filters::Common::ExtAuthz::RequestCallbacks* request_callbacks_{}; + Filters::Common::ExtAuthz::RequestCallbacks* request_callbacks_; Http::TestHeaderMapImpl request_headers_; Buffer::OwnedImpl data_; Stats::IsolatedStoreImpl stats_store_; @@ -263,6 +264,242 @@ TEST_F(HttpFilterTest, BadConfig) { ProtoValidationException); } +// Checks that filter does not initiate the authorization request when the buffer reaches the max +// request bytes. 
+TEST_F(HttpFilterTest, RequestDataIsTooLarge) { + InSequence s; + + initialize(R"EOF( + grpc_service: + envoy_grpc: + cluster_name: "ext_authz_server" + failure_mode_allow: false + with_request_body: + max_request_bytes: 10 + )EOF"); + + ON_CALL(filter_callbacks_, connection()).WillByDefault(Return(&connection_)); + EXPECT_CALL(filter_callbacks_, setDecoderBufferLimit(_)).Times(1); + EXPECT_CALL(connection_, remoteAddress()).Times(0); + EXPECT_CALL(connection_, localAddress()).Times(0); + EXPECT_CALL(*client_, check(_, _, _)).Times(0); + + EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, + filter_->decodeHeaders(request_headers_, false)); + + Buffer::OwnedImpl buffer1("foo"); + EXPECT_EQ(Http::FilterDataStatus::StopIterationAndBuffer, filter_->decodeData(buffer1, false)); + + Buffer::OwnedImpl buffer2("foobarbaz"); + EXPECT_EQ(Http::FilterDataStatus::StopIterationAndBuffer, filter_->decodeData(buffer2, false)); +} + +// Checks that the filter initiates an authorization request when the buffer reaches max +// request bytes and allow_partial_message is set to true. 
+TEST_F(HttpFilterTest, RequestDataWithPartialMessage) { + InSequence s; + + initialize(R"EOF( + grpc_service: + envoy_grpc: + cluster_name: "ext_authz_server" + failure_mode_allow: false + with_request_body: + max_request_bytes: 10 + allow_partial_message: true + )EOF"); + + ON_CALL(filter_callbacks_, connection()).WillByDefault(Return(&connection_)); + ON_CALL(filter_callbacks_, decodingBuffer()).WillByDefault(Return(&data_)); + ; + EXPECT_CALL(filter_callbacks_, setDecoderBufferLimit(_)).Times(0); + EXPECT_CALL(connection_, remoteAddress()).WillOnce(ReturnRef(addr_)); + EXPECT_CALL(connection_, localAddress()).WillOnce(ReturnRef(addr_)); + EXPECT_CALL(*client_, check(_, _, _)).Times(1); + + EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, + filter_->decodeHeaders(request_headers_, false)); + + data_.add("foo"); + EXPECT_EQ(Http::FilterDataStatus::StopIterationAndBuffer, filter_->decodeData(data_, false)); + + data_.add("bar"); + EXPECT_EQ(Http::FilterDataStatus::StopIterationAndBuffer, filter_->decodeData(data_, false)); + + data_.add("barfoo"); + EXPECT_EQ(Http::FilterDataStatus::StopIterationAndWatermark, filter_->decodeData(data_, true)); + EXPECT_EQ(Http::FilterTrailersStatus::StopIteration, filter_->decodeTrailers(request_headers_)); +} + +// Checks that the filter initiates the authorization process only when the filter decode trailers +// is called. 
+TEST_F(HttpFilterTest, RequestDataWithSmallBuffer) { + InSequence s; + + initialize(R"EOF( + grpc_service: + envoy_grpc: + cluster_name: "ext_authz_server" + failure_mode_allow: false + with_request_body: + max_request_bytes: 10 + allow_partial_message: true + )EOF"); + + ON_CALL(filter_callbacks_, connection()).WillByDefault(Return(&connection_)); + ON_CALL(filter_callbacks_, decodingBuffer()).WillByDefault(Return(&data_)); + EXPECT_CALL(filter_callbacks_, setDecoderBufferLimit(_)).Times(0); + EXPECT_CALL(connection_, remoteAddress()).WillOnce(ReturnRef(addr_)); + EXPECT_CALL(connection_, localAddress()).WillOnce(ReturnRef(addr_)); + EXPECT_CALL(*client_, check(_, _, _)).Times(1); + + EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, + filter_->decodeHeaders(request_headers_, false)); + + data_.add("foo"); + EXPECT_EQ(Http::FilterDataStatus::StopIterationAndBuffer, filter_->decodeData(data_, false)); + EXPECT_EQ(Http::FilterTrailersStatus::StopIteration, filter_->decodeTrailers(request_headers_)); +} + +// Checks that the filter buffers the data and initiates the authorization request. 
+TEST_F(HttpFilterTest, AuthWithRequestData) { + InSequence s; + + initialize(R"EOF( + grpc_service: + envoy_grpc: + cluster_name: "ext_authz_server" + with_request_body: + max_request_bytes: 10 + )EOF"); + + prepareCheck(); + + EXPECT_CALL(*client_, check(_, _, testing::A())) + .WillOnce( + WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { + request_callbacks_ = &callbacks; + }))); + EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, + filter_->decodeHeaders(request_headers_, false)); + data_.add("foo"); + EXPECT_EQ(Http::FilterDataStatus::StopIterationAndBuffer, filter_->decodeData(data_, false)); + data_.add("bar"); + EXPECT_EQ(Http::FilterDataStatus::StopIterationAndWatermark, filter_->decodeData(data_, true)); + EXPECT_EQ(Http::FilterTrailersStatus::StopIteration, filter_->decodeTrailers(request_headers_)); +} + +// Checks that filter does not buffer data on header-only request. +TEST_F(HttpFilterTest, HeaderOnlyRequest) { + InSequence s; + + initialize(R"EOF( + grpc_service: + envoy_grpc: + cluster_name: "ext_authz_server" + with_request_body: + max_request_bytes: 10 + )EOF"); + + prepareCheck(); + + EXPECT_CALL(*client_, check(_, _, testing::A())) + .WillOnce( + WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { + request_callbacks_ = &callbacks; + }))); + EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, + filter_->decodeHeaders(request_headers_, true)); + EXPECT_EQ(Http::FilterDataStatus::StopIterationAndWatermark, filter_->decodeData(data_, true)); + EXPECT_EQ(Http::FilterTrailersStatus::StopIteration, filter_->decodeTrailers(request_headers_)); +} + +// Checks that filter does not buffer data on upgrade WebSocket request. 
+TEST_F(HttpFilterTest, UpgradeWebsocketRequest) { + InSequence s; + + initialize(R"EOF( + grpc_service: + envoy_grpc: + cluster_name: "ext_authz_server" + with_request_body: + max_request_bytes: 10 + )EOF"); + + prepareCheck(); + + request_headers_.addCopy(Http::Headers::get().Connection, + Http::Headers::get().ConnectionValues.Upgrade); + request_headers_.addCopy(Http::Headers::get().Upgrade, + Http::Headers::get().UpgradeValues.WebSocket); + + EXPECT_CALL(*client_, check(_, _, testing::A())) + .WillOnce( + WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { + request_callbacks_ = &callbacks; + }))); + EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, + filter_->decodeHeaders(request_headers_, false)); + EXPECT_EQ(Http::FilterDataStatus::StopIterationAndWatermark, filter_->decodeData(data_, false)); + EXPECT_EQ(Http::FilterTrailersStatus::StopIteration, filter_->decodeTrailers(request_headers_)); +} + +// Checks that filter does not buffer data on upgrade H2 WebSocket request. 
+TEST_F(HttpFilterTest, H2UpgradeRequest) { + InSequence s; + + initialize(R"EOF( + grpc_service: + envoy_grpc: + cluster_name: "ext_authz_server" + with_request_body: + max_request_bytes: 10 + )EOF"); + + prepareCheck(); + + request_headers_.addCopy(Http::Headers::get().Method, Http::Headers::get().MethodValues.Connect); + request_headers_.addCopy(Http::Headers::get().Protocol, + Http::Headers::get().ProtocolStrings.Http2String); + + EXPECT_CALL(*client_, check(_, _, testing::A())) + .WillOnce( + WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { + request_callbacks_ = &callbacks; + }))); + EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, + filter_->decodeHeaders(request_headers_, false)); + EXPECT_EQ(Http::FilterDataStatus::StopIterationAndWatermark, filter_->decodeData(data_, false)); + EXPECT_EQ(Http::FilterTrailersStatus::StopIteration, filter_->decodeTrailers(request_headers_)); +} + +// Checks that filter does not buffer data when is not the end of the stream, but header-only +// request has been received. 
+TEST_F(HttpFilterTest, HeaderOnlyRequestWithStream) { + InSequence s; + + initialize(R"EOF( + grpc_service: + envoy_grpc: + cluster_name: "ext_authz_server" + with_request_body: + max_request_bytes: 10 + )EOF"); + + prepareCheck(); + + EXPECT_CALL(*client_, check(_, _, testing::A())) + .WillOnce( + WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { + request_callbacks_ = &callbacks; + }))); + + EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, + filter_->decodeHeaders(request_headers_, false)); + EXPECT_EQ(Http::FilterDataStatus::StopIterationAndBuffer, filter_->decodeData(data_, false)); + EXPECT_EQ(Http::FilterTrailersStatus::StopIteration, filter_->decodeTrailers(request_headers_)); +} + // ------------------- // Parameterized Tests // ------------------- From e310a8d23947583f55a0b58c3d7204ee27879c49 Mon Sep 17 00:00:00 2001 From: zyfjeff Date: Tue, 2 Apr 2019 00:14:48 +0800 Subject: [PATCH 043/165] server: Replace unordere_map to absl::flat_hash_map (#6447) Signed-off-by: tianqian.zyf --- source/server/server.h | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/source/server/server.h b/source/server/server.h index c09479dfb610d..6de9514367b82 100644 --- a/source/server/server.h +++ b/source/server/server.h @@ -36,6 +36,7 @@ #include "extensions/transport_sockets/tls/context_manager_impl.h" +#include "absl/container/flat_hash_map.h" #include "absl/types/optional.h" namespace Envoy { @@ -260,8 +261,8 @@ class InstanceImpl : Logger::Loggable, Http::ContextImpl http_context_; std::unique_ptr heap_shrinker_; const std::thread::id main_thread_id_; - std::unordered_map> stage_callbacks_; - std::unordered_map> stage_completable_callbacks_; + absl::flat_hash_map> stage_callbacks_; + absl::flat_hash_map> stage_completable_callbacks_; }; } // namespace Server From 26d3aaf394164c35ae19ccac8e9d2317a43f3e35 Mon Sep 17 00:00:00 2001 From: Derek Argueta Date: Mon, 1 Apr 2019 09:21:26 -0700 Subject: [PATCH 044/165] 
router: remove route-action level header manipulation (#6408) Signed-off-by: Derek Argueta --- api/envoy/api/v2/route/route.proto | 11 +- docs/root/intro/version_history.rst | 1 + source/common/config/rds_json.cc | 2 +- source/common/router/config_impl.cc | 16 +-- source/common/router/config_impl.h | 2 - test/common/router/config_impl_test.cc | 130 +++++++----------- test/integration/header_integration_test.cc | 25 ++-- .../router_check/test/config/TestRoutes.yaml | 8 +- 8 files changed, 77 insertions(+), 118 deletions(-) diff --git a/api/envoy/api/v2/route/route.proto b/api/envoy/api/v2/route/route.proto index af984991bccad..2d2b56ae67d80 100644 --- a/api/envoy/api/v2/route/route.proto +++ b/api/envoy/api/v2/route/route.proto @@ -639,14 +639,9 @@ message RouteAction { // https://github.com/lyft/protoc-gen-validate/issues/42 is resolved.] core.RoutingPriority priority = 11; - // [#not-implemented-hide:] - repeated core.HeaderValueOption request_headers_to_add = 12 [deprecated = true]; - - // [#not-implemented-hide:] - repeated core.HeaderValueOption response_headers_to_add = 18 [deprecated = true]; - - // [#not-implemented-hide:] - repeated string response_headers_to_remove = 19 [deprecated = true]; + reserved 12; + reserved 18; + reserved 19; // Specifies a set of rate limit configurations that could be applied to the // route. diff --git a/docs/root/intro/version_history.rst b/docs/root/intro/version_history.rst index 601c4af05e521..8883a1e47b3d3 100644 --- a/docs/root/intro/version_history.rst +++ b/docs/root/intro/version_history.rst @@ -63,6 +63,7 @@ Version history * router: added reset reason to response body when upstream reset happens. After this change, the response body will be of the form `upstream connect error or disconnect/reset before headers. reset reason:` * router: added :ref:`rq_reset_after_downstream_response_started ` counter stat to router stats. * router: added per-route configuration of :ref:`internal redirects `. 
+* router: removed deprecated route-action level headers_to_add/remove. * router: made :ref: `max retries header ` take precedence over the number of retries in route and virtual host retry policies. * router: added support for prefix wildcards in :ref:`virtual host domains` * stats: added support for histograms in prometheus diff --git a/source/common/config/rds_json.cc b/source/common/config/rds_json.cc index 036ed45ff26a9..a352c8ca3e76d 100644 --- a/source/common/config/rds_json.cc +++ b/source/common/config/rds_json.cc @@ -312,7 +312,7 @@ void RdsJson::translateRoute(const Json::Object& json_route, envoy::api::v2::rou action->set_priority(priority); for (const auto header_value : json_route.getObjectArray("request_headers_to_add", true)) { - auto* header_value_option = action->mutable_request_headers_to_add()->Add(); + auto* header_value_option = route.mutable_request_headers_to_add()->Add(); BaseJson::translateHeaderValueOption(*header_value, *header_value_option); } diff --git a/source/common/router/config_impl.cc b/source/common/router/config_impl.cc index 12a5edffdfe2a..af3bdb01ffc74 100644 --- a/source/common/router/config_impl.cc +++ b/source/common/router/config_impl.cc @@ -332,10 +332,6 @@ RouteEntryImplBase::RouteEntryImplBase(const VirtualHostImpl& vhost, priority_(ConfigUtility::parsePriority(route.route().priority())), total_cluster_weight_( PROTOBUF_GET_WRAPPED_OR_DEFAULT(route.route().weighted_clusters(), total_weight, 100UL)), - route_action_request_headers_parser_( - HeaderParser::configure(route.route().request_headers_to_add())), - route_action_response_headers_parser_(HeaderParser::configure( - route.route().response_headers_to_add(), route.route().response_headers_to_remove())), request_headers_parser_(HeaderParser::configure(route.request_headers_to_add(), route.request_headers_to_remove())), response_headers_parser_(HeaderParser::configure(route.response_headers_to_add(), @@ -452,10 +448,8 @@ const std::string& 
RouteEntryImplBase::clusterName() const { return cluster_name void RouteEntryImplBase::finalizeRequestHeaders(Http::HeaderMap& headers, const StreamInfo::StreamInfo& stream_info, bool insert_envoy_original_path) const { - // Append user-specified request headers in the following order: route-action-level headers, - // route-level headers, virtual host level headers and finally global connection manager level - // headers. - route_action_request_headers_parser_->evaluateHeaders(headers, stream_info); + // Append user-specified request headers in the following order: route-level headers, virtual + // host level headers and finally global connection manager level headers. request_headers_parser_->evaluateHeaders(headers, stream_info); vhost_.requestHeaderParser().evaluateHeaders(headers, stream_info); vhost_.globalRouteConfig().requestHeaderParser().evaluateHeaders(headers, stream_info); @@ -471,10 +465,8 @@ void RouteEntryImplBase::finalizeRequestHeaders(Http::HeaderMap& headers, void RouteEntryImplBase::finalizeResponseHeaders(Http::HeaderMap& headers, const StreamInfo::StreamInfo& stream_info) const { - // Append user-specified response headers in the following order: route-action-level headers, - // route-level headers, virtual host level headers and finally global connection manager level - // headers. - route_action_response_headers_parser_->evaluateHeaders(headers, stream_info); + // Append user-specified response headers in the following order: route-level headers, virtual + // host level headers and finally global connection manager level headers. 
response_headers_parser_->evaluateHeaders(headers, stream_info); vhost_.responseHeaderParser().evaluateHeaders(headers, stream_info); vhost_.globalRouteConfig().responseHeaderParser().evaluateHeaders(headers, stream_info); diff --git a/source/common/router/config_impl.h b/source/common/router/config_impl.h index a3285b722c43b..c9fdeb40403e3 100644 --- a/source/common/router/config_impl.h +++ b/source/common/router/config_impl.h @@ -622,8 +622,6 @@ class RouteEntryImplBase : public RouteEntry, const uint64_t total_cluster_weight_; std::unique_ptr hash_policy_; MetadataMatchCriteriaConstPtr metadata_match_criteria_; - HeaderParserPtr route_action_request_headers_parser_; - HeaderParserPtr route_action_response_headers_parser_; HeaderParserPtr request_headers_parser_; HeaderParserPtr response_headers_parser_; envoy::api::v2::core::Metadata metadata_; diff --git a/test/common/router/config_impl_test.cc b/test/common/router/config_impl_test.cc index 0c1734faf4991..2492fbd71029a 100644 --- a/test/common/router/config_impl_test.cc +++ b/test/common/router/config_impl_test.cc @@ -602,7 +602,7 @@ TEST_F(RouteMatcherTest, TestRoutesWithInvalidRegex) { EnvoyException, "Invalid regex '\\^/\\(\\+invalid\\)':"); } -// Validates behavior of request_headers_to_add at router, vhost, and route action levels. +// Validates behavior of request_headers_to_add at router, vhost, and route levels. 
TEST_F(RouteMatcherTest, TestAddRemoveRequestHeaders) { const std::string yaml = R"EOF( virtual_hosts: @@ -626,24 +626,24 @@ TEST_F(RouteMatcherTest, TestAddRemoveRequestHeaders) { route: prefix_rewrite: "/api/new_endpoint" cluster: www2 - request_headers_to_add: - - header: - key: x-global-header1 - value: route-override - - header: - key: x-vhost-header1 - value: route-override - - header: - key: x-route-action-header - value: route-new_endpoint + request_headers_to_add: + - header: + key: x-global-header1 + value: route-override + - header: + key: x-vhost-header1 + value: route-override + - header: + key: x-route-header + value: route-new_endpoint - match: path: "/" route: cluster: root_www2 - request_headers_to_add: - - header: - key: x-route-action-header - value: route-allpath + request_headers_to_add: + - header: + key: x-route-header + value: route-allpath - match: prefix: "/" route: @@ -661,10 +661,10 @@ TEST_F(RouteMatcherTest, TestAddRemoveRequestHeaders) { prefix: "/" route: cluster: www2_staging - request_headers_to_add: - - header: - key: x-route-action-header - value: route-allprefix + request_headers_to_add: + - header: + key: x-route-header + value: route-allprefix - name: default domains: - "*" @@ -701,7 +701,7 @@ TEST_F(RouteMatcherTest, TestAddRemoveRequestHeaders) { route->finalizeRequestHeaders(headers, stream_info, true); EXPECT_EQ("route-override", headers.get_("x-global-header1")); EXPECT_EQ("route-override", headers.get_("x-vhost-header1")); - EXPECT_EQ("route-new_endpoint", headers.get_("x-route-action-header")); + EXPECT_EQ("route-new_endpoint", headers.get_("x-route-header")); } // Multiple routes can have same route-level headers with different values. 
@@ -711,7 +711,7 @@ TEST_F(RouteMatcherTest, TestAddRemoveRequestHeaders) { route->finalizeRequestHeaders(headers, stream_info, true); EXPECT_EQ("vhost-override", headers.get_("x-global-header1")); EXPECT_EQ("vhost1-www2", headers.get_("x-vhost-header1")); - EXPECT_EQ("route-allpath", headers.get_("x-route-action-header")); + EXPECT_EQ("route-allpath", headers.get_("x-route-header")); } // Multiple virtual hosts can have same virtual host level headers with different values. @@ -721,7 +721,7 @@ TEST_F(RouteMatcherTest, TestAddRemoveRequestHeaders) { route->finalizeRequestHeaders(headers, stream_info, true); EXPECT_EQ("global1", headers.get_("x-global-header1")); EXPECT_EQ("vhost1-www2_staging", headers.get_("x-vhost-header1")); - EXPECT_EQ("route-allprefix", headers.get_("x-route-action-header")); + EXPECT_EQ("route-allprefix", headers.get_("x-route-header")); } // Global headers. @@ -734,8 +734,8 @@ TEST_F(RouteMatcherTest, TestAddRemoveRequestHeaders) { } } -// Validates behavior of request_headers_to_add at router, vhost, route, and route action levels -// when append is disabled. +// Validates behavior of request_headers_to_add at router, vhost, and route levels when append is +// disabled. 
TEST_F(RouteMatcherTest, TestRequestHeadersToAddWithAppendFalse) { const std::string yaml = R"EOF( name: foo @@ -770,22 +770,6 @@ name: foo request_headers_to_remove: ["x-route-nope"] route: cluster: www2 - request_headers_to_add: - - header: - key: x-global-header - value: route-action-endpoint - append: false - - header: - key: x-vhost-header - value: route-action-endpoint - append: false - - header: - key: x-route-header - value: route-action-endpoint - - header: - key: x-route-action-header - value: route-action-endpoint - append: false - match: { prefix: "/" } route: { cluster: www2 } - name: default @@ -818,7 +802,6 @@ request_headers_to_remove: ["x-global-nope"] EXPECT_EQ("global", headers.get_("x-global-header")); EXPECT_EQ("vhost-www2", headers.get_("x-vhost-header")); EXPECT_EQ("route-endpoint", headers.get_("x-route-header")); - EXPECT_EQ("route-action-endpoint", headers.get_("x-route-action-header")); // Removed headers. EXPECT_FALSE(headers.has("x-global-nope")); EXPECT_FALSE(headers.has("x-vhost-nope")); @@ -834,7 +817,6 @@ request_headers_to_remove: ["x-global-nope"] EXPECT_EQ("global", headers.get_("x-global-header")); EXPECT_EQ("vhost-www2", headers.get_("x-vhost-header")); EXPECT_FALSE(headers.has("x-route-header")); - EXPECT_FALSE(headers.has("x-route-action-header")); // Removed headers. EXPECT_FALSE(headers.has("x-global-nope")); EXPECT_FALSE(headers.has("x-vhost-nope")); @@ -850,7 +832,6 @@ request_headers_to_remove: ["x-global-nope"] EXPECT_EQ("global", headers.get_("x-global-header")); EXPECT_FALSE(headers.has("x-vhost-header")); EXPECT_FALSE(headers.has("x-route-header")); - EXPECT_FALSE(headers.has("x-route-action-header")); // Removed headers. EXPECT_FALSE(headers.has("x-global-nope")); EXPECT_TRUE(headers.has("x-vhost-nope")); @@ -860,7 +841,7 @@ request_headers_to_remove: ["x-global-nope"] } // Validates behavior of response_headers_to_add and response_headers_to_remove at router, vhost, -// route, and route action levels. 
+// and route levels. TEST_F(RouteMatcherTest, TestAddRemoveResponseHeaders) { const std::string yaml = R"EOF( name: foo @@ -877,31 +858,27 @@ name: foo response_headers_to_remove: ["x-vhost-remove"] routes: - match: { prefix: "/new_endpoint" } + route: + prefix_rewrite: "/api/new_endpoint" + cluster: www2 response_headers_to_add: - header: key: x-route-header value: route-override - route: - prefix_rewrite: "/api/new_endpoint" - cluster: www2 - response_headers_to_add: - - header: - key: x-global-header1 - value: route-override - - header: - key: x-vhost-header1 - value: route-override - - header: - key: x-route-action-header - value: route-new_endpoint + - header: + key: x-global-header1 + value: route-override + - header: + key: x-vhost-header1 + value: route-override - match: { path: "/" } route: cluster: root_www2 - response_headers_to_add: - - header: - key: x-route-action-header - value: route-allpath - response_headers_to_remove: ["x-route-remove"] + response_headers_to_add: + - header: + key: x-route-header + value: route-allpath + response_headers_to_remove: ["x-route-remove"] - match: { prefix: "/" } route: { cluster: "www2" } - name: www2_staging @@ -914,10 +891,10 @@ name: foo - match: { prefix: "/" } route: cluster: www2_staging - response_headers_to_add: - - header: - key: x-route-action-header - value: route-allprefix + response_headers_to_add: + - header: + key: x-route-header + value: route-allprefix - name: default domains: ["*"] routes: @@ -944,7 +921,6 @@ response_headers_to_remove: ["x-global-remove"] route->finalizeResponseHeaders(headers, stream_info); EXPECT_EQ("route-override", headers.get_("x-global-header1")); EXPECT_EQ("route-override", headers.get_("x-vhost-header1")); - EXPECT_EQ("route-new_endpoint", headers.get_("x-route-action-header")); EXPECT_EQ("route-override", headers.get_("x-route-header")); } @@ -956,7 +932,7 @@ response_headers_to_remove: ["x-global-remove"] route->finalizeResponseHeaders(headers, stream_info); 
EXPECT_EQ("vhost-override", headers.get_("x-global-header1")); EXPECT_EQ("vhost1-www2", headers.get_("x-vhost-header1")); - EXPECT_EQ("route-allpath", headers.get_("x-route-action-header")); + EXPECT_EQ("route-allpath", headers.get_("x-route-header")); } // Multiple virtual hosts can have same virtual host level headers with different values. @@ -967,7 +943,7 @@ response_headers_to_remove: ["x-global-remove"] route->finalizeResponseHeaders(headers, stream_info); EXPECT_EQ("global1", headers.get_("x-global-header1")); EXPECT_EQ("vhost1-www2_staging", headers.get_("x-vhost-header1")); - EXPECT_EQ("route-allprefix", headers.get_("x-route-action-header")); + EXPECT_EQ("route-allprefix", headers.get_("x-route-header")); } // Global headers. @@ -4197,10 +4173,10 @@ TEST_F(CustomRequestHeadersTest, AddNewHeader) { route: prefix_rewrite: "/api/new_endpoint" cluster: www2 - request_headers_to_add: - - header: - key: x-client-ip - value: "%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT%" + request_headers_to_add: + - header: + key: x-client-ip + value: "%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT%" request_headers_to_add: - header: key: x-client-ip @@ -4234,10 +4210,10 @@ TEST_F(CustomRequestHeadersTest, CustomHeaderWrongFormat) { route: prefix_rewrite: "/api/new_endpoint" cluster: www2 - request_headers_to_add: - - header: - key: x-client-ip - value: "%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT" + request_headers_to_add: + - header: + key: x-client-ip + value: "%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT" request_headers_to_add: - header: key: x-client-ip diff --git a/test/integration/header_integration_test.cc b/test/integration/header_integration_test.cc index 7bbdc273f7d54..484b4430b005e 100644 --- a/test/integration/header_integration_test.cc +++ b/test/integration/header_integration_test.cc @@ -121,10 +121,10 @@ stat_prefix: header_test - match: { prefix: "/test" } route: cluster: cluster_0 - request_headers_to_add: - - header: - key: "x-real-ip" - value: 
"%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT%" + request_headers_to_add: + - header: + key: "x-real-ip" + value: "%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT%" - name: append-same-headers domains: ["append-same-headers.com"] request_headers_to_add: @@ -138,13 +138,13 @@ stat_prefix: header_test - match: { prefix: "/test" } route: cluster: cluster_0 - request_headers_to_add: - - header: - key: "x-foo" - value: "value2" - - header: - key: "authorization" - value: "token2" + request_headers_to_add: + - header: + key: "x-foo" + value: "value2" + - header: + key: "authorization" + value: "token2" )EOF"; } // namespace @@ -311,9 +311,6 @@ class HeaderIntegrationTest if (route.has_route()) { auto* route_action = route.mutable_route(); - disableHeaderValueOptionAppend(*route_action->mutable_request_headers_to_add()); - disableHeaderValueOptionAppend(*route_action->mutable_response_headers_to_add()); - if (route_action->has_weighted_clusters()) { for (auto& c : *route_action->mutable_weighted_clusters()->mutable_clusters()) { disableHeaderValueOptionAppend(*c.mutable_request_headers_to_add()); diff --git a/test/tools/router_check/test/config/TestRoutes.yaml b/test/tools/router_check/test/config/TestRoutes.yaml index 3037df2f1045f..34b665fb9fd50 100644 --- a/test/tools/router_check/test/config/TestRoutes.yaml +++ b/test/tools/router_check/test/config/TestRoutes.yaml @@ -93,10 +93,10 @@ virtual_hosts: route: cluster: ats host_rewrite: new_host - request_headers_to_add: - - header: - key: X-Client-IP - value: '%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT%' + request_headers_to_add: + - header: + key: X-Client-IP + value: '%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT%' - match: prefix: / route: From e2d66657e20f0d761ca5b466aa35f41277ba813e Mon Sep 17 00:00:00 2001 From: Derek Argueta Date: Mon, 1 Apr 2019 09:22:39 -0700 Subject: [PATCH 045/165] test: update redis config tests to v2 (#6443) Signed-off-by: Derek Argueta --- .../filters/network/redis_proxy/BUILD | 1 + 
.../network/redis_proxy/config_test.cc | 69 +++++++++---------- 2 files changed, 33 insertions(+), 37 deletions(-) diff --git a/test/extensions/filters/network/redis_proxy/BUILD b/test/extensions/filters/network/redis_proxy/BUILD index 492404c41547e..bcc221a83f22d 100644 --- a/test/extensions/filters/network/redis_proxy/BUILD +++ b/test/extensions/filters/network/redis_proxy/BUILD @@ -83,6 +83,7 @@ envoy_extension_cc_test( srcs = ["config_test.cc"], extension_name = "envoy.filters.network.redis_proxy", deps = [ + "//source/common/protobuf:utility_lib", "//source/extensions/filters/network/redis_proxy:config", "//test/mocks/server:server_mocks", ], diff --git a/test/extensions/filters/network/redis_proxy/config_test.cc b/test/extensions/filters/network/redis_proxy/config_test.cc index 074862e5718c8..351fc97a78c8f 100644 --- a/test/extensions/filters/network/redis_proxy/config_test.cc +++ b/test/extensions/filters/network/redis_proxy/config_test.cc @@ -1,6 +1,6 @@ #include "envoy/config/filter/network/redis_proxy/v2/redis_proxy.pb.validate.h" -#include "common/config/filter_json.h" +#include "common/protobuf/utility.h" #include "extensions/filters/network/redis_proxy/config.h" @@ -23,40 +23,39 @@ TEST(RedisProxyFilterConfigFactoryTest, ValidateFail) { ProtoValidationException); } -TEST(RedisProxyFilterConfigFactoryTest, RedisProxyCorrectJson) { - std::string json_string = R"EOF( - { - "cluster_name": "fake_cluster", - "stat_prefix": "foo", - "conn_pool": { - "op_timeout_ms": 20 - } - } +TEST(RedisProxyFilterConfigFactoryTest, RedisProxyNoSettings) { + const std::string yaml = R"EOF( +cluster: fake_cluster +stat_prefix: foo )EOF"; - Json::ObjectSharedPtr json_config = Json::Factory::loadFromString(json_string); - NiceMock context; - RedisProxyFilterConfigFactory factory; - Network::FilterFactoryCb cb = factory.createFilterFactory(*json_config, context); - Network::MockConnection connection; - EXPECT_CALL(connection, addReadFilter(_)); - cb(connection); + 
envoy::config::filter::network::redis_proxy::v2::RedisProxy proto_config; + EXPECT_THROW_WITH_REGEX(MessageUtil::loadFromYamlAndValidate(yaml, proto_config), + ProtoValidationException, "value is required"); +} + +TEST(RedisProxyFilterConfigFactoryTest, RedisProxyNoOpTimeout) { + const std::string yaml = R"EOF( +cluster: fake_cluster +stat_prefix: foo +settings: {} + )EOF"; + + envoy::config::filter::network::redis_proxy::v2::RedisProxy proto_config; + EXPECT_THROW_WITH_REGEX(MessageUtil::loadFromYamlAndValidate(yaml, proto_config), + ProtoValidationException, "embedded message failed validation"); } TEST(RedisProxyFilterConfigFactoryTest, RedisProxyCorrectProto) { - std::string json_string = R"EOF( - { - "cluster_name": "fake_cluster", - "stat_prefix": "foo", - "conn_pool": { - "op_timeout_ms": 20 - } - } + const std::string yaml = R"EOF( +cluster: fake_cluster +stat_prefix: foo +settings: + op_timeout: 0.02s )EOF"; - Json::ObjectSharedPtr json_config = Json::Factory::loadFromString(json_string); envoy::config::filter::network::redis_proxy::v2::RedisProxy proto_config{}; - Config::FilterJson::translateRedisProxy(*json_config, proto_config); + MessageUtil::loadFromYamlAndValidate(yaml, proto_config); NiceMock context; RedisProxyFilterConfigFactory factory; Network::FilterFactoryCb cb = factory.createFilterFactoryFromProto(proto_config, context); @@ -66,24 +65,20 @@ TEST(RedisProxyFilterConfigFactoryTest, RedisProxyCorrectProto) { } TEST(RedisProxyFilterConfigFactoryTest, RedisProxyEmptyProto) { - std::string json_string = R"EOF( - { - "cluster_name": "fake_cluster", - "stat_prefix": "foo", - "conn_pool": { - "op_timeout_ms": 20 - } - } + const std::string yaml = R"EOF( +cluster: fake_cluster +stat_prefix: foo +settings: + op_timeout: 0.02s )EOF"; - Json::ObjectSharedPtr json_config = Json::Factory::loadFromString(json_string); NiceMock context; RedisProxyFilterConfigFactory factory; envoy::config::filter::network::redis_proxy::v2::RedisProxy proto_config = 
*dynamic_cast( factory.createEmptyConfigProto().get()); - Config::FilterJson::translateRedisProxy(*json_config, proto_config); + MessageUtil::loadFromYamlAndValidate(yaml, proto_config); Network::FilterFactoryCb cb = factory.createFilterFactoryFromProto(proto_config, context); Network::MockConnection connection; From 1e174fff8d9884713c65cd7bf1ae00dcc6761a2e Mon Sep 17 00:00:00 2001 From: Derek Argueta Date: Mon, 1 Apr 2019 09:24:16 -0700 Subject: [PATCH 046/165] update network client_ssl_auth tests (#6444) Signed-off-by: Derek Argueta --- .../filters/network/client_ssl_auth/BUILD | 1 + .../network/client_ssl_auth/config_test.cc | 72 +++++++++---------- 2 files changed, 37 insertions(+), 36 deletions(-) diff --git a/test/extensions/filters/network/client_ssl_auth/BUILD b/test/extensions/filters/network/client_ssl_auth/BUILD index 8f0433c13c947..508f84ddea245 100644 --- a/test/extensions/filters/network/client_ssl_auth/BUILD +++ b/test/extensions/filters/network/client_ssl_auth/BUILD @@ -34,6 +34,7 @@ envoy_extension_cc_test( srcs = ["config_test.cc"], extension_name = "envoy.filters.network.client_ssl_auth", deps = [ + "//source/common/protobuf:utility_lib", "//source/extensions/filters/network/client_ssl_auth:config", "//test/mocks/server:server_mocks", ], diff --git a/test/extensions/filters/network/client_ssl_auth/config_test.cc b/test/extensions/filters/network/client_ssl_auth/config_test.cc index 597e1dfadc4ab..42c7d8b9ad365 100644 --- a/test/extensions/filters/network/client_ssl_auth/config_test.cc +++ b/test/extensions/filters/network/client_ssl_auth/config_test.cc @@ -1,6 +1,7 @@ #include "envoy/registry/registry.h" #include "common/config/filter_json.h" +#include "common/protobuf/utility.h" #include "extensions/filters/network/client_ssl_auth/config.h" #include "extensions/filters/network/well_known_names.h" @@ -19,42 +20,45 @@ namespace ClientSslAuth { class IpWhiteListConfigTest : public testing::TestWithParam {}; +const std::string ipv4_cidr_yaml = 
R"EOF( +- address_prefix: "192.168.3.0" + prefix_len: 24 +)EOF"; + +const std::string ipv6_cidr_yaml = R"EOF( +- address_prefix: "2001:abcd::" + prefix_len: 64 +)EOF"; + INSTANTIATE_TEST_SUITE_P(IpList, IpWhiteListConfigTest, - ::testing::Values(R"EOF(["192.168.3.0/24"])EOF", - R"EOF(["2001:abcd::/64"])EOF")); + ::testing::Values(ipv4_cidr_yaml, ipv6_cidr_yaml)); TEST_P(IpWhiteListConfigTest, ClientSslAuthCorrectJson) { - std::string json_string = R"EOF( - { - "stat_prefix": "my_stat_prefix", - "auth_api_cluster" : "fake_cluster", - "ip_white_list":)EOF" + GetParam() + - R"EOF( - } - )EOF"; - - Json::ObjectSharedPtr json_config = Json::Factory::loadFromString(json_string); + const std::string yaml = R"EOF( +stat_prefix: my_stat_prefix +auth_api_cluster: fake_cluster +ip_white_list: +)EOF" + GetParam(); + + envoy::config::filter::network::client_ssl_auth::v2::ClientSSLAuth proto_config; + MessageUtil::loadFromYamlAndValidate(yaml, proto_config); NiceMock context; ClientSslAuthConfigFactory factory; - Network::FilterFactoryCb cb = factory.createFilterFactory(*json_config, context); + Network::FilterFactoryCb cb = factory.createFilterFactoryFromProto(proto_config, context); Network::MockConnection connection; EXPECT_CALL(connection, addReadFilter(_)); cb(connection); } TEST_P(IpWhiteListConfigTest, ClientSslAuthCorrectProto) { - std::string json_string = R"EOF( - { - "stat_prefix": "my_stat_prefix", - "auth_api_cluster" : "fake_cluster", - "ip_white_list":)EOF" + GetParam() + - R"EOF( - } - )EOF"; - - Json::ObjectSharedPtr json_config = Json::Factory::loadFromString(json_string); - envoy::config::filter::network::client_ssl_auth::v2::ClientSSLAuth proto_config{}; - Envoy::Config::FilterJson::translateClientSslAuthFilter(*json_config, proto_config); + const std::string yaml = R"EOF( +stat_prefix: my_stat_prefix +auth_api_cluster: fake_cluster +ip_white_list: +)EOF" + GetParam(); + + envoy::config::filter::network::client_ssl_auth::v2::ClientSSLAuth proto_config; + 
MessageUtil::loadFromYamlAndValidate(yaml, proto_config); NiceMock context; ClientSslAuthConfigFactory factory; Network::FilterFactoryCb cb = factory.createFilterFactoryFromProto(proto_config, context); @@ -64,23 +68,19 @@ TEST_P(IpWhiteListConfigTest, ClientSslAuthCorrectProto) { } TEST_P(IpWhiteListConfigTest, ClientSslAuthEmptyProto) { - std::string json_string = R"EOF( - { - "stat_prefix": "my_stat_prefix", - "auth_api_cluster" : "fake_cluster", - "ip_white_list":)EOF" + GetParam() + - R"EOF( - } - )EOF"; - - Json::ObjectSharedPtr json_config = Json::Factory::loadFromString(json_string); + const std::string yaml = R"EOF( +stat_prefix: my_stat_prefix +auth_api_cluster: fake_cluster +ip_white_list: +)EOF" + GetParam(); + NiceMock context; ClientSslAuthConfigFactory factory; envoy::config::filter::network::client_ssl_auth::v2::ClientSSLAuth proto_config = *dynamic_cast( factory.createEmptyConfigProto().get()); - Envoy::Config::FilterJson::translateClientSslAuthFilter(*json_config, proto_config); + MessageUtil::loadFromYamlAndValidate(yaml, proto_config); Network::FilterFactoryCb cb = factory.createFilterFactoryFromProto(proto_config, context); Network::MockConnection connection; EXPECT_CALL(connection, addReadFilter(_)); From 83def90e431594f53fd1da34135951a15c95fbd7 Mon Sep 17 00:00:00 2001 From: Yuval Kohavi Date: Mon, 1 Apr 2019 13:31:20 -0400 Subject: [PATCH 047/165] fix null pointer access when http 1.0 request is rejected (#6417) Signed-off-by: Yuval Kohavi --- source/common/router/config_impl.cc | 11 ++++++++-- test/common/router/config_impl_test.cc | 23 ++++++++++++++++++++- test/common/router/rds_impl_test.cc | 14 ++++++++----- test/common/router/router_ratelimit_test.cc | 3 ++- 4 files changed, 42 insertions(+), 9 deletions(-) diff --git a/source/common/router/config_impl.cc b/source/common/router/config_impl.cc index af3bdb01ffc74..3cfdbc63f596c 100644 --- a/source/common/router/config_impl.cc +++ b/source/common/router/config_impl.cc @@ -991,11 +991,18 
@@ RouteMatcher::RouteMatcher(const envoy::api::v2::RouteConfiguration& route_confi RouteConstSharedPtr VirtualHostImpl::getRouteFromEntries(const Http::HeaderMap& headers, uint64_t random_value) const { + // No x-forwarded-proto header. This normally only happens when ActiveStream::decodeHeaders + // bails early (as it rejects a request), so there is no routing is going to happen anyway. + const auto* forwarded_proto_header = headers.ForwardedProto(); + if (forwarded_proto_header == nullptr) { + return nullptr; + } + // First check for ssl redirect. - if (ssl_requirements_ == SslRequirements::ALL && headers.ForwardedProto()->value() != "https") { + if (ssl_requirements_ == SslRequirements::ALL && forwarded_proto_header->value() != "https") { return SSL_REDIRECT_ROUTE; } else if (ssl_requirements_ == SslRequirements::EXTERNAL_ONLY && - headers.ForwardedProto()->value() != "https" && !headers.EnvoyInternalRequest()) { + forwarded_proto_header->value() != "https" && !headers.EnvoyInternalRequest()) { return SSL_REDIRECT_ROUTE; } diff --git a/test/common/router/config_impl_test.cc b/test/common/router/config_impl_test.cc index 2492fbd71029a..f17562c6e3e2b 100644 --- a/test/common/router/config_impl_test.cc +++ b/test/common/router/config_impl_test.cc @@ -82,7 +82,7 @@ Http::TestHeaderMapImpl genHeaders(const std::string& host, const std::string& p return Http::TestHeaderMapImpl{{":authority", host}, {":path", path}, {":method", method}, {"x-safe", "safe"}, {"x-global-nope", "global"}, {"x-vhost-nope", "vhost"}, - {"x-route-nope", "route"}}; + {"x-route-nope", "route"}, {"x-forwarded-proto", "http"}}; } envoy::api::v2::RouteConfiguration parseRouteConfigurationFromV2Yaml(const std::string& yaml) { @@ -2728,6 +2728,27 @@ TEST_F(RouteMatcherTest, TestDomainMatchOrderConfig) { config.route(genHeaders("www.example.c", "/", "GET"), 0)->routeEntry()->clusterName()); } +TEST_F(RouteMatcherTest, NoProtocolInHeadersWhenTlsIsRequired) { + const std::string yaml = R"EOF( 
+virtual_hosts: +- name: www + require_tls: all + domains: + - www.lyft.com + routes: + - match: + prefix: "/" + route: + cluster: www + )EOF"; + + TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + + // route may be called early in some edge cases and "x-forwarded-proto" will not be set. + Http::TestHeaderMapImpl headers{{":authority", "www.lyft.com"}, {":path", "/"}}; + EXPECT_EQ(nullptr, config.route(headers, 0)); +} + static Http::TestHeaderMapImpl genRedirectHeaders(const std::string& host, const std::string& path, bool ssl, bool internal) { Http::TestHeaderMapImpl headers{ diff --git a/test/common/router/rds_impl_test.cc b/test/common/router/rds_impl_test.cc index 9ac30b18b9079..c73ea59e01901 100644 --- a/test/common/router/rds_impl_test.cc +++ b/test/common/router/rds_impl_test.cc @@ -130,6 +130,11 @@ class RdsImplTest : public RdsTestBase { factory_context_.init_manager_.initialize(init_watcher_); } + RouteConstSharedPtr route(Http::TestHeaderMapImpl headers) { + headers.addCopy("x-forwarded-proto", "http"); + return rds_->config()->route(headers, 0); + } + NiceMock scope_; NiceMock server_; std::unique_ptr route_config_provider_manager_; @@ -221,7 +226,7 @@ TEST_F(RdsImplTest, Basic) { setup(); // Make sure the initial empty route table works. - EXPECT_EQ(nullptr, rds_->config()->route(Http::TestHeaderMapImpl{{":authority", "foo"}}, 0)); + EXPECT_EQ(nullptr, route(Http::TestHeaderMapImpl{{":authority", "foo"}})); EXPECT_EQ(0UL, factory_context_.scope_.gauge("foo.rds.foo_route_config.version").value()); // Initial request. 
@@ -245,7 +250,7 @@ TEST_F(RdsImplTest, Basic) { EXPECT_CALL(init_watcher_, ready()); EXPECT_CALL(*interval_timer_, enableTimer(_)); callbacks_->onSuccess(std::move(message)); - EXPECT_EQ(nullptr, rds_->config()->route(Http::TestHeaderMapImpl{{":authority", "foo"}}, 0)); + EXPECT_EQ(nullptr, route(Http::TestHeaderMapImpl{{":authority", "foo"}})); EXPECT_EQ(13237225503670494420U, factory_context_.scope_.gauge("foo.rds.foo_route_config.version").value()); @@ -259,7 +264,7 @@ TEST_F(RdsImplTest, Basic) { EXPECT_CALL(*interval_timer_, enableTimer(_)); callbacks_->onSuccess(std::move(message)); - EXPECT_EQ(nullptr, rds_->config()->route(Http::TestHeaderMapImpl{{":authority", "foo"}}, 0)); + EXPECT_EQ(nullptr, route(Http::TestHeaderMapImpl{{":authority", "foo"}})); EXPECT_EQ(13237225503670494420U, factory_context_.scope_.gauge("foo.rds.foo_route_config.version").value()); @@ -310,8 +315,7 @@ TEST_F(RdsImplTest, Basic) { EXPECT_CALL(factory_context_.cluster_manager_, get("bar")).Times(0); EXPECT_CALL(*interval_timer_, enableTimer(_)); callbacks_->onSuccess(std::move(message)); - EXPECT_EQ("foo", rds_->config() - ->route(Http::TestHeaderMapImpl{{":authority", "foo"}, {":path", "/foo"}}, 0) + EXPECT_EQ("foo", route(Http::TestHeaderMapImpl{{":authority", "foo"}, {":path", "/foo"}}) ->routeEntry() ->clusterName()); diff --git a/test/common/router/router_ratelimit_test.cc b/test/common/router/router_ratelimit_test.cc index da129018b2522..b021cc07cb49e 100644 --- a/test/common/router/router_ratelimit_test.cc +++ b/test/common/router/router_ratelimit_test.cc @@ -80,7 +80,8 @@ TEST(BadRateLimitConfiguration, ActionsMissingRequiredFields) { static Http::TestHeaderMapImpl genHeaders(const std::string& host, const std::string& path, const std::string& method) { - return Http::TestHeaderMapImpl{{":authority", host}, {":path", path}, {":method", method}}; + return Http::TestHeaderMapImpl{ + {":authority", host}, {":path", path}, {":method", method}, {"x-forwarded-proto", "http"}}; } 
class RateLimitConfiguration : public testing::Test { From d9eb0b7ade38fbf214cf8ff4112f491107c24dcc Mon Sep 17 00:00:00 2001 From: Dan Rosen Date: Mon, 1 Apr 2019 13:33:56 -0400 Subject: [PATCH 048/165] admin: mutating without POST should fail with 405, not 400 (#6436) Signed-off-by: Dan Rosen --- docs/root/intro/version_history.rst | 1 + source/server/http/admin.cc | 4 ++-- test/server/http/admin_test.cc | 2 +- 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/docs/root/intro/version_history.rst b/docs/root/intro/version_history.rst index 8883a1e47b3d3..ac2168dba4ac9 100644 --- a/docs/root/intro/version_history.rst +++ b/docs/root/intro/version_history.rst @@ -10,6 +10,7 @@ Version history :ref:`gRPC access logger` for HTTP access logs. * access log: added new fields for downstream x509 information (URI sans and subject) to file and gRPC access logger. * admin: the admin server can now be accessed via HTTP/2 (prior knowledge). +* admin: changed HTTP response status code from 400 to 405 when attempting to GET a POST-only route (such as /quitquitquit). * buffer: fix vulnerabilities when allocation fails. * build: releases are built with GCC-7 and linked with LLD. 
* build: dev docker images :ref:`have been split ` from tagged images for easier diff --git a/source/server/http/admin.cc b/source/server/http/admin.cc index 09b2b646fbed1..95b4f1b718714 100644 --- a/source/server/http/admin.cc +++ b/source/server/http/admin.cc @@ -1197,8 +1197,8 @@ Http::Code AdminImpl::runCallback(absl::string_view path_and_query, if (method != Http::Headers::get().MethodValues.Post) { ENVOY_LOG(error, "admin path \"{}\" mutates state, method={} rather than POST", handler.prefix_, method); - code = Http::Code::BadRequest; - response.add("Invalid request; POST required"); + code = Http::Code::MethodNotAllowed; + response.add(fmt::format("Method {} not allowed, POST required.", method)); break; } } diff --git a/test/server/http/admin_test.cc b/test/server/http/admin_test.cc index 25e45c60c24ec..533e1afaa3a5f 100644 --- a/test/server/http/admin_test.cc +++ b/test/server/http/admin_test.cc @@ -698,7 +698,7 @@ TEST_P(AdminInstanceTest, MutatesErrorWithGet) { // just issue a warning, so that scripts using curl GET commands to mutate state can be fixed. EXPECT_LOG_CONTAINS("error", "admin path \"" + path + "\" mutates state, method=GET rather than POST", - EXPECT_EQ(Http::Code::BadRequest, getCallback(path, header_map, data))); + EXPECT_EQ(Http::Code::MethodNotAllowed, getCallback(path, header_map, data))); } TEST_P(AdminInstanceTest, AdminBadProfiler) { From fd273a65e03cb14cd46891c539b623f4126114f8 Mon Sep 17 00:00:00 2001 From: Joshua Marantz Date: Mon, 1 Apr 2019 21:14:03 -0400 Subject: [PATCH 049/165] Speedups to FakeSymbolTableTest based on microbenchmarks from #6161. (#6293) * Speedups to FakeSymbolTableTest based on microbenchmarks from #6161. 
Signed-off-by: Joshua Marantz --- include/envoy/stats/symbol_table.h | 80 +++++--- source/common/stats/fake_symbol_table_impl.h | 69 +++++-- source/common/stats/symbol_table_impl.cc | 112 +++++------ source/common/stats/symbol_table_impl.h | 185 +++++++++++-------- test/common/stats/symbol_table_impl_test.cc | 12 +- test/test_common/BUILD | 2 + tools/spelling_dictionary.txt | 2 + 7 files changed, 287 insertions(+), 175 deletions(-) diff --git a/include/envoy/stats/symbol_table.h b/include/envoy/stats/symbol_table.h index b0efd1cbd75d1..4b4c8a4c4fe9e 100644 --- a/include/envoy/stats/symbol_table.h +++ b/include/envoy/stats/symbol_table.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include @@ -19,15 +20,7 @@ namespace Stats { */ class StatName; -/** - * Intermediate representation for a stat-name. This helps store multiple names - * in a single packed allocation. First we encode each desired name, then sum - * their sizes for the single packed allocation. This is used to store - * MetricImpl's tags and tagExtractedName. Like StatName, we don't want to pay - * a vptr overhead per object, and the representation is shared between the - * SymbolTable implementations, so this is just a pre-declare. - */ -class SymbolEncoding; +class StatNameList; /** * SymbolTable manages a namespace optimized for stat names, exploiting their @@ -59,22 +52,6 @@ class SymbolTable { virtual ~SymbolTable() = default; - /** - * Encodes a stat name using the symbol table, returning a SymbolEncoding. The - * SymbolEncoding is not intended for long-term storage, but is used to help - * allocate a StatName with the correct amount of storage. - * - * When a name is encoded, it bumps reference counts held in the table for - * each symbol. The caller is responsible for creating a StatName using this - * SymbolEncoding and ultimately disposing of it by calling - * SymbolTable::free(). Users are protected from leaking symbols into the pool - * by ASSERTions in the SymbolTable destructor. 
- * - * @param name The name to encode. - * @return SymbolEncoding the encoded symbols. - */ - virtual SymbolEncoding encode(absl::string_view name) PURE; - /** * @return uint64_t the number of symbols in the symbol table. */ @@ -116,9 +93,9 @@ class SymbolTable { * decode/encode into the elaborated form, and does not require locking the * SymbolTable. * - * The caveat is that this representation does not bump reference counts on - * the referenced Symbols in the SymbolTable, so it's only valid as long for - * the lifetime of the joined StatNames. + * Note that this method does not bump reference counts on the referenced + * Symbols in the SymbolTable, so it's only valid as long for the lifetime of + * the joined StatNames. * * This is intended for use doing cached name lookups of scoped stats, where * the scope prefix and the names to combine it with are already in StatName @@ -130,14 +107,50 @@ class SymbolTable { */ virtual StoragePtr join(const std::vector& stat_names) const PURE; + /** + * Populates a StatNameList from a list of encodings. This is not done at + * construction time to enable StatNameList to be instantiated directly in + * a class that doesn't have a live SymbolTable when it is constructed. + * + * @param names A pointer to the first name in an array, allocated by the caller. + * @param num_names The number of names. + * @param symbol_table The symbol table in which to encode the names. + */ + virtual void populateList(const absl::string_view* names, uint32_t num_names, + StatNameList& list) PURE; + #ifndef ENVOY_CONFIG_COVERAGE virtual void debugPrint() const PURE; #endif + /** + * Calls the provided function with a string-view representation of the + * elaborated name. This is useful during the interim period when we + * are using FakeSymbolTableImpl, to avoid an extra allocation. Once + * we migrate to using SymbolTableImpl, this interface will no longer + * be helpful and can be removed. 
The reason it's useful now is that + * it makes up, in part, for some extra runtime overhead that is spent + * on the SymbolTable abstraction and API, without getting full benefit + * from the improved representation. + * + * TODO(#6307): Remove this when the transition from FakeSymbolTableImpl to + * SymbolTableImpl is complete. + * + * @param stat_name The stat name. + * @param fn The function to call with the elaborated stat name as a string_view. + */ + virtual void callWithStringView(StatName stat_name, + const std::function& fn) const PURE; + private: + friend struct HeapStatData; friend class StatNameStorage; friend class StatNameList; + // The following methods are private, but are called by friend classes + // StatNameStorage and StatNameList, which must be friendly with SymbolTable + // in order to manage the reference-counted symbols they own. + /** * Since SymbolTable does manual reference counting, a client of SymbolTable * must manually call free(symbol_vec) when it is freeing the backing store @@ -158,6 +171,17 @@ class SymbolTable { * @param stat_name the stat name. */ virtual void incRefCount(const StatName& stat_name) PURE; + + /** + * Encodes 'name' into the symbol table. Bumps reference counts for referenced + * symbols. The caller must manage the storage, and is responsible for calling + * SymbolTable::free() to release the reference counts. + * + * @param name The name to encode. + * @return The encoded name, transferring ownership to the caller. 
+ * + */ + virtual StoragePtr encode(absl::string_view name) PURE; }; using SharedSymbolTable = std::shared_ptr; diff --git a/source/common/stats/fake_symbol_table_impl.h b/source/common/stats/fake_symbol_table_impl.h index 6c7e2f37bdf86..71560908c1574 100644 --- a/source/common/stats/fake_symbol_table_impl.h +++ b/source/common/stats/fake_symbol_table_impl.h @@ -44,12 +44,57 @@ namespace Stats { * that backs each StatName, so there is no sharing or memory savings, but also * no state associated with the SymbolTable, and thus no locks needed. * - * TODO(jmarantz): delete this class once SymbolTable is fully deployed in the + * TODO(#6307): delete this class once SymbolTable is fully deployed in the * Envoy codebase. */ class FakeSymbolTableImpl : public SymbolTable { public: - SymbolEncoding encode(absl::string_view name) override { return encodeHelper(name); } + // SymbolTable + void populateList(const absl::string_view* names, uint32_t num_names, + StatNameList& list) override { + // This implementation of populateList is similar to + // SymbolTableImpl::populateList. This variant is more efficient for + // FakeSymbolTableImpl, because it avoid "encoding" each name in names. The + // strings are laid out abutting each other with 2-byte length prefixes, so + // encoding isn't needed, and doing a dummy encoding step would cost one + // memory allocation per element, adding significant overhead as measured by + // thread_local_store_speed_test. + + // We encode the number of names in a single byte, thus there must be less + // than 256 of them. + RELEASE_ASSERT(num_names < 256, "Maximum number elements in a StatNameList exceeded"); + + // First encode all the names. The '1' here represents the number of + // names. The num_names * StatNameSizeEncodingBytes reserves space for the + // lengths of each name. 
+ size_t total_size_bytes = 1 + num_names * StatNameSizeEncodingBytes; + + for (uint32_t i = 0; i < num_names; ++i) { + total_size_bytes += names[i].size(); + } + + // Now allocate the exact number of bytes required and move the encodings + // into storage. + auto storage = std::make_unique(total_size_bytes); + uint8_t* p = &storage[0]; + *p++ = num_names; + for (uint32_t i = 0; i < num_names; ++i) { + auto& name = names[i]; + size_t sz = name.size(); + p = SymbolTableImpl::writeLengthReturningNext(sz, p); + if (!name.empty()) { + memcpy(p, name.data(), sz * sizeof(uint8_t)); + p += sz; + } + } + + // This assertion double-checks the arithmetic where we computed + // total_size_bytes. After appending all the encoded data into the + // allocated byte array, we should wind up with a pointer difference of + // total_size_bytes from the beginning of the allocation. + ASSERT(p == &storage[0] + total_size_bytes); + list.moveStorageIntoList(std::move(storage)); + } std::string toString(const StatName& stat_name) const override { return std::string(toStringView(stat_name)); @@ -60,6 +105,7 @@ class FakeSymbolTableImpl : public SymbolTable { } void free(const StatName&) override {} void incRefCount(const StatName&) override {} + StoragePtr encode(absl::string_view name) override { return encodeHelper(name); } SymbolTable::StoragePtr join(const std::vector& names) const override { std::vector strings; for (StatName name : names) { @@ -68,28 +114,27 @@ class FakeSymbolTableImpl : public SymbolTable { strings.push_back(str); } } - return stringToStorage(absl::StrJoin(strings, ".")); + return encodeHelper(absl::StrJoin(strings, ".")); } #ifndef ENVOY_CONFIG_COVERAGE void debugPrint() const override {} #endif -private: - SymbolEncoding encodeHelper(absl::string_view name) const { - SymbolEncoding encoding; - encoding.addStringForFakeSymbolTable(name); - return encoding; + void callWithStringView(StatName stat_name, + const std::function& fn) const override { + 
fn(toStringView(stat_name)); } +private: absl::string_view toStringView(const StatName& stat_name) const { return {reinterpret_cast(stat_name.data()), stat_name.dataSize()}; } - SymbolTable::StoragePtr stringToStorage(absl::string_view name) const { - SymbolEncoding encoding = encodeHelper(name); - auto bytes = std::make_unique(encoding.bytesRequired()); - encoding.moveToStorage(bytes.get()); + StoragePtr encodeHelper(absl::string_view name) const { + auto bytes = std::make_unique(name.size() + StatNameSizeEncodingBytes); + uint8_t* buffer = SymbolTableImpl::writeLengthReturningNext(name.size(), bytes.get()); + memcpy(buffer, name.data(), name.size()); return bytes; } }; diff --git a/source/common/stats/symbol_table_impl.cc b/source/common/stats/symbol_table_impl.cc index 4608886511fbe..17948ee8e90fb 100644 --- a/source/common/stats/symbol_table_impl.cc +++ b/source/common/stats/symbol_table_impl.cc @@ -30,7 +30,7 @@ void StatName::debugPrint() { for (uint64_t i = 0; i < nbytes; ++i) { absl::StrAppend(&msg, " ", static_cast(data()[i])); } - SymbolVec encoding = SymbolEncoding::decodeSymbols(data(), dataSize()); + SymbolVec encoding = SymbolTableImpl::Encoding::decodeSymbols(data(), dataSize()); absl::StrAppend(&msg, ", numSymbols=", encoding.size(), ":"); for (Symbol symbol : encoding) { absl::StrAppend(&msg, " ", symbol); @@ -40,9 +40,13 @@ void StatName::debugPrint() { } #endif -SymbolEncoding::~SymbolEncoding() { ASSERT(vec_.empty()); } +SymbolTableImpl::Encoding::~Encoding() { + // Verifies that moveToStorage() was called on this encoding. Failure + // to call moveToStorage() will result in leaks symbols. + ASSERT(vec_.empty()); +} -void SymbolEncoding::addSymbol(Symbol symbol) { +void SymbolTableImpl::Encoding::addSymbol(Symbol symbol) { // UTF-8-like encoding where a value 127 or less gets written as a single // byte. For higher values we write the low-order 7 bits with a 1 in // the high-order bit. 
Then we right-shift 7 bits and keep adding more bytes @@ -60,14 +64,8 @@ void SymbolEncoding::addSymbol(Symbol symbol) { } while (symbol != 0); } -void SymbolEncoding::addStringForFakeSymbolTable(absl::string_view str) { - if (!str.empty()) { - vec_.resize(str.size()); - memcpy(&vec_[0], str.data(), str.size()); - } -} - -SymbolVec SymbolEncoding::decodeSymbols(const SymbolTable::Storage array, uint64_t size) { +SymbolVec SymbolTableImpl::Encoding::decodeSymbols(const SymbolTable::Storage array, + uint64_t size) { SymbolVec symbol_vec; Symbol symbol = 0; for (uint32_t shift = 0; size > 0; --size, ++array) { @@ -88,19 +86,9 @@ SymbolVec SymbolEncoding::decodeSymbols(const SymbolTable::Storage array, uint64 return symbol_vec; } -// Saves the specified length into the byte array, returning the next byte. -// There is no guarantee that bytes will be aligned, so we can't cast to a -// uint16_t* and assign, but must individually copy the bytes. -static inline uint8_t* saveLengthToBytesReturningNext(uint64_t length, uint8_t* bytes) { - ASSERT(length < StatNameMaxSize); - *bytes++ = length & 0xff; - *bytes++ = length >> 8; - return bytes; -} - -uint64_t SymbolEncoding::moveToStorage(SymbolTable::Storage symbol_array) { - uint64_t sz = size(); - symbol_array = saveLengthToBytesReturningNext(sz, symbol_array); +uint64_t SymbolTableImpl::Encoding::moveToStorage(SymbolTable::Storage symbol_array) { + uint64_t sz = dataBytesRequired(); + symbol_array = writeLengthReturningNext(sz, symbol_array); if (sz != 0) { memcpy(symbol_array, vec_.data(), sz * sizeof(uint8_t)); } @@ -123,11 +111,9 @@ SymbolTableImpl::~SymbolTableImpl() { // TODO(ambuc): There is a possible performance optimization here for avoiding // the encoding of IPs / numbers if they appear in stat names. We don't want to // waste time symbolizing an integer as an integer, if we can help it. 
-SymbolEncoding SymbolTableImpl::encode(const absl::string_view name) { - SymbolEncoding encoding; - +void SymbolTableImpl::addTokensToEncoding(const absl::string_view name, Encoding& encoding) { if (name.empty()) { - return encoding; + return; } // We want to hold the lock for the minimum amount of time, so we do the @@ -149,7 +135,6 @@ SymbolEncoding SymbolTableImpl::encode(const absl::string_view name) { for (Symbol symbol : symbols) { encoding.addSymbol(symbol); } - return encoding; } uint64_t SymbolTableImpl::numSymbols() const { @@ -159,7 +144,12 @@ uint64_t SymbolTableImpl::numSymbols() const { } std::string SymbolTableImpl::toString(const StatName& stat_name) const { - return decodeSymbolVec(SymbolEncoding::decodeSymbols(stat_name.data(), stat_name.dataSize())); + return decodeSymbolVec(Encoding::decodeSymbols(stat_name.data(), stat_name.dataSize())); +} + +void SymbolTableImpl::callWithStringView(StatName stat_name, + const std::function& fn) const { + fn(toString(stat_name)); } std::string SymbolTableImpl::decodeSymbolVec(const SymbolVec& symbols) const { @@ -177,7 +167,7 @@ std::string SymbolTableImpl::decodeSymbolVec(const SymbolVec& symbols) const { void SymbolTableImpl::incRefCount(const StatName& stat_name) { // Before taking the lock, decode the array of symbols from the SymbolTable::Storage. - SymbolVec symbols = SymbolEncoding::decodeSymbols(stat_name.data(), stat_name.dataSize()); + SymbolVec symbols = Encoding::decodeSymbols(stat_name.data(), stat_name.dataSize()); Thread::LockGuard lock(lock_); for (Symbol symbol : symbols) { @@ -193,7 +183,7 @@ void SymbolTableImpl::incRefCount(const StatName& stat_name) { void SymbolTableImpl::free(const StatName& stat_name) { // Before taking the lock, decode the array of symbols from the SymbolTable::Storage. 
- SymbolVec symbols = SymbolEncoding::decodeSymbols(stat_name.data(), stat_name.dataSize()); + SymbolVec symbols = Encoding::decodeSymbols(stat_name.data(), stat_name.dataSize()); Thread::LockGuard lock(lock_); for (Symbol symbol : symbols) { @@ -265,8 +255,8 @@ bool SymbolTableImpl::lessThan(const StatName& a, const StatName& b) const { // If this becomes a performance bottleneck (e.g. during sorting), we could // provide an iterator-like interface for incrementally decoding the symbols // without allocating memory. - SymbolVec av = SymbolEncoding::decodeSymbols(a.data(), a.dataSize()); - SymbolVec bv = SymbolEncoding::decodeSymbols(b.data(), b.dataSize()); + SymbolVec av = Encoding::decodeSymbols(a.data(), a.dataSize()); + SymbolVec bv = Encoding::decodeSymbols(b.data(), b.dataSize()); // Calling fromSymbol requires holding the lock, as it needs read-access to // the maps that are written when adding new symbols. @@ -296,15 +286,20 @@ void SymbolTableImpl::debugPrint() const { } #endif -StatNameStorage::StatNameStorage(absl::string_view name, SymbolTable& table) { - SymbolEncoding encoding = table.encode(name); - bytes_ = std::make_unique(encoding.bytesRequired()); - encoding.moveToStorage(bytes_.get()); +SymbolTable::StoragePtr SymbolTableImpl::encode(absl::string_view name) { + Encoding encoding; + addTokensToEncoding(name, encoding); + auto bytes = std::make_unique(encoding.bytesRequired()); + encoding.moveToStorage(bytes.get()); + return bytes; } +StatNameStorage::StatNameStorage(absl::string_view name, SymbolTable& table) + : bytes_(table.encode(name)) {} + StatNameStorage::StatNameStorage(StatName src, SymbolTable& table) { uint64_t size = src.size(); - bytes_ = std::make_unique(size); + bytes_ = std::make_unique(size); src.copyToStorage(bytes_.get()); table.incRefCount(statName()); } @@ -327,8 +322,8 @@ SymbolTable::StoragePtr SymbolTableImpl::join(const std::vector& stat_ for (StatName stat_name : stat_names) { num_bytes += stat_name.dataSize(); } - auto 
bytes = std::make_unique(num_bytes + StatNameSizeEncodingBytes); - uint8_t* p = saveLengthToBytesReturningNext(num_bytes, bytes.get()); + auto bytes = std::make_unique(num_bytes + StatNameSizeEncodingBytes); + uint8_t* p = writeLengthReturningNext(num_bytes, bytes.get()); for (StatName stat_name : stat_names) { num_bytes = stat_name.dataSize(); memcpy(p, stat_name.data(), num_bytes); @@ -337,34 +332,39 @@ SymbolTable::StoragePtr SymbolTableImpl::join(const std::vector& stat_ return bytes; } -StatNameList::~StatNameList() { ASSERT(!populated()); } - -void StatNameList::populate(const std::vector& names, - SymbolTable& symbol_table) { - RELEASE_ASSERT(names.size() < 256, "Maximum number elements in a StatNameList exceeded"); +void SymbolTableImpl::populateList(const absl::string_view* names, uint32_t num_names, + StatNameList& list) { + RELEASE_ASSERT(num_names < 256, "Maximum number elements in a StatNameList exceeded"); // First encode all the names. size_t total_size_bytes = 1; /* one byte for holding the number of names */ - std::vector encodings; - encodings.resize(names.size()); - int index = 0; - for (auto& name : names) { - SymbolEncoding encoding = symbol_table.encode(name); + + STACK_ARRAY(encodings, Encoding, num_names); + for (uint32_t i = 0; i < num_names; ++i) { + Encoding& encoding = encodings[i]; + addTokensToEncoding(names[i], encoding); total_size_bytes += encoding.bytesRequired(); - encodings[index++].swap(encoding); } // Now allocate the exact number of bytes required and move the encodings // into storage. - storage_ = std::make_unique(total_size_bytes); - uint8_t* p = &storage_[0]; - *p++ = encodings.size(); + auto storage = std::make_unique(total_size_bytes); + uint8_t* p = &storage[0]; + *p++ = num_names; for (auto& encoding : encodings) { p += encoding.moveToStorage(p); } - ASSERT(p == &storage_[0] + total_size_bytes); + + // This assertion double-checks the arithmetic where we computed + // total_size_bytes. 
After appending all the encoded data into the + // allocated byte array, we should wind up with a pointer difference of + // total_size_bytes from the beginning of the allocation. + ASSERT(p == &storage[0] + total_size_bytes); + list.moveStorageIntoList(std::move(storage)); } +StatNameList::~StatNameList() { ASSERT(!populated()); } + void StatNameList::iterate(const std::function& f) const { uint8_t* p = &storage_[0]; uint32_t num_elements = *p++; diff --git a/source/common/stats/symbol_table_impl.h b/source/common/stats/symbol_table_impl.h index f7c33ebd1d93f..ade67b7a563e1 100644 --- a/source/common/stats/symbol_table_impl.h +++ b/source/common/stats/symbol_table_impl.h @@ -15,6 +15,7 @@ #include "common/common/hash.h" #include "common/common/lock_guard.h" #include "common/common/non_copyable.h" +#include "common/common/stack_array.h" #include "common/common/thread.h" #include "common/common/utility.h" @@ -37,67 +38,6 @@ constexpr uint64_t StatNameMaxSize = 1 << (8 * StatNameSizeEncodingBytes); // 65 /** Transient representations of a vector of 32-bit symbols */ using SymbolVec = std::vector; -/** - * Represents an 8-bit encoding of a vector of symbols, used as a transient - * representation during encoding and prior to retained allocation. - */ -class SymbolEncoding { -public: - /** - * Before destructing SymbolEncoding, you must call moveToStorage. This - * transfers ownership, and in particular, the responsibility to call - * SymbolTable::clear() on all referenced symbols. If we ever wanted - * to be able to destruct a SymbolEncoding without transferring it - * we could add a clear(SymbolTable&) method. - */ - ~SymbolEncoding(); - - /** - * Encodes a token into the vec. - * - * @param symbol the symbol to encode. - */ - void addSymbol(Symbol symbol); - - /** - * Encodes an entire string into the vec, on behalf of FakeSymbolTableImpl. - * TODO(jmarantz): delete this method when FakeSymbolTableImpl is deleted. - * - * @param str The string to encode. 
- */ - void addStringForFakeSymbolTable(absl::string_view str); - - /** - * Decodes a uint8_t array into a SymbolVec. - */ - static SymbolVec decodeSymbols(const SymbolTable::Storage array, uint64_t size); - - /** - * Returns the number of bytes required to represent StatName as a uint8_t - * array, including the encoded size. - */ - uint64_t bytesRequired() const { return size() + StatNameSizeEncodingBytes; } - - /** - * Returns the number of uint8_t entries we collected while adding symbols. - */ - uint64_t size() const { return vec_.size(); } - - /** - * Moves the contents of the vector into an allocated array. The array - * must have been allocated with bytesRequired() bytes. - * - * @param array destination memory to receive the encoded bytes. - * @return uint64_t the number of bytes transferred. - */ - uint64_t moveToStorage(SymbolTable::Storage array); - - void swap(SymbolEncoding& src) { vec_.swap(src.vec_); } - -private: - std::vector vec_; -}; - /** * SymbolTableImpl manages a namespace optimized for stats, which are typically * composed of arrays of "."-separated tokens, with a significant overlap @@ -130,22 +70,95 @@ class SymbolEncoding { */ class SymbolTableImpl : public SymbolTable { public: + /** + * Intermediate representation for a stat-name. This helps store multiple + * names in a single packed allocation. First we encode each desired name, + * then sum their sizes for the single packed allocation. This is used to + * store MetricImpl's tags and tagExtractedName. + */ + class Encoding { + public: + /** + * Before destructing SymbolEncoding, you must call moveToStorage. This + * transfers ownership, and in particular, the responsibility to call + * SymbolTable::clear() on all referenced symbols. If we ever wanted + * to be able to destruct a SymbolEncoding without transferring it + * we could add a clear(SymbolTable&) method. + */ + ~Encoding(); + + /** + * Encodes a token into the vec. + * + * @param symbol the symbol to encode. 
+ */ + void addSymbol(Symbol symbol); + + /** + * Decodes a uint8_t array into a SymbolVec. + */ + static SymbolVec decodeSymbols(const SymbolTable::Storage array, uint64_t size); + + /** + * Returns the number of bytes required to represent StatName as a uint8_t + * array, including the encoded size. + */ + uint64_t bytesRequired() const { return dataBytesRequired() + StatNameSizeEncodingBytes; } + + /** + * @return the number of uint8_t entries we collected while adding symbols. + */ + uint64_t dataBytesRequired() const { return vec_.size(); } + + /** + * Moves the contents of the vector into an allocated array. The array + * must have been allocated with bytesRequired() bytes. + * + * @param array destination memory to receive the encoded bytes. + * @return uint64_t the number of bytes transferred. + */ + uint64_t moveToStorage(SymbolTable::Storage array); + + private: + std::vector vec_; + }; + SymbolTableImpl(); ~SymbolTableImpl() override; // SymbolTable std::string toString(const StatName& stat_name) const override; - SymbolEncoding encode(absl::string_view name) override; uint64_t numSymbols() const override; bool lessThan(const StatName& a, const StatName& b) const override; void free(const StatName& stat_name) override; void incRefCount(const StatName& stat_name) override; SymbolTable::StoragePtr join(const std::vector& stat_names) const override; + void populateList(const absl::string_view* names, uint32_t num_names, + StatNameList& list) override; + StoragePtr encode(absl::string_view name) override; + void callWithStringView(StatName stat_name, + const std::function& fn) const override; #ifndef ENVOY_CONFIG_COVERAGE void debugPrint() const override; #endif + /** + * Saves the specified length into the byte array, returning the next byte. + * There is no guarantee that bytes will be aligned, so we can't cast to a + * uint16_t* and assign, but must individually copy the bytes. + * + * @param length the length in bytes to write. Must be < StatNameMaxSize. 
+ * @param bytes the pointer into which to write the length. + * @return the pointer to the next byte for writing the data. + */ + static inline uint8_t* writeLengthReturningNext(uint64_t length, uint8_t* bytes) { + ASSERT(length < StatNameMaxSize); + *bytes++ = length & 0xff; + *bytes++ = length >> 8; + return bytes; + } + private: friend class StatName; friend class StatNameTest; @@ -157,7 +170,7 @@ class SymbolTableImpl : public SymbolTable { uint32_t ref_count_; }; - // This must be called during both encode() and free(). + // This must be held during both encode() and free(). mutable Thread::MutexBasicLockable lock_; /** @@ -187,9 +200,20 @@ class SymbolTableImpl : public SymbolTable { */ absl::string_view fromSymbol(Symbol symbol) const EXCLUSIVE_LOCKS_REQUIRED(lock_); - // Stages a new symbol for use. To be called after a successful insertion. + /** + * Stages a new symbol for use. To be called after a successful insertion. + */ void newSymbol(); + /** + * Tokenizes name, finds or allocates symbols for each token, and adds them + * to encoding. + * + * @param name The name to tokenize. + * @param encoding The encoding to write to. + */ + void addTokensToEncoding(absl::string_view name, Encoding& encoding); + Symbol monotonicCounter() { Thread::LockGuard lock(lock_); return monotonic_counter_; @@ -376,16 +400,6 @@ class StatNameList { public: ~StatNameList(); - /** - * Populates the StatNameList from a list of encodings. This is not done at - * construction time to enable StatNameList to be instantiated directly in - * a class that doesn't have a live SymbolTable when it is constructed. - * - * @param encodings The list names to encode. - * @param symbol_table The symbol table in which to encode the names. - */ - void populate(const std::vector& encodings, SymbolTable& symbol_table); - /** * @return true if populate() has been called on this list. 
*/ @@ -411,7 +425,28 @@ class StatNameList { void clear(SymbolTable& symbol_table); private: - std::unique_ptr storage_; + friend class FakeSymbolTableImpl; + friend class SymbolTableImpl; + + /** + * Moves the specified storage into the list. The storage format is an + * array of bytes, organized like this: + * + * [0] The number of elements in the list (must be < 256). + * [1] low order 8 bits of the number of symbols in the first element. + * [2] high order 8 bits of the number of symbols in the first element. + * [3...] the symbols in the first element. + * ... + * + * + * For FakeSymbolTableImpl, each symbol is a single char, casted into a + * uint8_t. For SymbolTableImpl, each symbol is 1 or more bytes, in a + * variable-length encoding. See SymbolTableImpl::Encoding::addSymbol for + * details. + */ + void moveStorageIntoList(SymbolTable::StoragePtr&& storage) { storage_ = std::move(storage); } + + SymbolTable::StoragePtr storage_; }; // Helper class for constructing hash-tables with StatName keys. 
diff --git a/test/common/stats/symbol_table_impl_test.cc b/test/common/stats/symbol_table_impl_test.cc index 59680fd38ea09..36c86aaf43654 100644 --- a/test/common/stats/symbol_table_impl_test.cc +++ b/test/common/stats/symbol_table_impl_test.cc @@ -1,5 +1,6 @@ #include +#include "common/common/macros.h" #include "common/common/mutex_tracer_impl.h" #include "common/memory/stats.h" #include "common/stats/fake_symbol_table_impl.h" @@ -39,7 +40,9 @@ class StatNameTest : public testing::TestWithParam { break; } case SymbolTableType::Fake: - table_ = std::make_unique(); + auto table = std::make_unique(); + fake_symbol_table_ = table.get(); + table_ = std::move(table); break; } } @@ -54,7 +57,7 @@ class StatNameTest : public testing::TestWithParam { } SymbolVec getSymbols(StatName stat_name) { - return SymbolEncoding::decodeSymbols(stat_name.data(), stat_name.dataSize()); + return SymbolTableImpl::Encoding::decodeSymbols(stat_name.data(), stat_name.dataSize()); } std::string decodeSymbolVec(const SymbolVec& symbol_vec) { return real_symbol_table_->decodeSymbolVec(symbol_vec); @@ -71,6 +74,7 @@ class StatNameTest : public testing::TestWithParam { return stat_name_storage_.back().statName(); } + FakeSymbolTableImpl* fake_symbol_table_{nullptr}; SymbolTableImpl* real_symbol_table_{nullptr}; std::unique_ptr table_; @@ -268,10 +272,10 @@ TEST_P(StatNameTest, TestShrinkingExpectation) { // you don't free all the StatNames you've allocated bytes for. StatNameList // provides this capability. TEST_P(StatNameTest, List) { - std::vector names{"hello.world", "goodbye.world"}; + absl::string_view names[] = {"hello.world", "goodbye.world"}; StatNameList name_list; EXPECT_FALSE(name_list.populated()); - name_list.populate(names, *table_); + table_->populateList(names, ARRAY_SIZE(names), name_list); EXPECT_TRUE(name_list.populated()); // First, decode only the first name. 
diff --git a/test/test_common/BUILD b/test/test_common/BUILD index b8f15ec9147ca..6044a39b67715 100644 --- a/test/test_common/BUILD +++ b/test/test_common/BUILD @@ -189,7 +189,9 @@ envoy_cc_test_library( hdrs = ["simulated_time_system.h"], deps = [ ":test_time_system_interface", + "//source/common/event:event_impl_base_lib", "//source/common/event:real_time_system_lib", + "//source/common/event:timer_lib", ], ) diff --git a/tools/spelling_dictionary.txt b/tools/spelling_dictionary.txt index b94e12951480b..8706abfe29ad3 100644 --- a/tools/spelling_dictionary.txt +++ b/tools/spelling_dictionary.txt @@ -693,6 +693,8 @@ th thru tm tmp +tokenize +tokenizes tokenizing traceid trie From c90d0d4c45ee28de7c113959e1b02c295b8376c9 Mon Sep 17 00:00:00 2001 From: Elisha Ziskind Date: Tue, 2 Apr 2019 14:37:50 -0400 Subject: [PATCH 050/165] =?UTF-8?q?server:=20add=20no-op=20implementation?= =?UTF-8?q?=20of=20ServerLifecycleNotifier=20to=20ValidationInst=E2=80=A6?= =?UTF-8?q?=20(#6461)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Elisha Ziskind --- source/server/config_validation/server.h | 7 ++++++- test/server/config_validation/server_test.cc | 14 ++++++++++++++ 2 files changed, 20 insertions(+), 1 deletion(-) diff --git a/source/server/config_validation/server.h b/source/server/config_validation/server.h index 3d6493302faf4..2d0761d368046 100644 --- a/source/server/config_validation/server.h +++ b/source/server/config_validation/server.h @@ -53,6 +53,7 @@ bool validateConfig(const Options& options, Network::Address::InstanceConstShare class ValidationInstance : Logger::Loggable, public Instance, public ListenerComponentFactory, + public ServerLifecycleNotifier, public WorkerFactory { public: ValidationInstance(const Options& options, Event::TimeSystem& time_system, @@ -77,7 +78,7 @@ class ValidationInstance : Logger::Loggable, void getParentStats(HotRestart::GetParentStatsInfo&) override { 
NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } HotRestart& hotRestart() override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } Init::Manager& initManager() override { return init_manager_; } - ServerLifecycleNotifier& lifecycleNotifier() override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } + ServerLifecycleNotifier& lifecycleNotifier() override { return *this; } ListenerManager& listenerManager() override { return *listener_manager_; } Secret::SecretManager& secretManager() override { return *secret_manager_; } Runtime::RandomGenerator& random() override { return random_generator_; } @@ -138,6 +139,10 @@ class ValidationInstance : Logger::Loggable, return nullptr; } + // ServerLifecycleNotifier + void registerCallback(Stage, StageCallback) override {} + void registerCallback(Stage, StageCallbackWithCompletion) override {} + private: void initialize(const Options& options, Network::Address::InstanceConstSharedPtr local_address, ComponentFactory& component_factory); diff --git a/test/server/config_validation/server_test.cc b/test/server/config_validation/server_test.cc index b6e8c78e4003f..89b72ac0342b2 100644 --- a/test/server/config_validation/server_test.cc +++ b/test/server/config_validation/server_test.cc @@ -55,6 +55,20 @@ TEST_P(ValidationServerTest, Validate) { Filesystem::fileSystemForTest())); } +TEST_P(ValidationServerTest, NoopLifecycleNotifier) { + Thread::MutexBasicLockable access_log_lock; + Stats::IsolatedStoreImpl stats_store; + DangerousDeprecatedTestTime time_system; + ValidationInstance server(options_, time_system.timeSystem(), + Network::Address::InstanceConstSharedPtr(), stats_store, + access_log_lock, component_factory_, Thread::threadFactoryForTest(), + Filesystem::fileSystemForTest()); + server.registerCallback(ServerLifecycleNotifier::Stage::ShutdownExit, [] { FAIL(); }); + server.registerCallback(ServerLifecycleNotifier::Stage::ShutdownExit, + [](Event::PostCb) { FAIL(); }); + server.shutdown(); +} + // TODO(rlazarus): We'd like use this setup to replace 
//test/config_test (that is, run it against // all the example configs) but can't until light validation is implemented, mocking out access to // the filesystem for TLS certs, etc. In the meantime, these are the example configs that work From 7b82c0f33c26f8a1663b7cb8e6ab0278b920b687 Mon Sep 17 00:00:00 2001 From: htuch Date: Tue, 2 Apr 2019 18:36:58 -0400 Subject: [PATCH 051/165] security: templates for e-mails and CVE GH filing. (#6442) These capture some of our existing patterns in CVE related e-mail announcements, as well as some future planned e-mails and GH filing. They are based on OpenSSL security release announcement and https://github.com/kubernetes/security/blob/master/email-templates.md#security-fix-announcement. Signed-off-by: Harvey Tuch --- security/email-templates.md | 131 ++++++++++++++++++++++++++++++++++++ security/gh-cve-template.md | 52 ++++++++++++++ 2 files changed, 183 insertions(+) create mode 100644 security/email-templates.md create mode 100644 security/gh-cve-template.md diff --git a/security/email-templates.md b/security/email-templates.md new file mode 100644 index 0000000000000..99175d1f64e8a --- /dev/null +++ b/security/email-templates.md @@ -0,0 +1,131 @@ +# Envoy Security Process Email Templates + +This is a collection of email templates to handle various situations the security team encounters. + +## Upcoming security release to envoy-announce@googlegroups.com + +``` +Subject: Upcoming security release of Envoy $VERSION +To: envoy-announce@googlegroups.com +Cc: envoy-security@googlegroups.com, envoy-maintainers@googlegroups.com + +Hello Envoy Community, + +The Envoy maintainers would like to announce the forthcoming release of Envoy +$VERSION. + +This release will be made available on the $ORDINALDAY of $MONTH $YEAR at +$PDTHOUR PDT ($GMTHOUR GMT). This release will fix $NUMDEFECTS security +defect(s). The highest rated security defect is considered $SEVERITY severity. 
+ +No further details or patches will be made available in advance of the release. + +Thanks, +$PERSON (on behalf of the Envoy maintainers) +``` + +## Upcoming security release to cncf-envoy-distributors-announce@lists.cncf.io + +``` +Subject: [CONFIDENTIAL] Further details on security release of Envoy $VERSION +To: envoy-announce@googlegroups.com +Cc: envoy-security@googlegroups.com + +Hello Envoy Distributors, + +The Envoy security team would like to provide advanced notice to the Envoy +Private Distributors List of some details on the pending Envoy $VERSION +security release, following the process described at +https://github.com/envoyproxy/envoy/blob/master/SECURITY_RELEASE_PROCESS.md. + +This release will be made available on the $ORDINALDAY of $MONTH $YEAR at +$PDTHOUR PDT ($GMTHOUR GMT). This release will fix $NUMDEFECTS security +defect(s). The highest rated security defect is considered $SEVERITY severity. + +Below we provide details of these vulnerabilities under our embargo policy +(https://github.com/envoyproxy/envoy/blob/master/SECURITY_RELEASE_PROCESS.md#embargo-policy). +This information should be treated as confidential until public release by the +Envoy maintainers on the Envoy GitHub. + +We will address the following CVE(s): + +* CVE-YEAR-ABCDEF (CVSS score $CVSS, $SEVERITY): $CVESUMMARY +... + +We intend to make candidates release patches available under embargo on the +$ORDINALDAY of $MONTH $YEAR, which you may use for testing and preparing your +distributions. + +Please direct further communication amongst private distributors to this list +or to envoy-security@googlegroups.com for direct communication with the Envoy +security team. 
+ +Thanks, +$PERSON (on behalf of the Envoy security team) +``` + +## Security Fix Announcement + +``` +Subject: Security release of Envoy $VERSION is now available +To: envoy-announce@googlegroups.com +Cc: envoy-security@googlegroups.com, envoy-maintainers@googlegroups.com + +Hello Envoy Community, + +The Envoy maintainers would like to announce the availability of Envoy $VERSION. +This addresses the following CVE(s): + +* CVE-YEAR-ABCDEF (CVSS score $CVSS): $CVESUMMARY +... + +Upgrading to $VERSION is encouraged to fix these issues. + +GitHub tag: https://github.com/envoyproxy/envoy/releases/tag/v$VERSION +Docker images: https://hub.docker.com/r/envoyproxy/envoy/tags +Release notes: https://www.envoyproxy.io/docs/envoy/v$VERSION/intro/version_history +Docs: https://www.envoyproxy.io/docs/envoy/v$VERSION/ + +**Am I vulnerable?** + +Run `envoy --version` and if it indicates a base version of $OLDVERSION or +older you are running a vulnerable version. + + + +**How do I mitigate the vulnerability?** + + + +Avoid the use of feature XYZ in Envoy configuration. + +**How do I upgrade?** + +Update to $VERSION via your Envoy distribution or rebuild from the Envoy GitHub +source at the $VERSION tag or HEAD @ master. + +**Vulnerability Details** + + + +***CVE-YEAR-ABCDEF*** + +$CVESUMMARY + +This issue is filed as $CVE. We have rated it as [$CVSSSTRING]($CVSSURL) +($CVSS, $SEVERITY) [See the GitHub issue for more details]($GITHUBISSUEURL) + +**Thank you** + +Thank you to $REPORTER, $DEVELOPERS, and the $RELEASEMANAGERS for the +coordination in making this release. + +Thanks, + +$PERSON (on behalf of the Envoy maintainers) +``` diff --git a/security/gh-cve-template.md b/security/gh-cve-template.md new file mode 100644 index 0000000000000..8cc9f78bd2391 --- /dev/null +++ b/security/gh-cve-template.md @@ -0,0 +1,52 @@ +>This template is for public disclosure of CVE details on Envoy's GitHub. 
It should be filed with the public release of a security patch version, and will be linked to in the announcement sent to envoy-announce@googlegroups.com. The title of this issue should be the CVE identifier and it should have the `security` label applied. + +# CVE-YEAR-ABCDEF + +## Brief description + +>Brief description used when filing CVE. + +## CVSS + +>[$CVSSSTRING]($CVSSURL)($CVSSSCORE, $SEVERITY) + +## Affected version(s) + +>Envoy x.y.z and before. + +## Affected component(s) + +>List affected internal components and features. + +## Attack vector(s) + +>How would an attacker use this? + +## Discoverer(s)/Credits + +>Individual and optional organization. + +## Example exploit or proof-of-concept + +>If there is a proof-of-concept or example, provide a concrete example. + +## Details + +>Deep dive into the defect. This should be detailed enough to maintain a record for posterity while +being clear and concise. + +## Mitigations + +>Are there configuration or CLI options that can be used to mitigate? + +## Detection + +>How can exploitation of this bug be detected in existing and future Envoy versions? E.g. access logs. + +## References + +* CVE: $CVEURL +>Any other public information. From 2f097ddc8b6da18727660cdf3886c8da98f2302b Mon Sep 17 00:00:00 2001 From: Joshua Marantz Date: Tue, 2 Apr 2019 18:40:11 -0400 Subject: [PATCH 052/165] test: Re-enable guarddog tests which were accidentally disabled due to a parameterized test mishap.
(#6462) Signed-off-by: Joshua Marantz --- test/server/guarddog_impl_test.cc | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/test/server/guarddog_impl_test.cc b/test/server/guarddog_impl_test.cc index 2a398ed31530b..c1e00ef037240 100644 --- a/test/server/guarddog_impl_test.cc +++ b/test/server/guarddog_impl_test.cc @@ -124,10 +124,18 @@ class GuardDogDeathTest : public GuardDogTestBase { WatchDogSharedPtr second_dog_; }; +INSTANTIATE_TEST_SUITE_P(TimeSystemType, GuardDogDeathTest, + testing::ValuesIn({TimeSystemType::Real, TimeSystemType::Simulated})); + // These tests use threads, and need to run after the real death tests, so we need to call them // a different name. class GuardDogAlmostDeadTest : public GuardDogDeathTest {}; +INSTANTIATE_TEST_SUITE_P( + TimeSystemType, GuardDogAlmostDeadTest, + testing::ValuesIn({// TODO(#6464): TimeSystemType::Real -- fails in this suite 30/1000 times. + TimeSystemType::Simulated})); + TEST_P(GuardDogDeathTest, KillDeathTest) { // Is it German for "The Function"? Almost... auto die_function = [&]() -> void { @@ -190,6 +198,9 @@ class GuardDogMissTest : public GuardDogTestBase { NiceMock config_mega_; }; +INSTANTIATE_TEST_SUITE_P(TimeSystemType, GuardDogMissTest, + testing::ValuesIn({TimeSystemType::Real, TimeSystemType::Simulated})); + TEST_P(GuardDogMissTest, MissTest) { // This test checks the actual collected statistics after doing some timer // advances that should and shouldn't increment the counters. @@ -210,6 +221,11 @@ TEST_P(GuardDogMissTest, MissTest) { } TEST_P(GuardDogMissTest, MegaMissTest) { + // TODO(#6464): This test fails in real-time 1/1000 times, but passes in simulated time. + if (GetParam() == TimeSystemType::Real) { + return; + } + // This test checks the actual collected statistics after doing some timer // advances that should and shouldn't increment the counters. 
initGuardDog(stats_store_, config_mega_); @@ -229,6 +245,11 @@ TEST_P(GuardDogMissTest, MegaMissTest) { } TEST_P(GuardDogMissTest, MissCountTest) { + // TODO(#6464): This test fails in real-time 9/1000 times, but passes in simulated time. + if (GetParam() == TimeSystemType::Real) { + return; + } + // This tests a flake discovered in the MissTest where real timeout or // spurious condition_variable wakeup causes the counter to get incremented // more than it should be. From d8e0f9c1cd3ad3ed6fc932d3033a6d29e62d3fa8 Mon Sep 17 00:00:00 2001 From: Dan Rosen Date: Tue, 2 Apr 2019 21:18:44 -0400 Subject: [PATCH 053/165] api: fix bazel query again (#6463) Signed-off-by: Dan Rosen --- api/envoy/config/filter/dubbo/router/v2alpha1/BUILD | 2 +- api/envoy/config/filter/network/zookeeper_proxy/v1alpha1/BUILD | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/api/envoy/config/filter/dubbo/router/v2alpha1/BUILD b/api/envoy/config/filter/dubbo/router/v2alpha1/BUILD index ce0ad0e254f03..51c69c0d5b20f 100644 --- a/api/envoy/config/filter/dubbo/router/v2alpha1/BUILD +++ b/api/envoy/config/filter/dubbo/router/v2alpha1/BUILD @@ -1,4 +1,4 @@ -load("//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") licenses(["notice"]) # Apache 2 diff --git a/api/envoy/config/filter/network/zookeeper_proxy/v1alpha1/BUILD b/api/envoy/config/filter/network/zookeeper_proxy/v1alpha1/BUILD index a29ebf3a88484..8719f5083f126 100644 --- a/api/envoy/config/filter/network/zookeeper_proxy/v1alpha1/BUILD +++ b/api/envoy/config/filter/network/zookeeper_proxy/v1alpha1/BUILD @@ -1,4 +1,4 @@ -load("//bazel:api_build_system.bzl", "api_proto_library_internal") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") licenses(["notice"]) # Apache 2 From 339fcb9df8365230a1f35893d239c8a0ce200b5a Mon Sep 17 00:00:00 2001 From: Derek Date: Tue, 2 Apr 2019 21:22:58 -0700 Subject: [PATCH 054/165] 
docs: update clang format version in CI format targets (#6469) Signed-off-by: Derek Schaller --- ci/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ci/README.md b/ci/README.md index 9a6bcb776c11e..a5428889a98e1 100644 --- a/ci/README.md +++ b/ci/README.md @@ -91,8 +91,8 @@ The `./ci/run_envoy_docker.sh './ci/do_ci.sh '` targets are: * `bazel.tsan` — build and run tests under `-c dbg --config=clang-tsan` with clang. * `bazel.compile_time_options` — build Envoy and test with various compile-time options toggled to their non-default state, to ensure they still build. * `bazel.clang_tidy` — build and run clang-tidy over all source files. -* `check_format`— run `clang-format-6.0` and `buildifier` on entire source tree. -* `fix_format`— run and enforce `clang-format-6.0` and `buildifier` on entire source tree. +* `check_format`— run `clang-format-7` and `buildifier` on entire source tree. +* `fix_format`— run and enforce `clang-format-7` and `buildifier` on entire source tree. * `check_spelling`— run `misspell` on entire project. * `fix_spelling`— run and enforce `misspell` on entire project. * `check_spelling_pedantic`— run `aspell` on C++ and proto comments. From a781218c299f6783ea29a178f9eeba0e4465d4a9 Mon Sep 17 00:00:00 2001 From: Dmitri Dolguikh Date: Wed, 3 Apr 2019 08:55:01 -0700 Subject: [PATCH 055/165] Added VHDS protobuf message and updated RouteConfig to include it. (#6418) Signed-off-by: Dmitri Dolguikh --- api/XDS_PROTOCOL.md | 4 +++- api/envoy/api/v2/BUILD | 2 ++ api/envoy/api/v2/rds.proto | 35 ++++++++++++++++++++++++++++++++++- tools/spelling_dictionary.txt | 1 + 4 files changed, 40 insertions(+), 2 deletions(-) diff --git a/api/XDS_PROTOCOL.md b/api/XDS_PROTOCOL.md index 75f7d8f54e0ce..2401c15c9309f 100644 --- a/api/XDS_PROTOCOL.md +++ b/api/XDS_PROTOCOL.md @@ -45,6 +45,7 @@ correspondence between an xDS API and a resource type. 
That is: * [LDS: `envoy.api.v2.Listener`](envoy/api/v2/lds.proto) * [RDS: `envoy.api.v2.RouteConfiguration`](envoy/api/v2/rds.proto) +* [VHDS: `envoy.api.v2.Vhds`](envoy/api/v2/rds.proto) * [CDS: `envoy.api.v2.Cluster`](envoy/api/v2/cds.proto) * [EDS: `envoy.api.v2.ClusterLoadAssignment`](envoy/api/v2/eds.proto) * [SDS: `envoy.api.v2.Auth.Secret`](envoy/api/v2/auth/cert.proto) @@ -245,7 +246,8 @@ In general, to avoid traffic drop, sequencing of updates should follow a * CDS updates (if any) must always be pushed first. * EDS updates (if any) must arrive after CDS updates for the respective clusters. * LDS updates must arrive after corresponding CDS/EDS updates. -* RDS updates related to the newly added listeners must arrive in the end. +* RDS updates related to the newly added listeners must arrive after CDS/EDS/LDS updates. +* VHDS updates (if any) related to the newly added RouteConfigurations must arrive after RDS updates. * Stale CDS clusters and related EDS endpoints (ones no longer being referenced) can then be removed. 
diff --git a/api/envoy/api/v2/BUILD b/api/envoy/api/v2/BUILD index f0327f8df8f9d..66efb5d30ec5e 100644 --- a/api/envoy/api/v2/BUILD +++ b/api/envoy/api/v2/BUILD @@ -129,6 +129,7 @@ api_proto_library_internal( deps = [ ":discovery", "//envoy/api/v2/core:base", + "//envoy/api/v2/core:config_source", "//envoy/api/v2/route", ], ) @@ -139,6 +140,7 @@ api_go_grpc_library( deps = [ ":discovery_go_proto", "//envoy/api/v2/core:base_go_proto", + "//envoy/api/v2/core:config_source_go_proto", "//envoy/api/v2/route:route_go_proto", ], ) diff --git a/api/envoy/api/v2/rds.proto b/api/envoy/api/v2/rds.proto index d75b68af6791f..5dd58d62d9dc2 100644 --- a/api/envoy/api/v2/rds.proto +++ b/api/envoy/api/v2/rds.proto @@ -9,6 +9,7 @@ option java_package = "io.envoyproxy.envoy.api.v2"; option java_generic_services = true; import "envoy/api/v2/core/base.proto"; +import "envoy/api/v2/core/config_source.proto"; import "envoy/api/v2/discovery.proto"; import "envoy/api/v2/route/route.proto"; @@ -44,7 +45,23 @@ service RouteDiscoveryService { } } -// [#comment:next free field: 9] +// Virtual Host Discovery Service (VHDS) is used to dynamically update the list of virtual hosts for +// a given RouteConfiguration. If VHDS is configured, a virtual host list update will be triggered +// during the processing of an HTTP request if a route for the request cannot be resolved. The +// :ref:`resource_names_subscribe ` +// field contains a list of virtual host names or aliases to track. The contents of an alias would +// be the contents of a *host* or *authority* header used to make an http request. An xDS server +// will match an alias to a virtual host based on the content of :ref:`domains' +// ` field. The *resource_names_unsubscribe* field contains +// a list of virtual host names that have been `unsubscribed +// `_ +// from the routing table associated with the RouteConfiguration.
+service VirtualHostDiscoveryService { + rpc DeltaVirtualHosts(stream DeltaDiscoveryRequest) returns (stream DeltaDiscoveryResponse) { + } +} + +// [#comment:next free field: 10] message RouteConfiguration { // The name of the route configuration. For example, it might match // :ref:`route_config_name @@ -55,6 +72,15 @@ message RouteConfiguration { // An array of virtual hosts that make up the route table. repeated route.VirtualHost virtual_hosts = 2 [(gogoproto.nullable) = false]; + // An array of virtual hosts will be dynamically loaded via the VHDS API. + // Both *virtual_hosts* and *vhds* fields will be used when present. *virtual_hosts* can be used + // for a base routing table or for infrequently changing virtual hosts. *vhds* is used for + // on-demand discovery of virtual hosts. The contents of these two fields will be merged to + // generate a routing table for a given RouteConfiguration, with *vhds* derived configuration + // taking precedence. + // [#not-implemented-hide:] + Vhds vhds = 9; + // Optionally specifies a list of HTTP headers that the connection manager // will consider to be internal only. If they are found on external requests they will be cleaned // prior to filter invocation. See :ref:`config_http_conn_man_headers_x-envoy-internal` for more @@ -102,3 +128,10 @@ message RouteConfiguration { // using CDS with a static route table). google.protobuf.BoolValue validate_clusters = 7; } + +// [#not-implemented-hide:] +message Vhds { + // Configuration source specifier for VHDS. 
+ envoy.api.v2.core.ConfigSource config_source = 1 + [(validate.rules).message.required = true, (gogoproto.nullable) = false]; +} \ No newline at end of file diff --git a/tools/spelling_dictionary.txt b/tools/spelling_dictionary.txt index 8706abfe29ad3..b553e2f43c776 100644 --- a/tools/spelling_dictionary.txt +++ b/tools/spelling_dictionary.txt @@ -254,6 +254,7 @@ UTF UUID UUIDs VH +VHDS VLOG WKT WRR From 406547d814b8ee03001685694d95c665a7e41262 Mon Sep 17 00:00:00 2001 From: Stephan Zuercher Date: Wed, 3 Apr 2019 10:39:17 -0700 Subject: [PATCH 056/165] stats: report sample count as an integer to prevent loss of precision (#6274) When the number of samples in a histogram exceeds roughly 2^24, the value of the "+Inf" bucket in Prometheus stats output is rounded and rendered in scientific notation. This causes incorrect results in the Prometheus histogram_quantile function, which assumes that the rounding-error between the 1 hour and +Inf buckets represents some number of requests that took in excess of 1 hour. libcirclhist actually stores the sample count as a uint64_t, so stop implicitly converting it to double and output the count precisely (as we do with the non-infinite buckets). Also modifies the output format of the sum metric to avoid scientific notation. Risk Level: low Testing: added test case Doc Changes: n/a Release Notes: n/a Signed-off-by: Stephan Zuercher --- include/envoy/stats/histogram.h | 2 +- source/common/stats/histogram_impl.h | 4 +- source/server/http/admin.cc | 2 +- test/server/http/admin_test.cc | 59 ++++++++++++++++++++++++++++ 4 files changed, 63 insertions(+), 4 deletions(-) diff --git a/include/envoy/stats/histogram.h b/include/envoy/stats/histogram.h index bebba3ff976e5..0cd2d6a474f33 100644 --- a/include/envoy/stats/histogram.h +++ b/include/envoy/stats/histogram.h @@ -56,7 +56,7 @@ class HistogramStatistics { * of the number of samples in the histogram, it is not guaranteed that this will be * 100% the number of samples observed. 
*/ - virtual double sampleCount() const PURE; + virtual uint64_t sampleCount() const PURE; /** * Returns sum of all values during the period. diff --git a/source/common/stats/histogram_impl.h b/source/common/stats/histogram_impl.h index ef11744ac7163..b3fc06141c1ae 100644 --- a/source/common/stats/histogram_impl.h +++ b/source/common/stats/histogram_impl.h @@ -37,13 +37,13 @@ class HistogramStatisticsImpl : public HistogramStatistics, NonCopyable { const std::vector& computedQuantiles() const override { return computed_quantiles_; } const std::vector& supportedBuckets() const override; const std::vector& computedBuckets() const override { return computed_buckets_; } - double sampleCount() const override { return sample_count_; } + uint64_t sampleCount() const override { return sample_count_; } double sampleSum() const override { return sample_sum_; } private: std::vector computed_quantiles_; std::vector computed_buckets_; - double sample_count_; + uint64_t sample_count_; double sample_sum_; }; diff --git a/source/server/http/admin.cc b/source/server/http/admin.cc index 95b4f1b718714..7a8ab7c09c791 100644 --- a/source/server/http/admin.cc +++ b/source/server/http/admin.cc @@ -820,7 +820,7 @@ uint64_t PrometheusStatsFormatter::statsAsPrometheus( response.add(fmt::format("{0}_bucket{{{1}le=\"+Inf\"}} {2}\n", metric_name, hist_tags, stats.sampleCount())); - response.add(fmt::format("{0}_sum{{{1}}} {2}\n", metric_name, tags, stats.sampleSum())); + response.add(fmt::format("{0}_sum{{{1}}} {2:.32g}\n", metric_name, tags, stats.sampleSum())); response.add(fmt::format("{0}_count{{{1}}} {2}\n", metric_name, tags, stats.sampleCount())); } diff --git a/test/server/http/admin_test.cc b/test/server/http/admin_test.cc index 533e1afaa3a5f..2d8beddd793d9 100644 --- a/test/server/http/admin_test.cc +++ b/test/server/http/admin_test.cc @@ -1254,6 +1254,12 @@ class HistogramWrapper { } } + void setHistogramValuesWithCounts(const std::vector>& values) { + for (std::pair cv : values) { 
+ hist_insert_intscale(histogram_, cv.first, 0, cv.second); + } + } + private: histogram_t* histogram_; }; @@ -1402,6 +1408,59 @@ envoy_histogram1_count{} 0 EXPECT_EQ(expected_output, response.toString()); } +TEST_F(PrometheusStatsFormatterTest, HistogramWithHighCounts) { + HistogramWrapper h1_cumulative; + + // Force large counts to prove that the +Inf bucket doesn't overflow to scientific notation. + h1_cumulative.setHistogramValuesWithCounts(std::vector>({ + {1, 100000}, + {100, 1000000}, + {1000, 100000000}, + })); + + Stats::HistogramStatisticsImpl h1_cumulative_statistics(h1_cumulative.getHistogram()); + + auto histogram = std::make_shared>(); + histogram->name_ = "histogram1"; + histogram->used_ = true; + ON_CALL(*histogram, cumulativeStatistics()) + .WillByDefault(testing::ReturnRef(h1_cumulative_statistics)); + + addHistogram(histogram); + + Buffer::OwnedImpl response; + auto size = + PrometheusStatsFormatter::statsAsPrometheus(counters_, gauges_, histograms_, response, false); + EXPECT_EQ(1UL, size); + + const std::string expected_output = R"EOF(# TYPE envoy_histogram1 histogram +envoy_histogram1_bucket{le="0.5"} 0 +envoy_histogram1_bucket{le="1"} 0 +envoy_histogram1_bucket{le="5"} 100000 +envoy_histogram1_bucket{le="10"} 100000 +envoy_histogram1_bucket{le="25"} 100000 +envoy_histogram1_bucket{le="50"} 100000 +envoy_histogram1_bucket{le="100"} 100000 +envoy_histogram1_bucket{le="250"} 1100000 +envoy_histogram1_bucket{le="500"} 1100000 +envoy_histogram1_bucket{le="1000"} 1100000 +envoy_histogram1_bucket{le="2500"} 101100000 +envoy_histogram1_bucket{le="5000"} 101100000 +envoy_histogram1_bucket{le="10000"} 101100000 +envoy_histogram1_bucket{le="30000"} 101100000 +envoy_histogram1_bucket{le="60000"} 101100000 +envoy_histogram1_bucket{le="300000"} 101100000 +envoy_histogram1_bucket{le="600000"} 101100000 +envoy_histogram1_bucket{le="1800000"} 101100000 +envoy_histogram1_bucket{le="3600000"} 101100000 +envoy_histogram1_bucket{le="+Inf"} 101100000 
+envoy_histogram1_sum{} 105105105000 +envoy_histogram1_count{} 101100000 +)EOF"; + + EXPECT_EQ(expected_output, response.toString()); +} + TEST_F(PrometheusStatsFormatterTest, OutputWithAllMetricTypes) { addCounter("cluster.test_1.upstream_cx_total", {{"a.tag-name", "a.tag-value"}}); addCounter("cluster.test_2.upstream_cx_total", {{"another_tag_name", "another_tag-value"}}); From b84c1f9dbde9b99a56638bd89b7ab7d285c95c1b Mon Sep 17 00:00:00 2001 From: Mitch Sukalski Date: Wed, 3 Apr 2019 20:17:16 -0700 Subject: [PATCH 057/165] redis: basic integration test for redis_proxy (#6450) - added a basic integration test for redis_proxy that sends a basic request and response between "client" and "server", and another that sends an invalid request (and receives the appropriate error in response) - plumbed bazel size option through envoy_cc_test() definition (optional, default is "medium") so tests can avoid timeout warnings; redis_proxy test set to size "small" Signed-off-by: Mitch Sukalski --- bazel/envoy_build_system.bzl | 4 +- .../filters/network/redis_proxy/BUILD | 11 ++ .../redis_proxy_integration_test.cc | 137 ++++++++++++++++++ 3 files changed, 151 insertions(+), 1 deletion(-) create mode 100644 test/extensions/filters/network/redis_proxy/redis_proxy_integration_test.cc diff --git a/bazel/envoy_build_system.bzl b/bazel/envoy_build_system.bzl index e47592fcb006d..61fdfbc557d94 100644 --- a/bazel/envoy_build_system.bzl +++ b/bazel/envoy_build_system.bzl @@ -443,7 +443,8 @@ def envoy_cc_test( args = [], shard_count = None, coverage = True, - local = False): + local = False, + size = "medium"): test_lib_tags = [] if coverage: test_lib_tags.append("coverage_test_lib") @@ -472,6 +473,7 @@ def envoy_cc_test( tags = tags + ["coverage_test"], local = local, shard_count = shard_count, + size = size, ) # Envoy C++ related test infrastructure (that want gtest, gmock, but may be diff --git a/test/extensions/filters/network/redis_proxy/BUILD 
b/test/extensions/filters/network/redis_proxy/BUILD index bcc221a83f22d..2ae0acae9a7b0 100644 --- a/test/extensions/filters/network/redis_proxy/BUILD +++ b/test/extensions/filters/network/redis_proxy/BUILD @@ -105,3 +105,14 @@ envoy_extension_cc_test_binary( "//test/test_common:simulated_time_system_lib", ], ) + +envoy_extension_cc_test( + name = "redis_proxy_integration_test", + size = "small", + srcs = ["redis_proxy_integration_test.cc"], + extension_name = "envoy.filters.network.redis_proxy", + deps = [ + "//source/extensions/filters/network/redis_proxy:config", + "//test/integration:integration_lib", + ], +) diff --git a/test/extensions/filters/network/redis_proxy/redis_proxy_integration_test.cc b/test/extensions/filters/network/redis_proxy/redis_proxy_integration_test.cc new file mode 100644 index 0000000000000..53b62dbb2da43 --- /dev/null +++ b/test/extensions/filters/network/redis_proxy/redis_proxy_integration_test.cc @@ -0,0 +1,137 @@ +#include +#include + +#include "extensions/filters/network/redis_proxy/command_splitter_impl.h" + +#include "test/integration/integration.h" + +#include "gtest/gtest.h" + +namespace RedisCmdSplitter = Envoy::Extensions::NetworkFilters::RedisProxy::CommandSplitter; + +namespace Envoy { +namespace { + +const std::string REDIS_PROXY_CONFIG = R"EOF( +admin: + access_log_path: /dev/null + address: + socket_address: + address: 127.0.0.1 + port_value: 0 +static_resources: + clusters: + name: cluster_0 + hosts: + socket_address: + address: 127.0.0.1 + port_value: 0 + listeners: + name: listener_0 + address: + socket_address: + address: 127.0.0.1 + port_value: 0 + filter_chains: + filters: + name: envoy.redis_proxy + config: + stat_prefix: redis_stats + cluster: cluster_0 + settings: + op_timeout: 5s +)EOF"; + +std::string makeBulkStringArray(std::vector&& command_strings) { + std::stringstream result; + + result << "*" << command_strings.size() << "\r\n"; + for (uint64_t i = 0; i < command_strings.size(); i++) { + result << "$" << 
command_strings[i].size() << "\r\n"; + result << command_strings[i] << "\r\n"; + } + + return result.str(); +} + +class RedisProxyIntegrationTest : public testing::TestWithParam, + public BaseIntegrationTest { +public: + RedisProxyIntegrationTest() : BaseIntegrationTest(GetParam(), REDIS_PROXY_CONFIG) {} + + ~RedisProxyIntegrationTest() override { + test_server_.reset(); + fake_upstreams_.clear(); + } + + void initialize() override; +}; + +INSTANTIATE_TEST_SUITE_P(IpVersions, RedisProxyIntegrationTest, + testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), + TestUtility::ipTestParamsToString); + +void RedisProxyIntegrationTest::initialize() { + config_helper_.renameListener("redis_proxy"); + BaseIntegrationTest::initialize(); +} + +// This test sends a simple "get foo" command from a fake +// downstream client through the proxy to a fake upstream +// Redis server. The fake server sends a valid response +// back to the client. The request and response should +// make it through the envoy proxy server code unchanged. 
+ +TEST_P(RedisProxyIntegrationTest, SimpleRequestAndResponse) { + initialize(); + IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("redis_proxy")); + + std::string client_to_proxy = makeBulkStringArray({"get", "foo"}); + std::string proxy_to_server; + + EXPECT_TRUE(client_to_proxy.size() > 0); + EXPECT_TRUE(client_to_proxy.find("get") != std::string::npos); + EXPECT_TRUE(client_to_proxy.find("foo") != std::string::npos); + tcp_client->write(client_to_proxy); + + FakeRawConnectionPtr fake_upstream_connection; + EXPECT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection)); + EXPECT_TRUE(fake_upstream_connection->waitForData(client_to_proxy.size(), &proxy_to_server)); + EXPECT_EQ(client_to_proxy, proxy_to_server); + + std::string server_to_proxy = "$3\r\nbar\r\n"; // bulkstring reply of "bar" + + EXPECT_TRUE(fake_upstream_connection->write(server_to_proxy)); + tcp_client->waitForData(server_to_proxy); + EXPECT_EQ(server_to_proxy, tcp_client->data()); + + tcp_client->close(); + EXPECT_TRUE(fake_upstream_connection->close()); +} + +// This test sends an invalid Redis command from a fake +// downstream client to the envoy proxy. Envoy will respond +// with an invalid request error. 
+TEST_P(RedisProxyIntegrationTest, InvalidRequest) { + initialize(); + IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("redis_proxy")); + + std::string client_to_proxy = makeBulkStringArray({"foo"}); + + EXPECT_TRUE(client_to_proxy.size() > 0); + EXPECT_TRUE(client_to_proxy.find("foo") != std::string::npos); + tcp_client->write(client_to_proxy); + + std::stringstream error_response; + error_response << "-" << RedisCmdSplitter::Response::get().InvalidRequest << "\r\n"; + std::string proxy_to_client = error_response.str(); + + tcp_client->waitForData(proxy_to_client); + EXPECT_EQ(proxy_to_client, tcp_client->data()); + + tcp_client->close(); +} + +} // namespace +} // namespace Envoy From 1cf59d4f47ad7ab1a4e924cd66b825bb758f83c7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Dziedziak?= Date: Thu, 4 Apr 2019 21:26:43 +0200 Subject: [PATCH 058/165] Added verification if path contains query params and add them to path header (#6466) * Description: Added verification if path contains query params and append path headers if it has them Risk Level: medium (affects data plane, if only for fully qualified urls) Testing: added new test cases Docs Changes: n/a Release Notes: n/a Signed-off-by: Lukasz Dziedziak --- source/common/http/http1/codec_impl.cc | 2 +- source/common/http/utility.cc | 15 +++++++++--- source/common/http/utility.h | 6 ++--- source/common/router/router.cc | 2 +- test/common/http/utility_test.cc | 33 +++++++++++++++++++++++++- 5 files changed, 49 insertions(+), 9 deletions(-) diff --git a/source/common/http/http1/codec_impl.cc b/source/common/http/http1/codec_impl.cc index 43a3c1ef4cd55..d85e642444788 100644 --- a/source/common/http/http1/codec_impl.cc +++ b/source/common/http/http1/codec_impl.cc @@ -533,7 +533,7 @@ void ServerConnectionImpl::handlePath(HeaderMapImpl& headers, unsigned int metho // forward the received Host field-value.
headers.insertHost().value(std::string(absolute_url.host_and_port())); - headers.insertPath().value(std::string(absolute_url.path())); + headers.insertPath().value(std::string(absolute_url.path_and_query_params())); active_request_->request_url_.clear(); } diff --git a/source/common/http/utility.cc b/source/common/http/utility.cc index 50083e96910ba..7cd9715a66d1d 100644 --- a/source/common/http/utility.cc +++ b/source/common/http/utility.cc @@ -57,10 +57,19 @@ bool Utility::Url::initialize(absl::string_view absolute_url) { // RFC allows the absolute-uri to not end in /, but the absolute path form // must start with if ((u.field_set & (1 << UF_PATH)) == (1 << UF_PATH) && u.field_data[UF_PATH].len > 0) { - path_ = absl::string_view(absolute_url.data() + u.field_data[UF_PATH].off, - u.field_data[UF_PATH].len); + uint64_t path_len = u.field_data[UF_PATH].len; + if ((u.field_set & (1 << UF_QUERY)) == (1 << UF_QUERY) && u.field_data[UF_QUERY].len > 0) { + path_len += 1 + u.field_data[UF_QUERY].len; + } + path_and_query_params_ = + absl::string_view(absolute_url.data() + u.field_data[UF_PATH].off, path_len); + } else if ((u.field_set & (1 << UF_QUERY)) == (1 << UF_QUERY) && u.field_data[UF_QUERY].len > 0) { + // Http parser skips question mark and starts count from first character after ? + // so we need to move left by one + path_and_query_params_ = absl::string_view(absolute_url.data() + u.field_data[UF_QUERY].off - 1, + u.field_data[UF_QUERY].len + 1); } else { - path_ = absl::string_view(kDefaultPath, 1); + path_and_query_params_ = absl::string_view(kDefaultPath, 1); } return true; } diff --git a/source/common/http/utility.h b/source/common/http/utility.h index 92e8a1e7badc3..5c4526ba9b3d2 100644 --- a/source/common/http/utility.h +++ b/source/common/http/utility.h @@ -24,19 +24,19 @@ namespace Utility { /** * Given a fully qualified URL, splits the string_view provided into scheme, - * host and path components. + * host and path with query parameters components. 
*/ class Url { public: bool initialize(absl::string_view absolute_url); absl::string_view scheme() { return scheme_; } absl::string_view host_and_port() { return host_and_port_; } - absl::string_view path() { return path_; } + absl::string_view path_and_query_params() { return path_and_query_params_; } private: absl::string_view scheme_; absl::string_view host_and_port_; - absl::string_view path_; + absl::string_view path_and_query_params_; }; /** diff --git a/source/common/router/router.cc b/source/common/router/router.cc index 3d65db36144d5..e2bd72dedd598 100644 --- a/source/common/router/router.cc +++ b/source/common/router/router.cc @@ -77,7 +77,7 @@ bool convertRequestHeadersForInternalRedirect(Http::HeaderMap& downstream_header // Replace the original host, scheme and path. downstream_headers.insertScheme().value(std::string(absolute_url.scheme())); downstream_headers.insertHost().value(std::string(absolute_url.host_and_port())); - downstream_headers.insertPath().value(std::string(absolute_url.path())); + downstream_headers.insertPath().value(std::string(absolute_url.path_and_query_params())); return true; } diff --git a/test/common/http/utility_test.cc b/test/common/http/utility_test.cc index dc7a29e3fb664..6afc92e72886c 100644 --- a/test/common/http/utility_test.cc +++ b/test/common/http/utility_test.cc @@ -754,7 +754,7 @@ void ValidateUrl(absl::string_view raw_url, absl::string_view expected_scheme, ASSERT_TRUE(url.initialize(raw_url)) << "Failed to initialize " << raw_url; EXPECT_EQ(url.scheme(), expected_scheme); EXPECT_EQ(url.host_and_port(), expected_host_port); - EXPECT_EQ(url.path(), expected_path); + EXPECT_EQ(url.path_and_query_params(), expected_path); } TEST(Url, ParsingTest) { @@ -766,12 +766,43 @@ TEST(Url, ParsingTest) { ValidateUrl("http://www.host.com:80/", "http", "www.host.com:80", "/"); ValidateUrl("http://www.host.com/", "http", "www.host.com", "/"); + // Test url with "?". 
+ ValidateUrl("http://www.host.com:80/?", "http", "www.host.com:80", "/"); + ValidateUrl("http://www.host.com/?", "http", "www.host.com", "/"); + + // Test url with "?" but without slash. + ValidateUrl("http://www.host.com:80?", "http", "www.host.com:80", "/"); + ValidateUrl("http://www.host.com?", "http", "www.host.com", "/"); + // Test url with multi-character path ValidateUrl("http://www.host.com:80/path", "http", "www.host.com:80", "/path"); ValidateUrl("http://www.host.com/path", "http", "www.host.com", "/path"); + // Test url with multi-character path and ? at the end + ValidateUrl("http://www.host.com:80/path?", "http", "www.host.com:80", "/path"); + ValidateUrl("http://www.host.com/path?", "http", "www.host.com", "/path"); + // Test https scheme ValidateUrl("https://www.host.com", "https", "www.host.com", "/"); + + // Test url with query parameter + ValidateUrl("http://www.host.com:80/?query=param", "http", "www.host.com:80", "/?query=param"); + ValidateUrl("http://www.host.com/?query=param", "http", "www.host.com", "/?query=param"); + + // Test url with query parameter but without slash + ValidateUrl("http://www.host.com:80?query=param", "http", "www.host.com:80", "?query=param"); + ValidateUrl("http://www.host.com?query=param", "http", "www.host.com", "?query=param"); + + // Test url with multi-character path and query parameter + ValidateUrl("http://www.host.com:80/path?query=param", "http", "www.host.com:80", + "/path?query=param"); + ValidateUrl("http://www.host.com/path?query=param", "http", "www.host.com", "/path?query=param"); + + // Test url with multi-character path and more than one query parameter + ValidateUrl("http://www.host.com:80/path?query=param&query2=param2", "http", "www.host.com:80", + "/path?query=param&query2=param2"); + ValidateUrl("http://www.host.com/path?query=param&query2=param2", "http", "www.host.com", + "/path?query=param&query2=param2"); } } // namespace Http From b155af75fad7861e941b5939dc001abf581c9203 Mon Sep 17 00:00:00 
2001 From: htuch Date: Wed, 27 Mar 2019 10:35:14 -0400 Subject: [PATCH 059/165] codec: reject embedded NUL in headers. (#2) http-parser doesn't sanitize header values as per RFC 7230 today, so add an additional check (yielding a CodecProtocolException) for this. See CVE-2019-9900 for further details. Added CR/LF, in addition to NUL, as prohibited in header strings as per RFC 7230. Also added codec_impl_tests for H1 and H2 codecs to validate that NUL/LF/CR mutations in a request don't violate internal invariants. Fixes CVE-2019-9900 and oss-fuzz issue https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=13613. Risk level: Low Testing: Corpus entry and unit test added. Signed-off-by: Harvey Tuch --- docs/root/intro/version_history.rst | 4 + include/envoy/http/header_map.h | 17 ++- source/common/http/header_map_impl.cc | 4 +- source/common/http/http1/codec_impl.cc | 7 ++ test/common/http/http1/codec_impl_test.cc | 61 ++++++++++ test/common/http/http2/codec_impl_test.cc | 110 +++++++++++++++--- .../filters/http/gzip/gzip_filter_test.cc | 6 +- test/integration/h1_corpus/embed_null.pb_text | 1 + tools/spelling_dictionary.txt | 1 + 9 files changed, 182 insertions(+), 29 deletions(-) create mode 100644 test/integration/h1_corpus/embed_null.pb_text diff --git a/docs/root/intro/version_history.rst b/docs/root/intro/version_history.rst index ac2168dba4ac9..6880dd4d1fa63 100644 --- a/docs/root/intro/version_history.rst +++ b/docs/root/intro/version_history.rst @@ -85,6 +85,10 @@ Version history * upstream: added configuration option to select any host when the fallback policy fails. * upstream: stopped incrementing upstream_rq_total for HTTP/1 conn pool when request is circuit broken. +1.9.1 (Apr 2, 2019) +=================== +* http: fixed CVE-2019-9900 by rejecting HTTP/1.x headers with embedded NUL characters. + 1.9.0 (Dec 20, 2018) ==================== * access log: added a :ref:`JSON logging mode ` to output access logs in JSON format. 
diff --git a/include/envoy/http/header_map.h b/include/envoy/http/header_map.h index f67a55c607963..b68967ed8492c 100644 --- a/include/envoy/http/header_map.h +++ b/include/envoy/http/header_map.h @@ -20,6 +20,17 @@ namespace Envoy { namespace Http { +// Used by ASSERTs to validate internal consistency. E.g. valid HTTP header keys/values should +// never contain embedded NULLs. +static inline bool validHeaderString(absl::string_view s) { + for (const char c : {'\0', '\r', '\n'}) { + if (s.find(c) != absl::string_view::npos) { + return false; + } + } + return true; +} + /** * Wrapper for a lower case string used in header operations to generally avoid needless case * insensitive compares. @@ -40,9 +51,7 @@ class LowerCaseString { private: void lower() { std::transform(string_.begin(), string_.end(), string_.begin(), tolower); } - // Used by ASSERTs to validate internal consistency. E.g. valid HTTP header keys/values should - // never contain embedded NULLs. - bool valid() const { return string_.find('\0') == std::string::npos; } + bool valid() const { return validHeaderString(string_); } std::string string_; }; @@ -183,8 +192,6 @@ class HeaderString { }; void freeDynamic(); - // Used by ASSERTs to validate internal consistency. E.g. valid HTTP header keys/values should - // never contain embedded NULLs. 
bool valid() const; uint32_t string_length_; diff --git a/source/common/http/header_map_impl.cc b/source/common/http/header_map_impl.cc index 68bfc4365d531..56185e9058ec5 100644 --- a/source/common/http/header_map_impl.cc +++ b/source/common/http/header_map_impl.cc @@ -90,9 +90,7 @@ void HeaderString::freeDynamic() { } } -bool HeaderString::valid() const { - return std::string(c_str(), string_length_).find('\0') == std::string::npos; -} +bool HeaderString::valid() const { return validHeaderString(getStringView()); } void HeaderString::append(const char* data, uint32_t size) { switch (type_) { diff --git a/source/common/http/http1/codec_impl.cc b/source/common/http/http1/codec_impl.cc index d85e642444788..9adef3d928c35 100644 --- a/source/common/http/http1/codec_impl.cc +++ b/source/common/http/http1/codec_impl.cc @@ -418,6 +418,13 @@ void ConnectionImpl::onHeaderValue(const char* data, size_t length) { // Ignore trailers. return; } + // http-parser should filter for this + // (https://tools.ietf.org/html/rfc7230#section-3.2.6), but it doesn't today. HeaderStrings + // have an invariant that they must not contain embedded zero characters + // (NUL, ASCII 0x0). + if (absl::string_view(data, length).find('\0') != absl::string_view::npos) { + throw CodecProtocolException("http/1.1 protocol error: header value contains NUL"); + } header_parsing_state_ = HeaderParsingState::Value; current_header_value_.append(data, length); diff --git a/test/common/http/http1/codec_impl_test.cc b/test/common/http/http1/codec_impl_test.cc index b6639dfd42373..79bea04d1c0fd 100644 --- a/test/common/http/http1/codec_impl_test.cc +++ b/test/common/http/http1/codec_impl_test.cc @@ -289,6 +289,67 @@ TEST_F(Http1ServerConnectionImplTest, HostHeaderTranslation) { EXPECT_EQ(0U, buffer.length()); } +// Regression test for http-parser allowing embedded NULs in header values, +// verify we reject them. 
+TEST_F(Http1ServerConnectionImplTest, HeaderEmbeddedNulRejection) { + initialize(); + + InSequence sequence; + + Http::MockStreamDecoder decoder; + EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder)); + + Buffer::OwnedImpl buffer( + absl::StrCat("GET / HTTP/1.1\r\nHOST: h.com\r\nfoo: bar", std::string(1, '\0'), "baz\r\n")); + EXPECT_THROW_WITH_MESSAGE(codec_->dispatch(buffer), CodecProtocolException, + "http/1.1 protocol error: header value contains NUL"); +} + +// Mutate an HTTP GET with embedded NULs, this should always be rejected in some +// way (not necessarily with "header value contains NUL" though). +TEST_F(Http1ServerConnectionImplTest, HeaderMutateEmbeddedNul) { + const std::string example_input = "GET / HTTP/1.1\r\nHOST: h.com\r\nfoo: barbaz\r\n"; + + for (size_t n = 1; n < example_input.size(); ++n) { + initialize(); + + InSequence sequence; + + Http::MockStreamDecoder decoder; + EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder)); + + Buffer::OwnedImpl buffer( + absl::StrCat(example_input.substr(0, n), std::string(1, '\0'), example_input.substr(n))); + EXPECT_THROW_WITH_REGEX(codec_->dispatch(buffer), CodecProtocolException, + "http/1.1 protocol error:"); + } +} + +// Mutate an HTTP GET with CR or LF. These can cause an exception or maybe +// result in a valid decodeHeaders(). In any case, the validHeaderString() +// ASSERTs should validate we never have any embedded CR or LF.
+TEST_F(Http1ServerConnectionImplTest, HeaderMutateEmbeddedCRLF) { + const std::string example_input = "GET / HTTP/1.1\r\nHOST: h.com\r\nfoo: barbaz\r\n"; + + for (const char c : {'\r', '\n'}) { + for (size_t n = 1; n < example_input.size(); ++n) { + initialize(); + + InSequence sequence; + + NiceMock decoder; + EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder)); + + Buffer::OwnedImpl buffer( + absl::StrCat(example_input.substr(0, n), std::string(1, c), example_input.substr(n))); + try { + codec_->dispatch(buffer); + } catch (CodecProtocolException) { + } + } + } +} + TEST_F(Http1ServerConnectionImplTest, CloseDuringHeadersComplete) { initialize(); diff --git a/test/common/http/http2/codec_impl_test.cc b/test/common/http/http2/codec_impl_test.cc index 759a39ddb2033..a220413d34969 100644 --- a/test/common/http/http2/codec_impl_test.cc +++ b/test/common/http/http2/codec_impl_test.cc @@ -34,7 +34,13 @@ namespace Http2 { using Http2SettingsTuple = ::testing::tuple; using Http2SettingsTestParam = ::testing::tuple; -class Http2CodecImplTest : public testing::TestWithParam { +constexpr Http2SettingsTuple + DefaultHttp2SettingsTuple(Http2Settings::DEFAULT_HPACK_TABLE_SIZE, + Http2Settings::DEFAULT_MAX_CONCURRENT_STREAMS, + Http2Settings::DEFAULT_MAX_CONCURRENT_STREAMS, + Http2Settings::DEFAULT_INITIAL_CONNECTION_WINDOW_SIZE); + +class Http2CodecImplTestFixture { public: struct ConnectionWrapper { void dispatch(const Buffer::Instance& data, ConnectionImpl& connection) { @@ -52,9 +58,13 @@ class Http2CodecImplTest : public testing::TestWithParam Buffer::OwnedImpl buffer_; }; - void initialize() { - Http2SettingsFromTuple(client_http2settings_, ::testing::get<0>(GetParam())); - Http2SettingsFromTuple(server_http2settings_, ::testing::get<1>(GetParam())); + Http2CodecImplTestFixture(Http2SettingsTuple client_settings, Http2SettingsTuple server_settings) + : client_settings_(client_settings), server_settings_(server_settings) {} + virtual 
~Http2CodecImplTestFixture() {} + + virtual void initialize() { + Http2SettingsFromTuple(client_http2settings_, client_settings_); + Http2SettingsFromTuple(server_http2settings_, server_settings_); client_ = std::make_unique(client_connection_, client_callbacks_, stats_store_, client_http2settings_, max_request_headers_kb_); @@ -76,8 +86,11 @@ class Http2CodecImplTest : public testing::TestWithParam void setupDefaultConnectionMocks() { ON_CALL(client_connection_, write(_, _)) .WillByDefault(Invoke([&](Buffer::Instance& data, bool) -> void { - if (corrupt_data_) { - corruptFramePayload(data); + if (corrupt_metadata_frame_) { + corruptMetadataFramePayload(data); + } + if (corrupt_at_offset_ >= 0) { + corruptAtOffset(data, corrupt_at_offset_, corrupt_with_char_); } server_wrapper_.dispatch(data, *server_); })); @@ -95,22 +108,27 @@ class Http2CodecImplTest : public testing::TestWithParam setting.allow_metadata_ = allow_metadata_; } - // corruptFramePayload assumes data contains at least 10 bytes of the beginning of a frame. - void corruptFramePayload(Buffer::Instance& data) { + // corruptMetadataFramePayload assumes data contains at least 10 bytes of the beginning of a + // frame. + void corruptMetadataFramePayload(Buffer::Instance& data) { const size_t length = data.length(); const size_t corrupt_start = 10; if (length < corrupt_start || length > METADATA_MAX_PAYLOAD_SIZE) { ENVOY_LOG_MISC(error, "data size too big or too small"); return; } - uint8_t buf[METADATA_MAX_PAYLOAD_SIZE] = {0}; - data.copyOut(0, length, static_cast(buf)); - data.drain(length); - // Keeps the frame header (9 bytes) valid, and corrupts the payload. 
- buf[10] |= 0xff; - data.add(buf, length); + corruptAtOffset(data, corrupt_start, 0xff); + } + + void corruptAtOffset(Buffer::Instance& data, size_t index, char new_value) { + if (data.length() == 0) { + return; + } + reinterpret_cast(data.linearize(data.length()))[index % data.length()] = new_value; } + const Http2SettingsTuple client_settings_; + const Http2SettingsTuple server_settings_; bool allow_metadata_ = false; Stats::IsolatedStoreImpl stats_store_; Http2Settings client_http2settings_; @@ -128,10 +146,22 @@ class Http2CodecImplTest : public testing::TestWithParam MockStreamDecoder request_decoder_; StreamEncoder* response_encoder_{}; MockStreamCallbacks server_stream_callbacks_; - bool corrupt_data_ = false; + // Corrupt a metadata frame payload. + bool corrupt_metadata_frame_ = false; + // Corrupt frame at a given offset (if positive). + ssize_t corrupt_at_offset_{-1}; + char corrupt_with_char_{'\0'}; + uint32_t max_request_headers_kb_ = Http::DEFAULT_MAX_REQUEST_HEADERS_KB; }; +class Http2CodecImplTest : public ::testing::TestWithParam, + protected Http2CodecImplTestFixture { +public: + Http2CodecImplTest() + : Http2CodecImplTestFixture(::testing::get<0>(GetParam()), ::testing::get<1>(GetParam())) {} +}; + TEST_P(Http2CodecImplTest, ShutdownNotice) { initialize(); @@ -191,7 +221,7 @@ TEST_P(Http2CodecImplTest, InvalidContinueWithFin) { client_wrapper_.dispatch(Buffer::OwnedImpl(), *client_); EXPECT_EQ(1, stats_store_.counter("http2.rx_messaging_error").value()); -}; +} TEST_P(Http2CodecImplTest, InvalidRepeatContinue) { initialize(); @@ -242,7 +272,7 @@ TEST_P(Http2CodecImplTest, Invalid103) { EXPECT_THROW_WITH_MESSAGE(response_encoder_->encodeHeaders(early_hint_headers, false), CodecProtocolException, "Unexpected 'trailers' with no end stream."); EXPECT_EQ(1, stats_store_.counter("http2.too_many_header_frames").value()); -}; +} TEST_P(Http2CodecImplTest, Invalid204WithContentLength) { initialize(); @@ -444,7 +474,7 @@ TEST_P(Http2CodecImplTest, 
BadMetadataVecReceivedTest) { MetadataMapVector metadata_map_vector; metadata_map_vector.push_back(std::move(metadata_map_ptr)); - corrupt_data_ = true; + corrupt_metadata_frame_ = true; EXPECT_THROW_WITH_MESSAGE(request_encoder_->encodeMetadata(metadata_map_vector), EnvoyException, "The user callback function failed"); } @@ -1011,6 +1041,50 @@ TEST_P(Http2CodecImplTest, TestCodecHeaderCompression) { } } +// Validate that nghttp2 rejects NUL/CR/LF as per +// https://httpwg.org/specs/rfc7540.html#rfc.section.10.3. +// TEST_P(Http2CodecImplTest, InvalidHeaderChars) { +// TODO(htuch): Write me. Http2CodecImplMutationTest basically covers this, +// but we could be a bit more specific and add a captured H2 HEADERS frame +// here and inject it with mutation of just the header value, ensuring we get +// the expected codec exception. +// } + +class Http2CodecImplMutationTest : public ::testing::TestWithParam<::testing::tuple>, + protected Http2CodecImplTestFixture { +public: + Http2CodecImplMutationTest() + : Http2CodecImplTestFixture(DefaultHttp2SettingsTuple, DefaultHttp2SettingsTuple) {} + + void initialize() override { + corrupt_with_char_ = ::testing::get<0>(GetParam()); + corrupt_at_offset_ = ::testing::get<1>(GetParam()); + Http2CodecImplTestFixture::initialize(); + } +}; + +INSTANTIATE_TEST_SUITE_P(Http2CodecImplMutationTest, Http2CodecImplMutationTest, + ::testing::Combine(::testing::ValuesIn({'\0', '\r', '\n'}), + ::testing::Range(0, 128))); + +// Mutate an arbitrary offset in the HEADERS frame with NUL/CR/LF. This should +// either throw an exception or continue, but we shouldn't crash due to +// validHeaderString() ASSERTs. 
+TEST_P(Http2CodecImplMutationTest, HandleInvalidChars) { + initialize(); + + TestHeaderMapImpl request_headers; + request_headers.addCopy("foo", "barbaz"); + HttpTestUtility::addDefaultHeaders(request_headers); + EXPECT_CALL(request_decoder_, decodeHeaders_(_, _)).Times(AnyNumber()); + EXPECT_CALL(client_callbacks_, onGoAway()).Times(AnyNumber()); + try { + request_encoder_->encodeHeaders(request_headers, true); + } catch (const CodecProtocolException& e) { + ENVOY_LOG_MISC(trace, "CodecProtocolException: {}", e.what()); + } +} + } // namespace Http2 } // namespace Http } // namespace Envoy diff --git a/test/extensions/filters/http/gzip/gzip_filter_test.cc b/test/extensions/filters/http/gzip/gzip_filter_test.cc index 685b21a5e688d..80d9b0df95234 100644 --- a/test/extensions/filters/http/gzip/gzip_filter_test.cc +++ b/test/extensions/filters/http/gzip/gzip_filter_test.cc @@ -208,7 +208,7 @@ TEST_F(GzipFilterTest, isAcceptEncodingAllowed) { } { Http::TestHeaderMapImpl headers = { - {"accept-encoding", "\tdeflate\t, gzip\t ; q\t =\t 1.0,\t * ;q=0.5\n"}}; + {"accept-encoding", "\tdeflate\t, gzip\t ; q\t =\t 1.0,\t * ;q=0.5"}}; EXPECT_TRUE(isAcceptEncodingAllowed(headers)); EXPECT_EQ(3, stats_.counter("test.gzip.header_gzip").value()); } @@ -416,7 +416,7 @@ TEST_F(GzipFilterTest, isContentTypeAllowed) { EXPECT_TRUE(isContentTypeAllowed(headers)); } { - Http::TestHeaderMapImpl headers = {{"content-type", "\ttext/html\t\n"}}; + Http::TestHeaderMapImpl headers = {{"content-type", "\ttext/html\t"}}; EXPECT_TRUE(isContentTypeAllowed(headers)); } @@ -588,7 +588,7 @@ TEST_F(GzipFilterTest, isTransferEncodingAllowed) { EXPECT_FALSE(isTransferEncodingAllowed(headers)); } { - Http::TestHeaderMapImpl headers = {{"transfer-encoding", " gzip\t, chunked\t\n"}}; + Http::TestHeaderMapImpl headers = {{"transfer-encoding", " gzip\t, chunked\t"}}; EXPECT_FALSE(isTransferEncodingAllowed(headers)); } } diff --git a/test/integration/h1_corpus/embed_null.pb_text 
b/test/integration/h1_corpus/embed_null.pb_text new file mode 100644 index 0000000000000..c15fc60b65dad --- /dev/null +++ b/test/integration/h1_corpus/embed_null.pb_text @@ -0,0 +1 @@ +events { downstream_send_bytes: "POST /\nntnt: � \0 " } diff --git a/tools/spelling_dictionary.txt b/tools/spelling_dictionary.txt index b553e2f43c776..01b8c8658d7c9 100644 --- a/tools/spelling_dictionary.txt +++ b/tools/spelling_dictionary.txt @@ -137,6 +137,7 @@ LC LDS LEV LHS +LF MB MD MGET From c22cfd2c483fc26534382a0b6835f45264bb137a Mon Sep 17 00:00:00 2001 From: htuch Date: Wed, 27 Mar 2019 12:51:45 -0400 Subject: [PATCH 060/165] build: import manually minified Chrome URL lib. (#3) This is a manually minified variant of https://chromium.googlesource.com/chromium/src.git/+archive/74.0.3729.15/url.tar.gz, providing just the parts needed for url::CanonicalizePath(). This is intended to support a security release fix for CVE-2019-9901. Long term we need this to be moved to absl or QUICHE for upgrades and long-term support. Some specific transforms of interest: * url_parse.h is minified to just Component and flattened back into the URL directory. It does not contain any non-Chromium authored code any longer and so does not have a separate LICENSE. * envoy_shim.h adapts various macros to the Envoy context. * Anything not reachable from url::CanonicalizePath() has been dropped. * Header include paths have changed as needed. * BUILD was manually written. * Various clang-tidy and format fixes. Risk level: Low Testing: Validated with WiP PR for CVE-2019-9901.
Signed-off-by: Harvey Tuch --- ci/run_clang_tidy.sh | 14 +- source/common/chromium_url/BUILD | 28 ++ source/common/chromium_url/LICENSE | 27 ++ source/common/chromium_url/README.md | 15 + source/common/chromium_url/envoy_shim.h | 17 + source/common/chromium_url/url_canon.cc | 16 + source/common/chromium_url/url_canon.h | 186 ++++++++ .../common/chromium_url/url_canon_internal.cc | 295 +++++++++++++ .../common/chromium_url/url_canon_internal.h | 246 +++++++++++ source/common/chromium_url/url_canon_path.cc | 417 ++++++++++++++++++ .../chromium_url/url_canon_stdstring.cc | 33 ++ .../common/chromium_url/url_canon_stdstring.h | 58 +++ source/common/chromium_url/url_parse.h | 49 ++ .../common/chromium_url/url_parse_internal.h | 18 + tools/spelling_dictionary.txt | 11 + 15 files changed, 1428 insertions(+), 2 deletions(-) create mode 100644 source/common/chromium_url/BUILD create mode 100644 source/common/chromium_url/LICENSE create mode 100644 source/common/chromium_url/README.md create mode 100644 source/common/chromium_url/envoy_shim.h create mode 100644 source/common/chromium_url/url_canon.cc create mode 100644 source/common/chromium_url/url_canon.h create mode 100644 source/common/chromium_url/url_canon_internal.cc create mode 100644 source/common/chromium_url/url_canon_internal.h create mode 100644 source/common/chromium_url/url_canon_path.cc create mode 100644 source/common/chromium_url/url_canon_stdstring.cc create mode 100644 source/common/chromium_url/url_canon_stdstring.h create mode 100644 source/common/chromium_url/url_parse.h create mode 100644 source/common/chromium_url/url_parse_internal.h diff --git a/ci/run_clang_tidy.sh b/ci/run_clang_tidy.sh index e9df93bfcb652..29d4381b51828 100755 --- a/ci/run_clang_tidy.sh +++ b/ci/run_clang_tidy.sh @@ -21,15 +21,25 @@ function exclude_testdata() { grep -v tools/testdata/check_format/ } +# Do not run clang-tidy against Chromium URL import, this needs to largely +# reflect the upstream structure. 
+function exclude_chromium_url() { + grep -v source/common/chromium_url/ +} + +function filter_excludes() { + exclude_testdata | exclude_chromium_url +} + if [[ "${RUN_FULL_CLANG_TIDY}" == 1 ]]; then echo "Running full clang-tidy..." run-clang-tidy-7 elif [[ -z "${CIRCLE_PR_NUMBER}" && "$CIRCLE_BRANCH" == "master" ]]; then echo "On master branch, running clang-tidy-diff against previous commit..." - git diff HEAD^ | exclude_testdata | clang-tidy-diff-7.py -p 1 + git diff HEAD^ | filter_excludes | clang-tidy-diff-7.py -p 1 else echo "Running clang-tidy-diff against master branch..." git fetch https://github.com/envoyproxy/envoy.git master - git diff $(git merge-base HEAD FETCH_HEAD)..HEAD | exclude_testdata | \ + git diff $(git merge-base HEAD FETCH_HEAD)..HEAD | filter_excludes | \ clang-tidy-diff-7.py -p 1 fi diff --git a/source/common/chromium_url/BUILD b/source/common/chromium_url/BUILD new file mode 100644 index 0000000000000..9b07e76b00130 --- /dev/null +++ b/source/common/chromium_url/BUILD @@ -0,0 +1,28 @@ +licenses(["notice"]) # Apache 2 + +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_library", + "envoy_package", +) + +envoy_package() + +envoy_cc_library( + name = "chromium_url", + srcs = [ + "url_canon.cc", + "url_canon_internal.cc", + "url_canon_path.cc", + "url_canon_stdstring.cc", + ], + hdrs = [ + "envoy_shim.h", + "url_canon.h", + "url_canon_internal.h", + "url_canon_stdstring.h", + "url_parse.h", + "url_parse_internal.h", + ], + deps = ["//source/common/common:assert_lib"], +) diff --git a/source/common/chromium_url/LICENSE b/source/common/chromium_url/LICENSE new file mode 100644 index 0000000000000..a32e00ce6be36 --- /dev/null +++ b/source/common/chromium_url/LICENSE @@ -0,0 +1,27 @@ +// Copyright 2015 The Chromium Authors. All rights reserved. 
+// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/source/common/chromium_url/README.md b/source/common/chromium_url/README.md new file mode 100644 index 0000000000000..64d28b315dd20 --- /dev/null +++ b/source/common/chromium_url/README.md @@ -0,0 +1,15 @@ +This is a manually minified variant of +https://chromium.googlesource.com/chromium/src.git/+archive/74.0.3729.15/url.tar.gz, +providing just the parts needed for `url::CanonicalizePath()`. This is intended +to support a security release fix for CVE-2019-9901. 
Long term we need this to +be moved to absl or QUICHE for upgrades and long-term support. + +Some specific transforms of interest: +* `url_parse.h` is minified to just `Component` and flattened back into the URL + directory. It does not contain any non-Chromium authored code any longer and + so does not have a separate LICENSE. +* `envoy_shim.h` adapts various macros to the Envoy context. +* Anything not reachable from `url::CanonicalizePath()` has been dropped. +* Header include paths have changed as needed. +* BUILD was manually written. +* Various clang-tidy and format fixes. diff --git a/source/common/chromium_url/envoy_shim.h b/source/common/chromium_url/envoy_shim.h new file mode 100644 index 0000000000000..2b7443926c1f5 --- /dev/null +++ b/source/common/chromium_url/envoy_shim.h @@ -0,0 +1,17 @@ +#pragma once + +#include "common/common/assert.h" + +// This is a minimal Envoy adaptation layer for the Chromium URL library. +// NOLINT(namespace-envoy) + +#define DISALLOW_COPY_AND_ASSIGN(TypeName) \ + TypeName(const TypeName&) = delete; \ + TypeName& operator=(const TypeName&) = delete + +#define EXPORT_TEMPLATE_DECLARE(x) +#define EXPORT_TEMPLATE_DEFINE(x) +#define COMPONENT_EXPORT(x) + +#define DCHECK(x) ASSERT(x) +#define NOTREACHED() NOT_REACHED_GCOVR_EXCL_LINE diff --git a/source/common/chromium_url/url_canon.cc b/source/common/chromium_url/url_canon.cc new file mode 100644 index 0000000000000..91926b6f237b6 --- /dev/null +++ b/source/common/chromium_url/url_canon.cc @@ -0,0 +1,16 @@ +// Envoy snapshot of Chromium URL path normalization, see README.md. +// NOLINT(namespace-envoy) + +// Copyright 2017 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "common/chromium_url/url_canon.h" + +#include "common/chromium_url/envoy_shim.h" + +namespace url { + +template class EXPORT_TEMPLATE_DEFINE(COMPONENT_EXPORT(URL)) CanonOutputT; + +} // namespace url diff --git a/source/common/chromium_url/url_canon.h b/source/common/chromium_url/url_canon.h new file mode 100644 index 0000000000000..0f66374c60c4f --- /dev/null +++ b/source/common/chromium_url/url_canon.h @@ -0,0 +1,186 @@ +// Envoy snapshot of Chromium URL path normalization, see README.md. +// NOLINT(namespace-envoy) + +// Copyright 2013 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef URL_URL_CANON_H_ +#define URL_URL_CANON_H_ + +#include +#include + +#include "common/chromium_url/envoy_shim.h" +#include "common/chromium_url/url_parse.h" + +namespace url { + +// Canonicalizer output ------------------------------------------------------- + +// Base class for the canonicalizer output, this maintains a buffer and +// supports simple resizing and append operations on it. +// +// It is VERY IMPORTANT that no virtual function calls be made on the common +// code path. We only have two virtual function calls, the destructor and a +// resize function that is called when the existing buffer is not big enough. +// The derived class is then in charge of setting up our buffer which we will +// manage. +template class CanonOutputT { +public: + CanonOutputT() : buffer_(NULL), buffer_len_(0), cur_len_(0) {} + virtual ~CanonOutputT() {} + + // Implemented to resize the buffer. This function should update the buffer + // pointer to point to the new buffer, and any old data up to |cur_len_| in + // the buffer must be copied over. + // + // The new size |sz| must be larger than buffer_len_. + virtual void Resize(int sz) = 0; + + // Accessor for returning a character at a given position. The input offset + // must be in the valid range. 
+ inline T at(int offset) const { return buffer_[offset]; } + + // Sets the character at the given position. The given position MUST be less + // than the length(). + inline void set(int offset, T ch) { buffer_[offset] = ch; } + + // Returns the number of characters currently in the buffer. + inline int length() const { return cur_len_; } + + // Returns the current capacity of the buffer. The length() is the number of + // characters that have been declared to be written, but the capacity() is + // the number that can be written without reallocation. If the caller must + // write many characters at once, it can make sure there is enough capacity, + // write the data, then use set_size() to declare the new length(). + int capacity() const { return buffer_len_; } + + // Called by the user of this class to get the output. The output will NOT + // be NULL-terminated. Call length() to get the + // length. + const T* data() const { return buffer_; } + T* data() { return buffer_; } + + // Shortens the URL to the new length. Used for "backing up" when processing + // relative paths. This can also be used if an external function writes a lot + // of data to the buffer (when using the "Raw" version below) beyond the end, + // to declare the new length. + // + // This MUST NOT be used to expand the size of the buffer beyond capacity(). + void set_length(int new_len) { cur_len_ = new_len; } + + // This is the most performance critical function, since it is called for + // every character. + void push_back(T ch) { + // In VC2005, putting this common case first speeds up execution + // dramatically because this branch is predicted as taken. + if (cur_len_ < buffer_len_) { + buffer_[cur_len_] = ch; + cur_len_++; + return; + } + + // Grow the buffer to hold at least one more item. Hopefully we won't have + // to do this very often. + if (!Grow(1)) + return; + + // Actually do the insertion. + buffer_[cur_len_] = ch; + cur_len_++; + } + + // Appends the given string to the output. 
+ void Append(const T* str, int str_len) { + if (cur_len_ + str_len > buffer_len_) { + if (!Grow(cur_len_ + str_len - buffer_len_)) + return; + } + for (int i = 0; i < str_len; i++) + buffer_[cur_len_ + i] = str[i]; + cur_len_ += str_len; + } + + void ReserveSizeIfNeeded(int estimated_size) { + // Reserve a bit extra to account for escaped chars. + if (estimated_size > buffer_len_) + Resize(estimated_size + 8); + } + +protected: + // Grows the given buffer so that it can fit at least |min_additional| + // characters. Returns true if the buffer could be resized, false on OOM. + bool Grow(int min_additional) { + static const int kMinBufferLen = 16; + int new_len = (buffer_len_ == 0) ? kMinBufferLen : buffer_len_; + do { + if (new_len >= (1 << 30)) // Prevent overflow below. + return false; + new_len *= 2; + } while (new_len < buffer_len_ + min_additional); + Resize(new_len); + return true; + } + + T* buffer_; + int buffer_len_; + + // Used characters in the buffer. + int cur_len_; +}; + +// Simple implementation of the CanonOutput using new[]. This class +// also supports a static buffer so if it is allocated on the stack, most +// URLs can be canonicalized with no heap allocations. +template class RawCanonOutputT : public CanonOutputT { +public: + RawCanonOutputT() : CanonOutputT() { + this->buffer_ = fixed_buffer_; + this->buffer_len_ = fixed_capacity; + } + ~RawCanonOutputT() override { + if (this->buffer_ != fixed_buffer_) + delete[] this->buffer_; + } + + void Resize(int sz) override { + T* new_buf = new T[sz]; + memcpy(new_buf, this->buffer_, sizeof(T) * (this->cur_len_ < sz ? this->cur_len_ : sz)); + if (this->buffer_ != fixed_buffer_) + delete[] this->buffer_; + this->buffer_ = new_buf; + this->buffer_len_ = sz; + } + +protected: + T fixed_buffer_[fixed_capacity]; +}; + +// Explicitly instantiate commonly used instantiations. 
+extern template class EXPORT_TEMPLATE_DECLARE(COMPONENT_EXPORT(URL)) CanonOutputT; + +// Normally, all canonicalization output is in narrow characters. We support +// the templates so it can also be used internally if a wide buffer is +// required. +typedef CanonOutputT CanonOutput; + +template +class RawCanonOutput : public RawCanonOutputT {}; + +// Path. If the input does not begin in a slash (including if the input is +// empty), we'll prepend a slash to the path to make it canonical. +// +// The 8-bit version assumes UTF-8 encoding, but does not verify the validity +// of the UTF-8 (i.e., you can have invalid UTF-8 sequences, invalid +// characters, etc.). Normally, URLs will come in as UTF-16, so this isn't +// an issue. Somebody giving us an 8-bit path is responsible for generating +// the path that the server expects (we'll escape high-bit characters), so +// if something is invalid, it's their problem. +COMPONENT_EXPORT(URL) +bool CanonicalizePath(const char* spec, const Component& path, CanonOutput* output, + Component* out_path); + +} // namespace url + +#endif // URL_URL_CANON_H_ diff --git a/source/common/chromium_url/url_canon_internal.cc b/source/common/chromium_url/url_canon_internal.cc new file mode 100644 index 0000000000000..7aeb4f3de1b88 --- /dev/null +++ b/source/common/chromium_url/url_canon_internal.cc @@ -0,0 +1,295 @@ +// Envoy snapshot of Chromium URL path normalization, see README.md. +// NOLINT(namespace-envoy) + +// Copyright 2013 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "common/chromium_url/url_canon_internal.h" + +namespace url { + +// See the header file for this array's declaration. 
+const unsigned char kSharedCharTypeTable[0x100] = { + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, // 0x00 - 0x0f + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, // 0x10 - 0x1f + 0, // 0x20 ' ' (escape spaces in queries) + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x21 ! + 0, // 0x22 " + 0, // 0x23 # (invalid in query since it marks the ref) + CHAR_QUERY | CHAR_USERINFO, // 0x24 $ + CHAR_QUERY | CHAR_USERINFO, // 0x25 % + CHAR_QUERY | CHAR_USERINFO, // 0x26 & + 0, // 0x27 ' (Try to prevent XSS.) + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x28 ( + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x29 ) + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x2a * + CHAR_QUERY | CHAR_USERINFO, // 0x2b + + CHAR_QUERY | CHAR_USERINFO, // 0x2c , + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x2d - + CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_COMPONENT, // 0x2e . + CHAR_QUERY, // 0x2f / + CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_DEC | CHAR_OCT | + CHAR_COMPONENT, // 0x30 0 + CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_DEC | CHAR_OCT | + CHAR_COMPONENT, // 0x31 1 + CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_DEC | CHAR_OCT | + CHAR_COMPONENT, // 0x32 2 + CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_DEC | CHAR_OCT | + CHAR_COMPONENT, // 0x33 3 + CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_DEC | CHAR_OCT | + CHAR_COMPONENT, // 0x34 4 + CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_DEC | CHAR_OCT | + CHAR_COMPONENT, // 0x35 5 + CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_DEC | CHAR_OCT | + CHAR_COMPONENT, // 0x36 6 + CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_DEC | CHAR_OCT | + CHAR_COMPONENT, // 0x37 7 + CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_DEC | CHAR_COMPONENT, // 0x38 8 + CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_DEC | CHAR_COMPONENT, // 0x39 9 + 
CHAR_QUERY, // 0x3a : + CHAR_QUERY, // 0x3b ; + 0, // 0x3c < (Try to prevent certain types of XSS.) + CHAR_QUERY, // 0x3d = + 0, // 0x3e > (Try to prevent certain types of XSS.) + CHAR_QUERY, // 0x3f ? + CHAR_QUERY, // 0x40 @ + CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_COMPONENT, // 0x41 A + CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_COMPONENT, // 0x42 B + CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_COMPONENT, // 0x43 C + CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_COMPONENT, // 0x44 D + CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_COMPONENT, // 0x45 E + CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_COMPONENT, // 0x46 F + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x47 G + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x48 H + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x49 I + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x4a J + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x4b K + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x4c L + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x4d M + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x4e N + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x4f O + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x50 P + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x51 Q + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x52 R + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x53 S + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x54 T + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x55 U + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x56 V + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x57 W + CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_COMPONENT, // 0x58 X + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x59 Y + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x5a Z + CHAR_QUERY, // 0x5b [ + CHAR_QUERY, // 0x5c '\' + CHAR_QUERY, // 0x5d ] + CHAR_QUERY, // 0x5e ^ + CHAR_QUERY | CHAR_USERINFO | 
CHAR_COMPONENT, // 0x5f _ + CHAR_QUERY, // 0x60 ` + CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_COMPONENT, // 0x61 a + CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_COMPONENT, // 0x62 b + CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_COMPONENT, // 0x63 c + CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_COMPONENT, // 0x64 d + CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_COMPONENT, // 0x65 e + CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_COMPONENT, // 0x66 f + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x67 g + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x68 h + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x69 i + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x6a j + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x6b k + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x6c l + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x6d m + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x6e n + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x6f o + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x70 p + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x71 q + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x72 r + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x73 s + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x74 t + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x75 u + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x76 v + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x77 w + CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_COMPONENT, // 0x78 x + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x79 y + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x7a z + CHAR_QUERY, // 0x7b { + CHAR_QUERY, // 0x7c | + CHAR_QUERY, // 0x7d } + CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x7e ~ + 0, // 0x7f + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, // 0x80 - 0x8f + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 
// 0x90 - 0x9f + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, // 0xa0 - 0xaf + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, // 0xb0 - 0xbf + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, // 0xc0 - 0xcf + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, // 0xd0 - 0xdf + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, // 0xe0 - 0xef + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, // 0xf0 - 0xff +}; + +const char kHexCharLookup[0x10] = { + '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F', +}; + +const char kCharToHexLookup[8] = { + 0, // 0x00 - 0x1f + '0', // 0x20 - 0x3f: digits 0 - 9 are 0x30 - 0x39 + 'A' - 10, // 0x40 - 0x5f: letters A - F are 0x41 - 0x46 + 'a' - 10, // 0x60 - 0x7f: letters a - f are 0x61 - 0x66 + 0, // 0x80 - 0x9F + 0, // 0xA0 - 0xBF + 0, // 0xC0 - 0xDF + 0, // 0xE0 - 0xFF +}; + +} // namespace url diff --git a/source/common/chromium_url/url_canon_internal.h b/source/common/chromium_url/url_canon_internal.h new file mode 100644 index 0000000000000..63960665fc682 --- /dev/null +++ b/source/common/chromium_url/url_canon_internal.h @@ -0,0 +1,246 @@ +// Envoy snapshot of Chromium URL path normalization, see README.md. +// NOLINT(namespace-envoy) + +// Copyright 2013 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef URL_URL_CANON_INTERNAL_H_ +#define URL_URL_CANON_INTERNAL_H_ + +// This file is intended to be included in another C++ file where the character +// types are defined. This allows us to write mostly generic code, but not have +// template bloat because everything is inlined when anybody calls any of our +// functions. 
+ +#include +#include + +#include "common/chromium_url/envoy_shim.h" +#include "common/chromium_url/url_canon.h" + +namespace url { + +// Character type handling ----------------------------------------------------- + +// Bits that identify different character types. These types identify different +// bits that are set for each 8-bit character in the kSharedCharTypeTable. +enum SharedCharTypes { + // Characters that do not require escaping in queries. Characters that do + // not have this flag will be escaped; see url_canon_query.cc + CHAR_QUERY = 1, + + // Valid in the username/password field. + CHAR_USERINFO = 2, + + // Valid in a IPv4 address (digits plus dot and 'x' for hex). + CHAR_IPV4 = 4, + + // Valid in an ASCII-representation of a hex digit (as in %-escaped). + CHAR_HEX = 8, + + // Valid in an ASCII-representation of a decimal digit. + CHAR_DEC = 16, + + // Valid in an ASCII-representation of an octal digit. + CHAR_OCT = 32, + + // Characters that do not require escaping in encodeURIComponent. Characters + // that do not have this flag will be escaped; see url_util.cc. + CHAR_COMPONENT = 64, +}; + +// This table contains the flags in SharedCharTypes for each 8-bit character. +// Some canonicalization functions have their own specialized lookup table. +// For those with simple requirements, we have collected the flags in one +// place so there are fewer lookup tables to load into the CPU cache. +// +// Using an unsigned char type has a small but measurable performance benefit +// over using a 32-bit number. +extern const unsigned char kSharedCharTypeTable[0x100]; + +// More readable wrappers around the character type lookup table. 
+inline bool IsCharOfType(unsigned char c, SharedCharTypes type) { + return !!(kSharedCharTypeTable[c] & type); +} +inline bool IsQueryChar(unsigned char c) { return IsCharOfType(c, CHAR_QUERY); } +inline bool IsIPv4Char(unsigned char c) { return IsCharOfType(c, CHAR_IPV4); } +inline bool IsHexChar(unsigned char c) { return IsCharOfType(c, CHAR_HEX); } +inline bool IsComponentChar(unsigned char c) { return IsCharOfType(c, CHAR_COMPONENT); } + +// Maps the hex numerical values 0x0 to 0xf to the corresponding ASCII digit +// that will be used to represent it. +COMPONENT_EXPORT(URL) extern const char kHexCharLookup[0x10]; + +// This lookup table allows fast conversion between ASCII hex letters and their +// corresponding numerical value. The 8-bit range is divided up into 8 +// regions of 0x20 characters each. Each of the three character types (numbers, +// uppercase, lowercase) falls into different regions of this range. The table +// contains the amount to subtract from characters in that range to get at +// the corresponding numerical value. +// +// See HexDigitToValue for the lookup. +extern const char kCharToHexLookup[8]; + +// Assumes the input is a valid hex digit! Call IsHexChar before using this. +inline unsigned char HexCharToValue(unsigned char c) { return c - kCharToHexLookup[c / 0x20]; } + +// Indicates if the given character is a dot or dot equivalent, returning the +// number of characters taken by it. This will be one for a literal dot, 3 for +// an escaped dot. If the character is not a dot, this will return 0. +template inline int IsDot(const CHAR* spec, int offset, int end) { + if (spec[offset] == '.') { + return 1; + } else if (spec[offset] == '%' && offset + 3 <= end && spec[offset + 1] == '2' && + (spec[offset + 2] == 'e' || spec[offset + 2] == 'E')) { + // Found "%2e" + return 3; + } + return 0; +} + +// Write a single character, escaped, to the output. This always escapes: it +// does no checking that thee character requires escaping. 
+// Escaping makes sense only 8 bit chars, so code works in all cases of +// input parameters (8/16bit). +template +inline void AppendEscapedChar(UINCHAR ch, CanonOutputT* output) { + output->push_back('%'); + output->push_back(kHexCharLookup[(ch >> 4) & 0xf]); + output->push_back(kHexCharLookup[ch & 0xf]); +} + +// UTF-8 functions ------------------------------------------------------------ + +// Reads one character in UTF-8 starting at |*begin| in |str| and places +// the decoded value into |*code_point|. If the character is valid, we will +// return true. If invalid, we'll return false and put the +// kUnicodeReplacementCharacter into |*code_point|. +// +// |*begin| will be updated to point to the last character consumed so it +// can be incremented in a loop and will be ready for the next character. +// (for a single-byte ASCII character, it will not be changed). +COMPONENT_EXPORT(URL) +bool ReadUTFChar(const char* str, int* begin, int length, unsigned* code_point_out); + +// Generic To-UTF-8 converter. This will call the given append method for each +// character that should be appended, with the given output method. Wrappers +// are provided below for escaped and non-escaped versions of this. +// +// The char_value must have already been checked that it's a valid Unicode +// character. +template +inline void DoAppendUTF8(unsigned char_value, Output* output) { + if (char_value <= 0x7f) { + Appender(static_cast(char_value), output); + } else if (char_value <= 0x7ff) { + // 110xxxxx 10xxxxxx + Appender(static_cast(0xC0 | (char_value >> 6)), output); + Appender(static_cast(0x80 | (char_value & 0x3f)), output); + } else if (char_value <= 0xffff) { + // 1110xxxx 10xxxxxx 10xxxxxx + Appender(static_cast(0xe0 | (char_value >> 12)), output); + Appender(static_cast(0x80 | ((char_value >> 6) & 0x3f)), output); + Appender(static_cast(0x80 | (char_value & 0x3f)), output); + } else if (char_value <= 0x10FFFF) { // Max Unicode code point. 
+ // 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + Appender(static_cast(0xf0 | (char_value >> 18)), output); + Appender(static_cast(0x80 | ((char_value >> 12) & 0x3f)), output); + Appender(static_cast(0x80 | ((char_value >> 6) & 0x3f)), output); + Appender(static_cast(0x80 | (char_value & 0x3f)), output); + } else { + // Invalid UTF-8 character (>20 bits). + NOTREACHED(); + } +} + +// Helper used by AppendUTF8Value below. We use an unsigned parameter so there +// are no funny sign problems with the input, but then have to convert it to +// a regular char for appending. +inline void AppendCharToOutput(unsigned char ch, CanonOutput* output) { + output->push_back(static_cast(ch)); +} + +// Writes the given character to the output as UTF-8. This does NO checking +// of the validity of the Unicode characters; the caller should ensure that +// the value it is appending is valid to append. +inline void AppendUTF8Value(unsigned char_value, CanonOutput* output) { + DoAppendUTF8(char_value, output); +} + +// Writes the given character to the output as UTF-8, escaping ALL +// characters (even when they are ASCII). This does NO checking of the +// validity of the Unicode characters; the caller should ensure that the value +// it is appending is valid to append. +inline void AppendUTF8EscapedValue(unsigned char_value, CanonOutput* output) { + DoAppendUTF8(char_value, output); +} + +// Escaping functions --------------------------------------------------------- + +// Writes the given character to the output as UTF-8, escaped. Call this +// function only when the input is wide. Returns true on success. Failure +// means there was some problem with the encoding, we'll still try to +// update the |*begin| pointer and add a placeholder character to the +// output so processing can continue. +// +// We will append the character starting at ch[begin] with the buffer ch +// being |length|. 
|*begin| will be updated to point to the last character +// consumed (we may consume more than one for UTF-16) so that if called in +// a loop, incrementing the pointer will move to the next character. +// +// Every single output character will be escaped. This means that if you +// give it an ASCII character as input, it will be escaped. Some code uses +// this when it knows that a character is invalid according to its rules +// for validity. If you don't want escaping for ASCII characters, you will +// have to filter them out prior to calling this function. +// +// Assumes that ch[begin] is within range in the array, but does not assume +// that any following characters are. +inline bool AppendUTF8EscapedChar(const char* str, int* begin, int length, CanonOutput* output) { + // ReadUTF8Char will handle invalid characters for us and give us the + // kUnicodeReplacementCharacter, so we don't have to do special checking + // after failure, just pass through the failure to the caller. + unsigned ch; + bool success = ReadUTFChar(str, begin, length, &ch); + AppendUTF8EscapedValue(ch, output); + return success; +} + +// Given a '%' character at |*begin| in the string |spec|, this will decode +// the escaped value and put it into |*unescaped_value| on success (returns +// true). On failure, this will return false, and will not write into +// |*unescaped_value|. +// +// |*begin| will be updated to point to the last character of the escape +// sequence so that when called with the index of a for loop, the next time +// through it will point to the next character to be considered. On failure, +// |*begin| will be unchanged. 
+inline bool Is8BitChar(char /*c*/) { + return true; // this case is specialized to avoid a warning +} + +template +inline bool DecodeEscaped(const CHAR* spec, int* begin, int end, unsigned char* unescaped_value) { + if (*begin + 3 > end || !Is8BitChar(spec[*begin + 1]) || !Is8BitChar(spec[*begin + 2])) { + // Invalid escape sequence because there's not enough room, or the + // digits are not ASCII. + return false; + } + + unsigned char first = static_cast(spec[*begin + 1]); + unsigned char second = static_cast(spec[*begin + 2]); + if (!IsHexChar(first) || !IsHexChar(second)) { + // Invalid hex digits, fail. + return false; + } + + // Valid escape sequence. + *unescaped_value = (HexCharToValue(first) << 4) + HexCharToValue(second); + *begin += 2; + return true; +} + +} // namespace url + +#endif // URL_URL_CANON_INTERNAL_H_ diff --git a/source/common/chromium_url/url_canon_path.cc b/source/common/chromium_url/url_canon_path.cc new file mode 100644 index 0000000000000..2e13dc0cf8c87 --- /dev/null +++ b/source/common/chromium_url/url_canon_path.cc @@ -0,0 +1,417 @@ +// Envoy snapshot of Chromium URL path normalization, see README.md. +// NOLINT(namespace-envoy) + +// Copyright 2013 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include + +#include "common/chromium_url/url_canon.h" +#include "common/chromium_url/url_canon_internal.h" +#include "common/chromium_url/url_parse_internal.h" + +namespace url { + +namespace { + +enum CharacterFlags { + // Pass through unchanged, whether escaped or unescaped. This doesn't + // actually set anything so you can't OR it to check, it's just to make the + // table below more clear when neither ESCAPE or UNESCAPE is set. + PASS = 0, + + // This character requires special handling in DoPartialPath. Doing this test + // first allows us to filter out the common cases of regular characters that + // can be directly copied. 
+ SPECIAL = 1, + + // This character must be escaped in the canonical output. Note that all + // escaped chars also have the "special" bit set so that the code that looks + // for this is triggered. Not valid with PASS or ESCAPE + ESCAPE_BIT = 2, + ESCAPE = ESCAPE_BIT | SPECIAL, + + // This character must be unescaped in canonical output. Not valid with + // ESCAPE or PASS. We DON'T set the SPECIAL flag since if we encounter these + // characters unescaped, they should just be copied. + UNESCAPE = 4, + + // This character is disallowed in URLs. Note that the "special" bit is also + // set to trigger handling. + INVALID_BIT = 8, + INVALID = INVALID_BIT | SPECIAL, +}; + +// This table contains one of the above flag values. Note some flags are more +// than one bits because they also turn on the "special" flag. Special is the +// only flag that may be combined with others. +// +// This table is designed to match exactly what IE does with the characters. +// +// Dot is even more special, and the escaped version is handled specially by +// IsDot. Therefore, we don't need the "escape" flag, and even the "unescape" +// bit is never handled (we just need the "special") bit. +const unsigned char kPathCharLookup[0x100] = { + // NULL control chars... + INVALID, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, + ESCAPE, ESCAPE, ESCAPE, ESCAPE, + // control chars... + ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, + ESCAPE, ESCAPE, ESCAPE, ESCAPE, + // ' ' ! " # $ % & ' ( ) * + // + , - . / + ESCAPE, PASS, ESCAPE, ESCAPE, PASS, ESCAPE, PASS, PASS, PASS, PASS, PASS, PASS, PASS, UNESCAPE, + SPECIAL, PASS, + // 0 1 2 3 4 5 6 7 8 9 : + // ; < = > ? 
+ UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, + UNESCAPE, PASS, PASS, ESCAPE, PASS, ESCAPE, ESCAPE, + // @ A B C D E F G H I J + // K L M N O + PASS, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, + UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, + // P Q R S T U V W X Y Z + // [ \ ] ^ _ + UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, + UNESCAPE, UNESCAPE, PASS, ESCAPE, PASS, ESCAPE, UNESCAPE, + // ` a b c d e f g h i j + // k l m n o + ESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, + UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, + // p q r s t u v w x y z + // { | } ~ + UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, + UNESCAPE, UNESCAPE, ESCAPE, ESCAPE, ESCAPE, UNESCAPE, ESCAPE, + // ...all the high-bit characters are escaped + ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, + ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, + ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, + ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, + ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, + ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, + ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, + ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, + ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, + ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, + ESCAPE, ESCAPE, ESCAPE, 
ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE}; + +enum DotDisposition { + // The given dot is just part of a filename and is not special. + NOT_A_DIRECTORY, + + // The given dot is the current directory. + DIRECTORY_CUR, + + // The given dot is the first of a double dot that should take us up one. + DIRECTORY_UP +}; + +// When the path resolver finds a dot, this function is called with the +// character following that dot to see what it is. The return value +// indicates what type this dot is (see above). This code handles the case +// where the dot is at the end of the input. +// +// |*consumed_len| will contain the number of characters in the input that +// express what we found. +// +// If the input is "../foo", |after_dot| = 1, |end| = 6, and +// at the end, |*consumed_len| = 2 for the "./" this function consumed. The +// original dot length should be handled by the caller. +template +DotDisposition ClassifyAfterDot(const CHAR* spec, int after_dot, int end, int* consumed_len) { + if (after_dot == end) { + // Single dot at the end. + *consumed_len = 0; + return DIRECTORY_CUR; + } + if (IsURLSlash(spec[after_dot])) { + // Single dot followed by a slash. + *consumed_len = 1; // Consume the slash + return DIRECTORY_CUR; + } + + int second_dot_len = IsDot(spec, after_dot, end); + if (second_dot_len) { + int after_second_dot = after_dot + second_dot_len; + if (after_second_dot == end) { + // Double dot at the end. + *consumed_len = second_dot_len; + return DIRECTORY_UP; + } + if (IsURLSlash(spec[after_second_dot])) { + // Double dot followed by a slash. + *consumed_len = second_dot_len + 1; + return DIRECTORY_UP; + } + } + + // The dots are followed by something else, not a directory. + *consumed_len = 0; + return NOT_A_DIRECTORY; +} + +// Rewinds the output to the previous slash. It is assumed that the output +// ends with a slash and this doesn't count (we call this when we are +// appending directory paths, so the previous path component has and ending +// slash). 
+// +// This will stop at the first slash (assumed to be at position +// |path_begin_in_output| and not go any higher than that. Some web pages +// do ".." too many times, so we need to handle that brokenness. +// +// It searches for a literal slash rather than including a backslash as well +// because it is run only on the canonical output. +// +// The output is guaranteed to end in a slash when this function completes. +void BackUpToPreviousSlash(int path_begin_in_output, CanonOutput* output) { + DCHECK(output->length() > 0); + + int i = output->length() - 1; + DCHECK(output->at(i) == '/'); + if (i == path_begin_in_output) + return; // We're at the first slash, nothing to do. + + // Now back up (skipping the trailing slash) until we find another slash. + i--; + while (output->at(i) != '/' && i > path_begin_in_output) + i--; + + // Now shrink the output to just include that last slash we found. + output->set_length(i + 1); +} + +// Looks for problematic nested escape sequences and escapes the output as +// needed to ensure they can't be misinterpreted. +// +// Our concern is that in input escape sequence that's invalid because it +// contains nested escape sequences might look valid once those are unescaped. +// For example, "%%300" is not a valid escape sequence, but after unescaping the +// inner "%30" this becomes "%00" which is valid. Leaving this in the output +// string can result in callers re-canonicalizing the string and unescaping this +// sequence, thus resulting in something fundamentally different than the +// original input here. This can cause a variety of problems. +// +// This function is called after we've just unescaped a sequence that's within +// two output characters of a previous '%' that we know didn't begin a valid +// escape sequence in the input string. We look for whether the output is going +// to turn into a valid escape sequence, and if so, convert the initial '%' into +// an escaped "%25" so the output can't be misinterpreted. 
+// +// |spec| is the input string we're canonicalizing. +// |next_input_index| is the index of the next unprocessed character in |spec|. +// |input_len| is the length of |spec|. +// |last_invalid_percent_index| is the index in |output| of a previously-seen +// '%' character. The caller knows this '%' character isn't followed by a valid +// escape sequence in the input string. +// |output| is the canonicalized output thus far. The caller guarantees this +// ends with a '%' followed by one or two characters, and the '%' is the one +// pointed to by |last_invalid_percent_index|. The last character in the string +// was just unescaped. +template +void CheckForNestedEscapes(const CHAR* spec, int next_input_index, int input_len, + int last_invalid_percent_index, CanonOutput* output) { + const int length = output->length(); + const char last_unescaped_char = output->at(length - 1); + + // If |output| currently looks like "%c", we need to try appending the next + // input character to see if this will result in a problematic escape + // sequence. Note that this won't trigger on the first nested escape of a + // two-escape sequence like "%%30%30" -- we'll allow the conversion to + // "%0%30" -- but the second nested escape will be caught by this function + // when it's called again in that case. + const bool append_next_char = last_invalid_percent_index == length - 2; + if (append_next_char) { + // If the input doesn't contain a 7-bit character next, this case won't be a + // problem. + if ((next_input_index == input_len) || (spec[next_input_index] >= 0x80)) + return; + output->push_back(static_cast(spec[next_input_index])); + } + + // Now output ends like "%cc". Try to unescape this. + int begin = last_invalid_percent_index; + unsigned char temp; + if (DecodeEscaped(output->data(), &begin, output->length(), &temp)) { + // New escape sequence found. 
Overwrite the characters following the '%' + // with "25", and push_back() the one or two characters that were following + // the '%' when we were called. + if (!append_next_char) + output->push_back(output->at(last_invalid_percent_index + 1)); + output->set(last_invalid_percent_index + 1, '2'); + output->set(last_invalid_percent_index + 2, '5'); + output->push_back(last_unescaped_char); + } else if (append_next_char) { + // Not a valid escape sequence, but we still need to undo appending the next + // source character so the caller can process it normally. + output->set_length(length); + } +} + +// Appends the given path to the output. It assumes that if the input path +// starts with a slash, it should be copied to the output. If no path has +// already been appended to the output (the case when not resolving +// relative URLs), the path should begin with a slash. +// +// If there are already path components (this mode is used when appending +// relative paths for resolving), it assumes that the output already has +// a trailing slash and that if the input begins with a slash, it should be +// copied to the output. +// +// We do not collapse multiple slashes in a row to a single slash. It seems +// no web browsers do this, and we don't want incompatibilities, even though +// it would be correct for most systems. +template +bool DoPartialPath(const CHAR* spec, const Component& path, int path_begin_in_output, + CanonOutput* output) { + int end = path.end(); + + // We use this variable to minimize the amount of work done when unescaping -- + // we'll only call CheckForNestedEscapes() when this points at one of the last + // couple of characters in |output|. + int last_invalid_percent_index = INT_MIN; + + bool success = true; + for (int i = path.begin; i < end; i++) { + UCHAR uch = static_cast(spec[i]); + if (sizeof(CHAR) > 1 && uch >= 0x80) { + // We only need to test wide input for having non-ASCII characters. 
For + // narrow input, we'll always just use the lookup table. We don't try to + // do anything tricky with decoding/validating UTF-8. This function will + // read one or two UTF-16 characters and append the output as UTF-8. This + // call will be removed in 8-bit mode. + success &= AppendUTF8EscapedChar(spec, &i, end, output); + } else { + // Normal ASCII character or 8-bit input, use the lookup table. + unsigned char out_ch = static_cast(uch); + unsigned char flags = kPathCharLookup[out_ch]; + if (flags & SPECIAL) { + // Needs special handling of some sort. + int dotlen; + if ((dotlen = IsDot(spec, i, end)) > 0) { + // See if this dot was preceded by a slash in the output. We + // assume that when canonicalizing paths, they will always + // start with a slash and not a dot, so we don't have to + // bounds check the output. + // + // Note that we check this in the case of dots so we don't have to + // special case slashes. Since slashes are much more common than + // dots, this actually increases performance measurably (though + // slightly). + DCHECK(output->length() > path_begin_in_output); + if (output->length() > path_begin_in_output && output->at(output->length() - 1) == '/') { + // Slash followed by a dot, check to see if this is means relative + int consumed_len; + switch (ClassifyAfterDot(spec, i + dotlen, end, &consumed_len)) { + case NOT_A_DIRECTORY: + // Copy the dot to the output, it means nothing special. + output->push_back('.'); + i += dotlen - 1; + break; + case DIRECTORY_CUR: // Current directory, just skip the input. + i += dotlen + consumed_len - 1; + break; + case DIRECTORY_UP: + BackUpToPreviousSlash(path_begin_in_output, output); + i += dotlen + consumed_len - 1; + break; + } + } else { + // This dot is not preceded by a slash, it is just part of some + // file name. 
+ output->push_back('.'); + i += dotlen - 1; + } + + } else if (out_ch == '\\') { + // Convert backslashes to forward slashes + output->push_back('/'); + + } else if (out_ch == '%') { + // Handle escape sequences. + unsigned char unescaped_value; + if (DecodeEscaped(spec, &i, end, &unescaped_value)) { + // Valid escape sequence, see if we keep, reject, or unescape it. + // Note that at this point DecodeEscape() will have advanced |i| to + // the last character of the escape sequence. + char unescaped_flags = kPathCharLookup[unescaped_value]; + + if (unescaped_flags & UNESCAPE) { + // This escaped value shouldn't be escaped. Try to copy it. + output->push_back(unescaped_value); + // If we just unescaped a value within 2 output characters of the + // '%' from a previously-detected invalid escape sequence, we + // might have an input string with problematic nested escape + // sequences; detect and fix them. + if (last_invalid_percent_index >= (output->length() - 3)) { + CheckForNestedEscapes(spec, i + 1, end, last_invalid_percent_index, output); + } + } else { + // Either this is an invalid escaped character, or it's a valid + // escaped character we should keep escaped. In the first case we + // should just copy it exactly and remember the error. In the + // second we also copy exactly in case the server is sensitive to + // changing the case of any hex letters. + output->push_back('%'); + output->push_back(static_cast(spec[i - 1])); + output->push_back(static_cast(spec[i])); + if (unescaped_flags & INVALID_BIT) + success = false; + } + } else { + // Invalid escape sequence. IE7+ rejects any URLs with such + // sequences, while other browsers pass them through unchanged. We + // use the permissive behavior. + // TODO(brettw): Consider testing IE's strict behavior, which would + // allow removing the code to handle nested escapes above. 
+ last_invalid_percent_index = output->length(); + output->push_back('%'); + } + + } else if (flags & INVALID_BIT) { + // For NULLs, etc. fail. + AppendEscapedChar(out_ch, output); + success = false; + + } else if (flags & ESCAPE_BIT) { + // This character should be escaped. + AppendEscapedChar(out_ch, output); + } + } else { + // Nothing special about this character, just append it. + output->push_back(out_ch); + } + } + } + return success; +} + +template +bool DoPath(const CHAR* spec, const Component& path, CanonOutput* output, Component* out_path) { + bool success = true; + out_path->begin = output->length(); + if (path.len > 0) { + // Write out an initial slash if the input has none. If we just parse a URL + // and then canonicalize it, it will of course have a slash already. This + // check is for the replacement and relative URL resolving cases of file + // URLs. + if (!IsURLSlash(spec[path.begin])) + output->push_back('/'); + + success = DoPartialPath(spec, path, out_path->begin, output); + } else { + // No input, canonical path is a slash. + output->push_back('/'); + } + out_path->len = output->length() - out_path->begin; + return success; +} + +} // namespace + +bool CanonicalizePath(const char* spec, const Component& path, CanonOutput* output, + Component* out_path) { + return DoPath(spec, path, output, out_path); +} + +} // namespace url diff --git a/source/common/chromium_url/url_canon_stdstring.cc b/source/common/chromium_url/url_canon_stdstring.cc new file mode 100644 index 0000000000000..dc501d66ec26b --- /dev/null +++ b/source/common/chromium_url/url_canon_stdstring.cc @@ -0,0 +1,33 @@ +// Envoy snapshot of Chromium URL path normalization, see README.md. +// NOLINT(namespace-envoy) + +// Copyright 2013 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "common/chromium_url/url_canon_stdstring.h" + +namespace url { + +StdStringCanonOutput::StdStringCanonOutput(std::string* str) : CanonOutput(), str_(str) { + cur_len_ = static_cast(str_->size()); // Append to existing data. + buffer_ = str_->empty() ? NULL : &(*str_)[0]; + buffer_len_ = static_cast(str_->size()); +} + +StdStringCanonOutput::~StdStringCanonOutput() { + // Nothing to do, we don't own the string. +} + +void StdStringCanonOutput::Complete() { + str_->resize(cur_len_); + buffer_len_ = cur_len_; +} + +void StdStringCanonOutput::Resize(int sz) { + str_->resize(sz); + buffer_ = str_->empty() ? NULL : &(*str_)[0]; + buffer_len_ = sz; +} + +} // namespace url diff --git a/source/common/chromium_url/url_canon_stdstring.h b/source/common/chromium_url/url_canon_stdstring.h new file mode 100644 index 0000000000000..e502b1a3e6f55 --- /dev/null +++ b/source/common/chromium_url/url_canon_stdstring.h @@ -0,0 +1,58 @@ +// Envoy snapshot of Chromium URL path normalization, see README.md. +// NOLINT(namespace-envoy) + +// Copyright 2013 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef URL_URL_CANON_STDSTRING_H_ +#define URL_URL_CANON_STDSTRING_H_ + +// This header file defines a canonicalizer output method class for STL +// strings. Because the canonicalizer tries not to be dependent on the STL, +// we have segregated it here. + +#include + +#include "common/chromium_url/envoy_shim.h" +#include "common/chromium_url/url_canon.h" + +#define DISALLOW_COPY_AND_ASSIGN(TypeName) \ + TypeName(const TypeName&) = delete; \ + TypeName& operator=(const TypeName&) = delete + +namespace url { + +// Write into a std::string given in the constructor. This object does not own +// the string itself, and the user must ensure that the string stays alive +// throughout the lifetime of this object. 
+// +// The given string will be appended to; any existing data in the string will +// be preserved. +// +// Note that when canonicalization is complete, the string will likely have +// unused space at the end because we make the string very big to start out +// with (by |initial_size|). This ends up being important because resize +// operations are slow, and because the base class needs to write directly +// into the buffer. +// +// Therefore, the user should call Complete() before using the string that +// this class wrote into. +class COMPONENT_EXPORT(URL) StdStringCanonOutput : public CanonOutput { +public: + StdStringCanonOutput(std::string* str); + ~StdStringCanonOutput() override; + + // Must be called after writing has completed but before the string is used. + void Complete(); + + void Resize(int sz) override; + +protected: + std::string* str_; + DISALLOW_COPY_AND_ASSIGN(StdStringCanonOutput); +}; + +} // namespace url + +#endif // URL_URL_CANON_STDSTRING_H_ diff --git a/source/common/chromium_url/url_parse.h b/source/common/chromium_url/url_parse.h new file mode 100644 index 0000000000000..31d7d3f16c1e5 --- /dev/null +++ b/source/common/chromium_url/url_parse.h @@ -0,0 +1,49 @@ +// Envoy snapshot of Chromium URL path normalization, see README.md. +// NOLINT(namespace-envoy) + +// Copyright 2013 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef URL_PARSE_H_ +#define URL_PARSE_H_ + +namespace url { + +// Component ------------------------------------------------------------------ + +// Represents a substring for URL parsing. +struct Component { + Component() : begin(0), len(-1) {} + + // Normal constructor: takes an offset and a length. + Component(int b, int l) : begin(b), len(l) {} + + int end() const { return begin + len; } + + // Returns true if this component is valid, meaning the length is given. 
Even + // valid components may be empty to record the fact that they exist. + bool is_valid() const { return (len != -1); } + + // Returns true if the given component is specified on false, the component + // is either empty or invalid. + bool is_nonempty() const { return (len > 0); } + + void reset() { + begin = 0; + len = -1; + } + + bool operator==(const Component& other) const { return begin == other.begin && len == other.len; } + + int begin; // Byte offset in the string of this component. + int len; // Will be -1 if the component is unspecified. +}; + +// Helper that returns a component created with the given begin and ending +// points. The ending point is non-inclusive. +inline Component MakeRange(int begin, int end) { return Component(begin, end - begin); } + +} // namespace url + +#endif // URL_PARSE_H_ diff --git a/source/common/chromium_url/url_parse_internal.h b/source/common/chromium_url/url_parse_internal.h new file mode 100644 index 0000000000000..a8c15819048be --- /dev/null +++ b/source/common/chromium_url/url_parse_internal.h @@ -0,0 +1,18 @@ +// Envoy snapshot of Chromium URL path normalization, see README.md. +// NOLINT(namespace-envoy) + +// Copyright 2013 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef URL_URL_PARSE_INTERNAL_H_ +#define URL_URL_PARSE_INTERNAL_H_ + +namespace url { + +// We treat slashes and backslashes the same for IE compatibility. 
+inline bool IsURLSlash(char ch) { return ch == '/' || ch == '\\'; } + +} // namespace url + +#endif // URL_URL_PARSE_INTERNAL_H_ diff --git a/tools/spelling_dictionary.txt b/tools/spelling_dictionary.txt index 01b8c8658d7c9..06b17541a93cd 100644 --- a/tools/spelling_dictionary.txt +++ b/tools/spelling_dictionary.txt @@ -148,6 +148,7 @@ MiB NACK NACKed NBF +NBSP NDEBUG NGHTTP NOLINT @@ -186,6 +187,7 @@ RCU RDN RDS RDWR +README REIMPLEMENT REQ RFC @@ -254,6 +256,7 @@ UTC UTF UUID UUIDs +VC VH VHDS VLOG @@ -319,8 +322,11 @@ broadcasted buf builtin cancellable +canonicalization canonicalize +canonicalizing canonicalized +canonicalizer cardinality casted chrono @@ -684,6 +690,7 @@ symlinked sync sys sysctl +sz tbl tcmalloc templated @@ -716,7 +723,9 @@ undrained uneject unejected unejection +unescape unescaped +unescaping uninsantiated uninstantiated unix @@ -770,6 +779,8 @@ xhtml xid xxhash xxs +xxxx +xxxxx xyz zag zig From 7ed6d2187df94c4cb96f7dccb8643bf764af2ccb Mon Sep 17 00:00:00 2001 From: htuch Date: Wed, 27 Mar 2019 12:48:04 -0400 Subject: [PATCH 061/165] hcm: path normalization. (#1) Provide the HTTP path normalization per RFC 3986 (sans case normalization). This addresses CVE-2019-9901. The config HttpConnectionManager.normalize_path needs to be set for each HCM configuration to enable (default is off). There is also a runtime optione http_connection_manager.normalize_path to change this default when not set in HCM. Risk level: Low Testing: New unit and integration tests added. 
Signed-off-by: Yuchen Dai Signed-off-by: Harvey Tuch --- .../v2/http_connection_manager.proto | 14 ++- .../configuration/http_conn_man/runtime.rst | 8 ++ docs/root/intro/version_history.rst | 6 +- source/common/http/BUILD | 13 ++ source/common/http/conn_manager_config.h | 5 + source/common/http/conn_manager_impl.cc | 10 ++ source/common/http/conn_manager_utility.cc | 11 ++ source/common/http/conn_manager_utility.h | 5 + source/common/http/header_map_impl.cc | 2 + source/common/http/header_map_impl.h | 1 + source/common/http/path_utility.cc | 55 +++++++++ source/common/http/path_utility.h | 19 +++ .../network/http_connection_manager/config.cc | 9 +- .../network/http_connection_manager/config.h | 2 + source/server/http/admin.h | 1 + test/common/http/BUILD | 9 ++ .../http/conn_manager_impl_fuzz_test.cc | 4 +- test/common/http/conn_manager_impl_test.cc | 112 +++++++++++++++++- test/common/http/conn_manager_utility_test.cc | 34 ++++++ test/common/http/path_utility_test.cc | 89 ++++++++++++++ .../http/rbac/rbac_filter_integration_test.cc | 64 ++++++++++ .../http_connection_manager/config_test.cc | 71 +++++++++++ test/integration/header_integration_test.cc | 79 ++++++++++++ 23 files changed, 617 insertions(+), 6 deletions(-) create mode 100644 source/common/http/path_utility.cc create mode 100644 source/common/http/path_utility.h create mode 100644 test/common/http/path_utility_test.cc diff --git a/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto b/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto index 71f29474a1c96..b2bc2b8e3c3e2 100644 --- a/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto +++ b/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto @@ -380,8 +380,18 @@ message HttpConnectionManager { reserved 27; - // This is reserved for a pending security fix. 
- reserved 30; + // Should paths be normalized according to RFC 3986 before any processing of + // requests by HTTP filters or routing? This affects the upstream *:path* header + // as well. For paths that fail this check, Envoy will respond with 400 to + // paths that are malformed. This defaults to false currently but will default + // true in the future. When not specified, this value may be overridden by the + // runtime variable + // :ref:`http_connection_manager.normalize_path`. + // See `Normalization and Comparison ` + // for details of normalization. + // Note that Envoy does not perform + // `case normalization ` + google.protobuf.BoolValue normalize_path = 30; } message Rds { diff --git a/docs/root/configuration/http_conn_man/runtime.rst b/docs/root/configuration/http_conn_man/runtime.rst index 22fc453b3ad93..dcc85412c6315 100644 --- a/docs/root/configuration/http_conn_man/runtime.rst +++ b/docs/root/configuration/http_conn_man/runtime.rst @@ -5,6 +5,14 @@ Runtime The HTTP connection manager supports the following runtime settings: +.. _config_http_conn_man_runtime_normalize_path: + +http_connection_manager.normalize_path + % of requests that will have path normalization applied if not already configured in + :ref:`normalize_path `. + This is evaluated at configuration load time and will apply to all requests for a given + configuration. + .. _config_http_conn_man_runtime_client_enabled: tracing.client_enabled diff --git a/docs/root/intro/version_history.rst b/docs/root/intro/version_history.rst index 6880dd4d1fa63..049cf1cba924f 100644 --- a/docs/root/intro/version_history.rst +++ b/docs/root/intro/version_history.rst @@ -81,13 +81,17 @@ Version history * tracing: added :ref:`verbose ` to support logging annotations on spans. * upstream: added support for host weighting and :ref:`locality weighting ` in the :ref:`ring hash load balancer `, and added a :ref:`maximum_ring_size` config parameter to strictly bound the ring size. 
* zookeeper: added a ZooKeeper proxy filter that parses ZooKeeper messages (requests/responses/events). - Refer to ::ref:`ZooKeeper proxy` for more details. + Refer to :ref:`ZooKeeper proxy` for more details. * upstream: added configuration option to select any host when the fallback policy fails. * upstream: stopped incrementing upstream_rq_total for HTTP/1 conn pool when request is circuit broken. 1.9.1 (Apr 2, 2019) =================== * http: fixed CVE-2019-9900 by rejecting HTTP/1.x headers with embedded NUL characters. +* http: fixed CVE-2019-9901 by normalizing HTTP paths prior to routing or L7 data plane processing. + This defaults off and is configurable via either HTTP connection manager :ref:`normalize_path + ` + or the :ref:`runtime `. 1.9.0 (Dec 20, 2018) ==================== diff --git a/source/common/http/BUILD b/source/common/http/BUILD index 903f861828657..b947d3b5293d4 100644 --- a/source/common/http/BUILD +++ b/source/common/http/BUILD @@ -141,6 +141,7 @@ envoy_cc_library( ":exception_lib", ":header_map_lib", ":headers_lib", + ":path_utility_lib", ":user_agent_lib", ":utility_lib", "//include/envoy/access_log:access_log_interface", @@ -314,3 +315,15 @@ envoy_cc_library( "@envoy_api//envoy/type:range_cc", ], ) + +envoy_cc_library( + name = "path_utility_lib", + srcs = ["path_utility.cc"], + hdrs = ["path_utility.h"], + external_deps = ["abseil_optional"], + deps = [ + "//include/envoy/http:header_map_interface", + "//source/common/chromium_url", + "//source/common/common:logger_lib", + ], +) diff --git a/source/common/http/conn_manager_config.h b/source/common/http/conn_manager_config.h index d5cb27c025fbc..2fb4ee9e9c575 100644 --- a/source/common/http/conn_manager_config.h +++ b/source/common/http/conn_manager_config.h @@ -340,6 +340,11 @@ class ConnectionManagerConfig { * @return supplies the http1 settings. 
*/ virtual const Http::Http1Settings& http1Settings() const PURE; + + /** + * @return if the HttpConnectionManager should normalize url following RFC3986 + */ + virtual bool shouldNormalizePath() const PURE; }; } // namespace Http } // namespace Envoy diff --git a/source/common/http/conn_manager_impl.cc b/source/common/http/conn_manager_impl.cc index 9c23eb39b6d1a..80932dc69bd11 100644 --- a/source/common/http/conn_manager_impl.cc +++ b/source/common/http/conn_manager_impl.cc @@ -29,9 +29,11 @@ #include "common/http/headers.h" #include "common/http/http1/codec_impl.h" #include "common/http/http2/codec_impl.h" +#include "common/http/path_utility.h" #include "common/http/utility.h" #include "common/network/utility.h" +#include "absl/strings/escaping.h" #include "absl/strings/match.h" namespace Envoy { @@ -666,6 +668,14 @@ void ConnectionManagerImpl::ActiveStream::decodeHeaders(HeaderMapPtr&& headers, return; } + // Path sanitization should happen before any path access other than the above sanity check. 
+ if (!ConnectionManagerUtility::maybeNormalizePath(*request_headers_, + connection_manager_.config_)) { + sendLocalReply(Grpc::Common::hasGrpcContentType(*request_headers_), Code::BadRequest, "", + nullptr, is_head_request_, absl::nullopt); + return; + } + if (protocol == Protocol::Http11 && request_headers_->Connection() && absl::EqualsIgnoreCase(request_headers_->Connection()->value().getStringView(), Http::Headers::get().ConnectionValues.Close)) { diff --git a/source/common/http/conn_manager_utility.cc b/source/common/http/conn_manager_utility.cc index f85965cf05cc5..164b8712c2954 100644 --- a/source/common/http/conn_manager_utility.cc +++ b/source/common/http/conn_manager_utility.cc @@ -10,6 +10,7 @@ #include "common/http/headers.h" #include "common/http/http1/codec_impl.h" #include "common/http/http2/codec_impl.h" +#include "common/http/path_utility.h" #include "common/http/utility.h" #include "common/network/utility.h" #include "common/runtime/uuid_util.h" @@ -364,5 +365,15 @@ void ConnectionManagerUtility::mutateResponseHeaders(HeaderMap& response_headers } } +/* static */ +bool ConnectionManagerUtility::maybeNormalizePath(HeaderMap& request_headers, + const ConnectionManagerConfig& config) { + ASSERT(request_headers.Path()); + if (config.shouldNormalizePath()) { + return PathUtil::canonicalPath(*request_headers.Path()); + } + return true; +} + } // namespace Http } // namespace Envoy diff --git a/source/common/http/conn_manager_utility.h b/source/common/http/conn_manager_utility.h index 0d313f185e649..126982df77533 100644 --- a/source/common/http/conn_manager_utility.h +++ b/source/common/http/conn_manager_utility.h @@ -59,6 +59,11 @@ class ConnectionManagerUtility { static void mutateResponseHeaders(HeaderMap& response_headers, const HeaderMap* request_headers, const std::string& via); + // Sanitize the path in the header map if forced by config. + // Side affect: the string view of Path header is invalidated. 
+ // Return false if error happens during the sanitization. + static bool maybeNormalizePath(HeaderMap& request_headers, const ConnectionManagerConfig& config); + private: /** * Mutate request headers if request needs to be traced. diff --git a/source/common/http/header_map_impl.cc b/source/common/http/header_map_impl.cc index 56185e9058ec5..66500d425383d 100644 --- a/source/common/http/header_map_impl.cc +++ b/source/common/http/header_map_impl.cc @@ -349,6 +349,8 @@ bool HeaderMapImpl::operator==(const HeaderMapImpl& rhs) const { return true; } +bool HeaderMapImpl::operator!=(const HeaderMapImpl& rhs) const { return !operator==(rhs); } + void HeaderMapImpl::insertByKey(HeaderString&& key, HeaderString&& value) { EntryCb cb = ConstSingleton::get().find(key.c_str()); if (cb) { diff --git a/source/common/http/header_map_impl.h b/source/common/http/header_map_impl.h index 564fbd0c7495e..376311b4536f7 100644 --- a/source/common/http/header_map_impl.h +++ b/source/common/http/header_map_impl.h @@ -61,6 +61,7 @@ class HeaderMapImpl : public HeaderMap, NonCopyable { * comparison (order matters). 
*/ bool operator==(const HeaderMapImpl& rhs) const; + bool operator!=(const HeaderMapImpl& rhs) const; // Http::HeaderMap void addReference(const LowerCaseString& key, const std::string& value) override; diff --git a/source/common/http/path_utility.cc b/source/common/http/path_utility.cc new file mode 100644 index 0000000000000..796c2c1cbd52b --- /dev/null +++ b/source/common/http/path_utility.cc @@ -0,0 +1,55 @@ +#include "common/http/path_utility.h" + +#include "common/chromium_url/url_canon.h" +#include "common/chromium_url/url_canon_stdstring.h" +#include "common/common/logger.h" + +#include "absl/strings/string_view.h" +#include "absl/types/optional.h" + +namespace Envoy { +namespace Http { + +namespace { +absl::optional canonicalizePath(absl::string_view original_path) { + std::string canonical_path; + url::Component in_component(0, original_path.size()); + url::Component out_component; + url::StdStringCanonOutput output(&canonical_path); + if (!url::CanonicalizePath(original_path.data(), in_component, &output, &out_component)) { + return absl::nullopt; + } else { + output.Complete(); + return absl::make_optional(std::move(canonical_path)); + } +} +} // namespace + +/* static */ +bool PathUtil::canonicalPath(HeaderEntry& path_header) { + const auto original_path = path_header.value().getStringView(); + // canonicalPath is supposed to apply on path component in URL instead of :path header + const auto query_pos = original_path.find('?'); + auto normalized_path_opt = canonicalizePath( + query_pos == original_path.npos + ? original_path + : absl::string_view(original_path.data(), query_pos) // '?' is not included + ); + + if (!normalized_path_opt.has_value()) { + return false; + } + auto& normalized_path = normalized_path_opt.value(); + const absl::string_view query_suffix = + query_pos == original_path.npos + ? 
absl::string_view{} + : absl::string_view{original_path.data() + query_pos, original_path.size() - query_pos}; + if (query_suffix.size() > 0) { + normalized_path.insert(normalized_path.end(), query_suffix.begin(), query_suffix.end()); + } + path_header.value(std::move(normalized_path)); + return true; +} + +} // namespace Http +} // namespace Envoy diff --git a/source/common/http/path_utility.h b/source/common/http/path_utility.h new file mode 100644 index 0000000000000..ad0d32c3ff7d6 --- /dev/null +++ b/source/common/http/path_utility.h @@ -0,0 +1,19 @@ +#pragma once + +#include "envoy/http/header_map.h" + +namespace Envoy { +namespace Http { + +/** + * Path helper extracted from chromium project. + */ +class PathUtil { +public: + // Returns if the normalization succeeds. + // If it is successful, the param will be updated with the normalized path. + static bool canonicalPath(HeaderEntry& path_header); +}; + +} // namespace Http +} // namespace Envoy diff --git a/source/extensions/filters/network/http_connection_manager/config.cc b/source/extensions/filters/network/http_connection_manager/config.cc index e5069bbf39e69..0b0b294913e7d 100644 --- a/source/extensions/filters/network/http_connection_manager/config.cc +++ b/source/extensions/filters/network/http_connection_manager/config.cc @@ -150,7 +150,14 @@ HttpConnectionManagerConfig::HttpConnectionManagerConfig( listener_stats_(Http::ConnectionManagerImpl::generateListenerStats(stats_prefix_, context_.listenerScope())), proxy_100_continue_(config.proxy_100_continue()), - delayed_close_timeout_(PROTOBUF_GET_MS_OR_DEFAULT(config, delayed_close_timeout, 1000)) { + delayed_close_timeout_(PROTOBUF_GET_MS_OR_DEFAULT(config, delayed_close_timeout, 1000)), + normalize_path_( + PROTOBUF_GET_WRAPPED_OR_DEFAULT(config, normalize_path, + // TODO(htuch): we should have a + // boolean variant of featureEnabled() + // here. 
+ context.runtime().snapshot().featureEnabled( + "http_connection_manager.normalize_path", 0))) { route_config_provider_ = Router::RouteConfigProviderUtil::create(config, context_, stats_prefix_, route_config_provider_manager_); diff --git a/source/extensions/filters/network/http_connection_manager/config.h b/source/extensions/filters/network/http_connection_manager/config.h index 0d6bcd87668a2..c1e7332b653c1 100644 --- a/source/extensions/filters/network/http_connection_manager/config.h +++ b/source/extensions/filters/network/http_connection_manager/config.h @@ -127,6 +127,7 @@ class HttpConnectionManagerConfig : Logger::Loggable, Http::ConnectionManagerListenerStats& listenerStats() override { return listener_stats_; } bool proxy100Continue() const override { return proxy_100_continue_; } const Http::Http1Settings& http1Settings() const override { return http1_settings_; } + bool shouldNormalizePath() const override { return normalize_path_; } std::chrono::milliseconds delayedCloseTimeout() const override { return delayed_close_timeout_; } private: @@ -167,6 +168,7 @@ class HttpConnectionManagerConfig : Logger::Loggable, Http::ConnectionManagerListenerStats listener_stats_; const bool proxy_100_continue_; std::chrono::milliseconds delayed_close_timeout_; + const bool normalize_path_; // Default idle timeout is 5 minutes if nothing is specified in the HCM config. 
static const uint64_t StreamIdleTimeoutMs = 5 * 60 * 1000; diff --git a/source/server/http/admin.h b/source/server/http/admin.h index 31d8511a9c579..7bffdeb2b0367 100644 --- a/source/server/http/admin.h +++ b/source/server/http/admin.h @@ -126,6 +126,7 @@ class AdminImpl : public Admin, Http::ConnectionManagerListenerStats& listenerStats() override { return listener_->stats_; } bool proxy100Continue() const override { return false; } const Http::Http1Settings& http1Settings() const override { return http1_settings_; } + bool shouldNormalizePath() const override { return true; } Http::Code request(absl::string_view path_and_query, absl::string_view method, Http::HeaderMap& response_headers, std::string& body) override; void closeSocket(); diff --git a/test/common/http/BUILD b/test/common/http/BUILD index 91e16157e144c..aaf0f6d92024c 100644 --- a/test/common/http/BUILD +++ b/test/common/http/BUILD @@ -321,3 +321,12 @@ envoy_cc_test( "//test/test_common:utility_lib", ], ) + +envoy_cc_test( + name = "path_utility_test", + srcs = ["path_utility_test.cc"], + deps = [ + "//source/common/http:header_map_lib", + "//source/common/http:path_utility_lib", + ], +) diff --git a/test/common/http/conn_manager_impl_fuzz_test.cc b/test/common/http/conn_manager_impl_fuzz_test.cc index 334edf234f8fb..07762102ba9ae 100644 --- a/test/common/http/conn_manager_impl_fuzz_test.cc +++ b/test/common/http/conn_manager_impl_fuzz_test.cc @@ -116,6 +116,7 @@ class FuzzConfig : public ConnectionManagerConfig { ConnectionManagerListenerStats& listenerStats() override { return listener_stats_; } bool proxy100Continue() const override { return proxy_100_continue_; } const Http::Http1Settings& http1Settings() const override { return http1_settings_; } + bool shouldNormalizePath() const override { return false; } const envoy::config::filter::network::http_connection_manager::v2::HttpConnectionManager config_; std::list access_logs_; @@ -142,9 +143,10 @@ class FuzzConfig : public ConnectionManagerConfig 
{ Network::Address::Ipv4Instance local_address_{"127.0.0.1"}; absl::optional user_agent_; TracingConnectionManagerConfigPtr tracing_config_; - bool proxy_100_continue_ = true; + bool proxy_100_continue_{true}; Http::Http1Settings http1_settings_; Http::DefaultInternalAddressConfig internal_address_config_; + bool normalize_path_{true}; }; // Internal representation of stream state. Encapsulates the stream state, mocks diff --git a/test/common/http/conn_manager_impl_test.cc b/test/common/http/conn_manager_impl_test.cc index cbec81157ee1f..05a9796879fa2 100644 --- a/test/common/http/conn_manager_impl_test.cc +++ b/test/common/http/conn_manager_impl_test.cc @@ -257,6 +257,7 @@ class HttpConnectionManagerImplTest : public testing::Test, public ConnectionMan ConnectionManagerListenerStats& listenerStats() override { return listener_stats_; } bool proxy100Continue() const override { return proxy_100_continue_; } const Http::Http1Settings& http1Settings() const override { return http1_settings_; } + bool shouldNormalizePath() const override { return normalize_path_; } DangerousDeprecatedTestTime test_time_; RouteConfigProvider route_config_provider_; @@ -302,6 +303,7 @@ class HttpConnectionManagerImplTest : public testing::Test, public ConnectionMan ConnectionManagerListenerStats listener_stats_; bool proxy_100_continue_ = false; Http::Http1Settings http1_settings_; + bool normalize_path_ = false; NiceMock upstream_conn_; // for websocket tests NiceMock conn_pool_; // for websocket tests @@ -540,6 +542,115 @@ TEST_F(HttpConnectionManagerImplTest, InvalidPathWithDualFilter) { conn_manager_->onData(fake_input, false); } +// Invalid paths are rejected with 400. 
+TEST_F(HttpConnectionManagerImplTest, PathFailedtoSanitize) { + InSequence s; + setup(false, ""); + // Enable path sanitizer + normalize_path_ = true; + + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> void { + StreamDecoder* decoder = &conn_manager_->newStream(response_encoder_); + HeaderMapPtr headers{ + new TestHeaderMapImpl{{":authority", "host"}, + {":path", "/ab%00c"}, // "%00" is not valid in path according to RFC + {":method", "GET"}}}; + decoder->decodeHeaders(std::move(headers), true); + data.drain(4); + })); + + // This test also verifies that decoder/encoder filters have onDestroy() called only once. + MockStreamFilter* filter = new MockStreamFilter(); + EXPECT_CALL(filter_factory_, createFilterChain(_)) + .WillOnce(Invoke([&](FilterChainFactoryCallbacks& callbacks) -> void { + callbacks.addStreamFilter(StreamFilterSharedPtr{filter}); + })); + EXPECT_CALL(*filter, setDecoderFilterCallbacks(_)); + EXPECT_CALL(*filter, setEncoderFilterCallbacks(_)); + + EXPECT_CALL(*filter, encodeHeaders(_, true)); + EXPECT_CALL(response_encoder_, encodeHeaders(_, true)) + .WillOnce(Invoke([](const HeaderMap& headers, bool) -> void { + EXPECT_STREQ("400", headers.Status()->value().c_str()); + })); + EXPECT_CALL(*filter, onDestroy()); + + Buffer::OwnedImpl fake_input("1234"); + conn_manager_->onData(fake_input, false); +} + +// Filters observe normalized paths, not the original path, when path +// normalization is configured. 
+TEST_F(HttpConnectionManagerImplTest, FilterShouldUseSantizedPath) { + setup(false, ""); + // Enable path sanitizer + normalize_path_ = true; + const std::string original_path = "/x/%2E%2e/z"; + const std::string normalized_path = "/z"; + + MockStreamFilter* filter = new MockStreamFilter(); + + EXPECT_CALL(filter_factory_, createFilterChain(_)) + .WillOnce(Invoke([&](FilterChainFactoryCallbacks& callbacks) -> void { + callbacks.addStreamDecoderFilter(StreamDecoderFilterSharedPtr{filter}); + })); + + EXPECT_CALL(*filter, decodeHeaders(_, true)) + .WillRepeatedly(Invoke([&](HeaderMap& header_map, bool) -> FilterHeadersStatus { + EXPECT_EQ(normalized_path, header_map.Path()->value().c_str()); + return FilterHeadersStatus::StopIteration; + })); + + EXPECT_CALL(*filter, setDecoderFilterCallbacks(_)); + + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { + StreamDecoder* decoder = &conn_manager_->newStream(response_encoder_); + HeaderMapPtr headers{new TestHeaderMapImpl{ + {":authority", "host"}, {":path", original_path}, {":method", "GET"}}}; + decoder->decodeHeaders(std::move(headers), true); + })); + + // Kick off the incoming data. + Buffer::OwnedImpl fake_input("1234"); + conn_manager_->onData(fake_input, false); +} + +// The router observes normalized paths, not the original path, when path +// normalization is configured. 
+TEST_F(HttpConnectionManagerImplTest, RouteShouldUseSantizedPath) { + setup(false, ""); + // Enable path sanitizer + normalize_path_ = true; + const std::string original_path = "/x/%2E%2e/z"; + const std::string normalized_path = "/z"; + + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { + StreamDecoder* decoder = &conn_manager_->newStream(response_encoder_); + HeaderMapPtr headers{new TestHeaderMapImpl{ + {":authority", "host"}, {":path", original_path}, {":method", "GET"}}}; + decoder->decodeHeaders(std::move(headers), true); + })); + + const std::string fake_cluster_name = "fake_cluster"; + + std::shared_ptr fake_cluster = + std::make_shared>(); + std::shared_ptr route = std::make_shared>(); + EXPECT_CALL(route->route_entry_, clusterName()).WillRepeatedly(ReturnRef(fake_cluster_name)); + + EXPECT_CALL(*route_config_provider_.route_config_, route(_, _)) + .WillOnce(Invoke([&](const Http::HeaderMap& header_map, uint64_t) { + EXPECT_EQ(normalized_path, header_map.Path()->value().c_str()); + return route; + })); + EXPECT_CALL(filter_factory_, createFilterChain(_)) + .WillOnce(Invoke([&](FilterChainFactoryCallbacks&) -> void {})); + + // Kick off the incoming data. 
+ Buffer::OwnedImpl fake_input("1234"); + conn_manager_->onData(fake_input, false); +} + TEST_F(HttpConnectionManagerImplTest, StartAndFinishSpanNormalFlow) { setup(false, ""); @@ -3905,6 +4016,5 @@ TEST_F(HttpConnectionManagerImplTest, OverlyLongHeadersAcceptedIfConfigured) { Buffer::OwnedImpl fake_input("1234"); conn_manager_->onData(fake_input, false); // kick off request } - } // namespace Http } // namespace Envoy diff --git a/test/common/http/conn_manager_utility_test.cc b/test/common/http/conn_manager_utility_test.cc index 523cfb01ee9bd..d1bc974e8b8c3 100644 --- a/test/common/http/conn_manager_utility_test.cc +++ b/test/common/http/conn_manager_utility_test.cc @@ -76,6 +76,7 @@ class MockConnectionManagerConfig : public ConnectionManagerConfig { MOCK_METHOD0(listenerStats, ConnectionManagerListenerStats&()); MOCK_CONST_METHOD0(proxy100Continue, bool()); MOCK_CONST_METHOD0(http1Settings, const Http::Http1Settings&()); + MOCK_CONST_METHOD0(shouldNormalizePath, bool()); std::unique_ptr internal_address_config_ = std::make_unique(); @@ -1101,5 +1102,38 @@ TEST_F(ConnectionManagerUtilityTest, RemovesProxyResponseHeaders) { EXPECT_FALSE(response_headers.has("proxy-connection")); } +// maybeNormalizePath() does nothing by default. +TEST_F(ConnectionManagerUtilityTest, SanitizePathDefaultOff) { + ON_CALL(config_, shouldNormalizePath()).WillByDefault(Return(false)); + HeaderMapImpl original_headers; + original_headers.insertPath().value(std::string("/xyz/../a")); + + HeaderMapImpl header_map(static_cast(original_headers)); + ConnectionManagerUtility::maybeNormalizePath(header_map, config_); + EXPECT_EQ(original_headers, header_map); +} + +// maybeNormalizePath() leaves already normal paths alone. 
+TEST_F(ConnectionManagerUtilityTest, SanitizePathNormalPath) { + ON_CALL(config_, shouldNormalizePath()).WillByDefault(Return(true)); + HeaderMapImpl original_headers; + original_headers.insertPath().value(std::string("/xyz")); + + HeaderMapImpl header_map(static_cast(original_headers)); + ConnectionManagerUtility::maybeNormalizePath(header_map, config_); + EXPECT_EQ(original_headers, header_map); +} + +// maybeNormalizePath() normalizes relative paths. +TEST_F(ConnectionManagerUtilityTest, SanitizePathRelativePAth) { + ON_CALL(config_, shouldNormalizePath()).WillByDefault(Return(true)); + HeaderMapImpl original_headers; + original_headers.insertPath().value(std::string("/xyz/../abc")); + + HeaderMapImpl header_map(static_cast(original_headers)); + ConnectionManagerUtility::maybeNormalizePath(header_map, config_); + EXPECT_EQ(header_map.Path()->value().getStringView(), "/abc"); +} + } // namespace Http } // namespace Envoy diff --git a/test/common/http/path_utility_test.cc b/test/common/http/path_utility_test.cc new file mode 100644 index 0000000000000..2cc299465add0 --- /dev/null +++ b/test/common/http/path_utility_test.cc @@ -0,0 +1,89 @@ +#include +#include + +#include "common/http/header_map_impl.h" +#include "common/http/path_utility.h" + +#include "gtest/gtest.h" + +namespace Envoy { +namespace Http { + +class PathUtilityTest : public testing::Test { +public: + // This is an indirect way to build a header entry for + // PathUtil::canonicalPath(), since we don't have direct access to the + // HeaderMapImpl constructor. + HeaderEntry& pathHeaderEntry(const std::string& path_value) { + headers_.insertPath().value(path_value); + return *headers_.Path(); + } + HeaderMapImpl headers_; +}; + +// Already normalized path don't change. 
+TEST_F(PathUtilityTest, AlreadyNormalPaths) { + const std::vector normal_paths{"/xyz", "/x/y/z"}; + for (const auto& path : normal_paths) { + auto& path_header = pathHeaderEntry(path); + const auto result = PathUtil::canonicalPath(path_header); + EXPECT_TRUE(result) << "original path: " << path; + EXPECT_EQ(path_header.value().getStringView(), absl::string_view(path)); + } +} + +// Invalid paths are rejected. +TEST_F(PathUtilityTest, InvalidPaths) { + const std::vector invalid_paths{"/xyz/.%00../abc", "/xyz/%00.%00./abc", + "/xyz/AAAAA%%0000/abc"}; + for (const auto& path : invalid_paths) { + auto& path_header = pathHeaderEntry(path); + EXPECT_FALSE(PathUtil::canonicalPath(path_header)) << "original path: " << path; + } +} + +// Paths that are valid get normalized. +TEST_F(PathUtilityTest, NormalizeValidPaths) { + const std::vector> non_normal_pairs{ + {"/a/b/../c", "/a/c"}, // parent dir + {"/a/b/./c", "/a/b/c"}, // current dir + {"a/b/../c", "/a/c"}, // non / start + {"/a/b/../../../../c", "/c"}, // out number parent + {"/a/..\\c", "/c"}, // "..\\" canonicalization + {"/%c0%af", "/%c0%af"}, // 2 bytes unicode reserved characters + {"/%5c%25", "/%5c%25"}, // reserved characters + {"/a/b/%2E%2E/c", "/a/c"} // %2E escape + }; + + for (const auto& path_pair : non_normal_pairs) { + auto& path_header = pathHeaderEntry(path_pair.first); + const auto result = PathUtil::canonicalPath(path_header); + EXPECT_TRUE(result) << "original path: " << path_pair.first; + EXPECT_EQ(path_header.value().getStringView(), path_pair.second) + << "original path: " << path_pair.second; + } +} + +// Paths that are valid get normalized. +TEST_F(PathUtilityTest, NormalizeCasePath) { + const std::vector> non_normal_pairs{ + {"/A/B/C", "/A/B/C"}, // not normalize to lower case + {"/a/b/%2E%2E/c", "/a/c"}, // %2E can be normalized to . + {"/a/b/%2e%2e/c", "/a/c"}, // %2e can be normalized to . 
+ {"/a/%2F%2f/c", "/a/%2F%2f/c"}, // %2F is not normalized to %2f + }; + + for (const auto& path_pair : non_normal_pairs) { + auto& path_header = pathHeaderEntry(path_pair.first); + const auto result = PathUtil::canonicalPath(path_header); + EXPECT_TRUE(result) << "original path: " << path_pair.first; + EXPECT_EQ(path_header.value().getStringView(), path_pair.second) + << "original path: " << path_pair.first; + } +} +// These test cases are explicitly not covered above: +// "/../c\r\n\" '\n' '\r' should be excluded by http parser +// "/a/\0c", '\0' should be excluded by http parser + +} // namespace Http +} // namespace Envoy diff --git a/test/extensions/filters/http/rbac/rbac_filter_integration_test.cc b/test/extensions/filters/http/rbac/rbac_filter_integration_test.cc index cc2b654ac0573..d79359d41a067 100644 --- a/test/extensions/filters/http/rbac/rbac_filter_integration_test.cc +++ b/test/extensions/filters/http/rbac/rbac_filter_integration_test.cc @@ -19,6 +19,18 @@ name: envoy.filters.http.rbac - any: true )EOF"; +const std::string RBAC_CONFIG_WITH_PREFIX_MATCH = R"EOF( +name: envoy.filters.http.rbac +config: + rules: + policies: + foo: + permissions: + - header: { name: ":path", prefix_match: "/foo" } + principals: + - any: true +)EOF"; + typedef HttpProtocolIntegrationTest RBACIntegrationTest; INSTANTIATE_TEST_SUITE_P(Protocols, RBACIntegrationTest, @@ -68,6 +80,58 @@ TEST_P(RBACIntegrationTest, Denied) { EXPECT_STREQ("403", response->headers().Status()->value().c_str()); } +TEST_P(RBACIntegrationTest, DeniedWithPrefixRule) { + config_helper_.addConfigModifier( + [](envoy::config::filter::network::http_connection_manager::v2::HttpConnectionManager& cfg) { + cfg.mutable_normalize_path()->set_value(false); + }); + config_helper_.addFilter(RBAC_CONFIG_WITH_PREFIX_MATCH); + initialize(); + + codec_client_ = makeHttpConnection(lookupPort("http")); + + auto response = codec_client_->makeRequestWithBody( + Http::TestHeaderMapImpl{ + {":method", "POST"},
{":path", "/foo/../bar"}, + {":scheme", "http"}, + {":authority", "host"}, + {"x-forwarded-for", "10.0.0.1"}, + }, + 1024); + waitForNextUpstreamRequest(); + upstream_request_->encodeHeaders(Http::TestHeaderMapImpl{{":status", "200"}}, true); + + response->waitForEndStream(); + ASSERT_TRUE(response->complete()); + EXPECT_STREQ("200", response->headers().Status()->value().c_str()); +} + +TEST_P(RBACIntegrationTest, RbacPrefixRuleUseNormalizePath) { + config_helper_.addConfigModifier( + [](envoy::config::filter::network::http_connection_manager::v2::HttpConnectionManager& cfg) { + cfg.mutable_normalize_path()->set_value(true); + }); + config_helper_.addFilter(RBAC_CONFIG_WITH_PREFIX_MATCH); + initialize(); + + codec_client_ = makeHttpConnection(lookupPort("http")); + + auto response = codec_client_->makeRequestWithBody( + Http::TestHeaderMapImpl{ + {":method", "POST"}, + {":path", "/foo/../bar"}, + {":scheme", "http"}, + {":authority", "host"}, + {"x-forwarded-for", "10.0.0.1"}, + }, + 1024); + + response->waitForEndStream(); + ASSERT_TRUE(response->complete()); + EXPECT_STREQ("403", response->headers().Status()->value().c_str()); +} + TEST_P(RBACIntegrationTest, DeniedHeadReply) { config_helper_.addFilter(RBAC_CONFIG); initialize(); diff --git a/test/extensions/filters/network/http_connection_manager/config_test.cc b/test/extensions/filters/network/http_connection_manager/config_test.cc index d11761322838b..aec8ee0da0b38 100644 --- a/test/extensions/filters/network/http_connection_manager/config_test.cc +++ b/test/extensions/filters/network/http_connection_manager/config_test.cc @@ -186,6 +186,77 @@ TEST_F(HttpConnectionManagerConfigTest, DisabledStreamIdleTimeout) { EXPECT_EQ(0, config.streamIdleTimeout().count()); } +// Validated that by default we don't normalize paths +TEST_F(HttpConnectionManagerConfigTest, NormalizePathDefault) { + const std::string yaml_string = R"EOF( + stat_prefix: ingress_http + route_config: + name: local_route + http_filters: + - name: 
envoy.router + )EOF"; + + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, + date_provider_, route_config_provider_manager_); + EXPECT_FALSE(config.shouldNormalizePath()); +} + +// Validated that we normalize paths with runtime override when not specified. +TEST_F(HttpConnectionManagerConfigTest, NormalizePathRuntime) { + const std::string yaml_string = R"EOF( + stat_prefix: ingress_http + route_config: + name: local_route + http_filters: + - name: envoy.router + )EOF"; + + EXPECT_CALL(context_.runtime_loader_.snapshot_, + featureEnabled("http_connection_manager.normalize_path", 0)) + .WillOnce(Return(true)); + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, + date_provider_, route_config_provider_manager_); + EXPECT_TRUE(config.shouldNormalizePath()); +} + +// Validated that when configured, we normalize paths, ignoring runtime. +TEST_F(HttpConnectionManagerConfigTest, NormalizePathTrue) { + const std::string yaml_string = R"EOF( + stat_prefix: ingress_http + route_config: + name: local_route + normalize_path: true + http_filters: + - name: envoy.router + )EOF"; + + EXPECT_CALL(context_.runtime_loader_.snapshot_, + featureEnabled("http_connection_manager.normalize_path", 0)) + .Times(0); + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, + date_provider_, route_config_provider_manager_); + EXPECT_TRUE(config.shouldNormalizePath()); +} + +// Validated that when explicitly set false, we don't normalize, ignoring runtime. 
+TEST_F(HttpConnectionManagerConfigTest, NormalizePathFalse) { + const std::string yaml_string = R"EOF( + stat_prefix: ingress_http + route_config: + name: local_route + normalize_path: false + http_filters: + - name: envoy.router + )EOF"; + + EXPECT_CALL(context_.runtime_loader_.snapshot_, + featureEnabled("http_connection_manager.normalize_path", 0)) + .Times(0); + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, + date_provider_, route_config_provider_manager_); + EXPECT_FALSE(config.shouldNormalizePath()); +} + TEST_F(HttpConnectionManagerConfigTest, ConfiguredRequestTimeout) { const std::string yaml_string = R"EOF( stat_prefix: ingress_http diff --git a/test/integration/header_integration_test.cc b/test/integration/header_integration_test.cc index 484b4430b005e..bfe95607a17a3 100644 --- a/test/integration/header_integration_test.cc +++ b/test/integration/header_integration_test.cc @@ -145,6 +145,23 @@ stat_prefix: header_test - header: key: "authorization" value: "token2" + - name: path-sanitization + domains: ["path-sanitization.com"] + routes: + - match: { prefix: "/private" } + route: + cluster: cluster_0 + request_headers_to_add: + - header: + key: "x-site" + value: "private" + - match: { prefix: "/public" } + route: + cluster: cluster_0 + request_headers_to_add: + - header: + key: "x-site" + value: "public" )EOF"; } // namespace @@ -294,6 +311,8 @@ class HeaderIntegrationTest } } + hcm.mutable_normalize_path()->set_value(normalize_path_); + if (append) { // The config specifies append by default: no modifications needed. return; @@ -412,6 +431,7 @@ class HeaderIntegrationTest } bool use_eds_{false}; + bool normalize_path_{false}; FakeHttpConnectionPtr eds_connection_; FakeStreamPtr eds_stream_; }; @@ -988,4 +1008,63 @@ TEST_P(HeaderIntegrationTest, TestAppendSameHeaders) { }); } +// Validates behavior when normalize path is off. 
+// Route selection and path to upstream are the exact string literal +// from downstream. +TEST_P(HeaderIntegrationTest, TestPathAndRouteWhenNormalizePathOff) { + normalize_path_ = false; + initializeFilter(HeaderMode::Append, false); + performRequest( + Http::TestHeaderMapImpl{ + {":method", "GET"}, + {":path", "/private/../public"}, + {":scheme", "http"}, + {":authority", "path-sanitization.com"}, + }, + Http::TestHeaderMapImpl{{":authority", "path-sanitization.com"}, + {":path", "/private/../public"}, + {":method", "GET"}, + {"x-site", "private"}}, + Http::TestHeaderMapImpl{ + {"server", "envoy"}, + {"content-length", "0"}, + {":status", "200"}, + {"x-unmodified", "response"}, + }, + Http::TestHeaderMapImpl{ + {"server", "envoy"}, + {"x-unmodified", "response"}, + {":status", "200"}, + }); +} + +// Validates behavior when normalize path is on. +// Path to decide route and path to upstream are both +// the normalized. +TEST_P(HeaderIntegrationTest, TestPathAndRouteOnNormalizedPath) { + normalize_path_ = true; + initializeFilter(HeaderMode::Append, false); + performRequest( + Http::TestHeaderMapImpl{ + {":method", "GET"}, + {":path", "/private/../public"}, + {":scheme", "http"}, + {":authority", "path-sanitization.com"}, + }, + Http::TestHeaderMapImpl{{":authority", "path-sanitization.com"}, + {":path", "/public"}, + {":method", "GET"}, + {"x-site", "public"}}, + Http::TestHeaderMapImpl{ + {"server", "envoy"}, + {"content-length", "0"}, + {":status", "200"}, + {"x-unmodified", "response"}, + }, + Http::TestHeaderMapImpl{ + {"server", "envoy"}, + {"x-unmodified", "response"}, + {":status", "200"}, + }); +} } // namespace Envoy From e95ef6bc43daeda16451ad4ef20979d8e07a5299 Mon Sep 17 00:00:00 2001 From: Matt Klein Date: Fri, 5 Apr 2019 11:07:44 -0700 Subject: [PATCH 062/165] release: bump to 1.10.0 (#6489) Signed-off-by: Matt Klein --- DEPRECATED.md | 2 +- VERSION | 2 +- docs/root/intro/version_history.rst | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) 
diff --git a/DEPRECATED.md b/DEPRECATED.md index 9743d8d3a910b..d8c54f5b59c45 100644 --- a/DEPRECATED.md +++ b/DEPRECATED.md @@ -6,7 +6,7 @@ As of release 1.3.0, Envoy will follow a The following features have been DEPRECATED and will be removed in the specified release cycle. A logged warning is expected for each deprecated item that is in deprecation window. -## Version 1.10.0 (pending) +## Version 1.10.0 (Apr 5, 2019) * Use of `use_alpha` in [Ext-Authz Authorization Service](https://github.com/envoyproxy/envoy/blob/master/api/envoy/service/auth/v2/external_auth.proto) is deprecated. It should be used for a short time, and only when transitioning from alpha to V2 release version. * Use of `enabled` in `CorsPolicy`, found in [route.proto](https://github.com/envoyproxy/envoy/blob/master/api/envoy/api/v2/route/route.proto). diff --git a/VERSION b/VERSION index a01185b4d67a2..81c871de46b3e 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -1.10.0-dev +1.10.0 diff --git a/docs/root/intro/version_history.rst b/docs/root/intro/version_history.rst index 049cf1cba924f..6ff2666c85475 100644 --- a/docs/root/intro/version_history.rst +++ b/docs/root/intro/version_history.rst @@ -1,8 +1,8 @@ Version history --------------- -1.10.0 (pending) -================ +1.10.0 (Apr 5, 2019) +==================== * access log: added a new flag for upstream retry count exceeded. * access log: added a :ref:`gRPC filter ` to allow filtering on gRPC status. * access log: added a new flag for stream idle timeout. 
From 4179bb726ed6e4a9d8e9f815af47b52ebd94230a Mon Sep 17 00:00:00 2001 From: Matt Klein Date: Fri, 5 Apr 2019 11:44:53 -0700 Subject: [PATCH 063/165] bump to 1.11.0-dev (#6490) Signed-off-by: Matt Klein --- DEPRECATED.md | 2 ++ VERSION | 2 +- docs/root/intro/version_history.rst | 3 +++ 3 files changed, 6 insertions(+), 1 deletion(-) diff --git a/DEPRECATED.md b/DEPRECATED.md index d8c54f5b59c45..d7320aa7d87ea 100644 --- a/DEPRECATED.md +++ b/DEPRECATED.md @@ -6,6 +6,8 @@ As of release 1.3.0, Envoy will follow a The following features have been DEPRECATED and will be removed in the specified release cycle. A logged warning is expected for each deprecated item that is in deprecation window. +## Version 1.11.0 (Pending) + ## Version 1.10.0 (Apr 5, 2019) * Use of `use_alpha` in [Ext-Authz Authorization Service](https://github.com/envoyproxy/envoy/blob/master/api/envoy/service/auth/v2/external_auth.proto) is deprecated. It should be used for a short time, and only when transitioning from alpha to V2 release version. * Use of `enabled` in `CorsPolicy`, found in diff --git a/VERSION b/VERSION index 81c871de46b3e..1f724bf455d78 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -1.10.0 +1.11.0-dev diff --git a/docs/root/intro/version_history.rst b/docs/root/intro/version_history.rst index 6ff2666c85475..04048c2037ade 100644 --- a/docs/root/intro/version_history.rst +++ b/docs/root/intro/version_history.rst @@ -1,6 +1,9 @@ Version history --------------- +1.11.0 (Pending) +================ + 1.10.0 (Apr 5, 2019) ==================== * access log: added a new flag for upstream retry count exceeded. 
From 2b17061e2d2ad689c3a84873e85a1dabc7fc64fc Mon Sep 17 00:00:00 2001 From: Joshua Marantz Date: Fri, 5 Apr 2019 14:48:45 -0400 Subject: [PATCH 064/165] stat: Add counterFromStatName(), gaugeFromStatName(), and histogramFromStatName() (#6475) Signed-off-by: Joshua Marantz --- include/envoy/stats/scope.h | 24 +++++ source/common/stats/BUILD | 23 +++++ source/common/stats/isolated_store_impl.cc | 29 +----- source/common/stats/isolated_store_impl.h | 11 +-- source/common/stats/scope_prefixer.cc | 35 +++++++ source/common/stats/scope_prefixer.h | 38 ++++++++ source/common/stats/store_impl.h | 36 +++++++ source/common/stats/thread_local_store.h | 24 ++++- source/common/stats/utility.cc | 4 +- source/common/stats/utility.h | 4 +- test/common/stats/isolated_store_impl_test.cc | 96 +++++++++++++++---- test/common/stats/thread_local_store_test.cc | 75 +++++++++++++++ test/integration/server.h | 49 +++++++--- test/mocks/stats/BUILD | 1 + test/mocks/stats/mocks.cc | 2 +- test/mocks/stats/mocks.h | 12 ++- 16 files changed, 388 insertions(+), 75 deletions(-) create mode 100644 source/common/stats/scope_prefixer.cc create mode 100644 source/common/stats/scope_prefixer.h create mode 100644 source/common/stats/store_impl.h diff --git a/include/envoy/stats/scope.h b/include/envoy/stats/scope.h index 53d27f3ae5c96..eb3dbf0fa896d 100644 --- a/include/envoy/stats/scope.h +++ b/include/envoy/stats/scope.h @@ -43,11 +43,27 @@ class Scope { virtual void deliverHistogramToSinks(const Histogram& histogram, uint64_t value) PURE; /** + * @param name The name of the stat, obtained from the SymbolTable. + * @return a counter within the scope's namespace. + */ + virtual Counter& counterFromStatName(StatName name) PURE; + + /** + * TODO(jmarantz): this variant is deprecated: use counterFromStatName. + * @param name The name, expressed as a string. * @return a counter within the scope's namespace. 
*/ virtual Counter& counter(const std::string& name) PURE; /** + * @param name The name of the stat, obtained from the SymbolTable. + * @return a gauge within the scope's namespace. + */ + virtual Gauge& gaugeFromStatName(StatName name) PURE; + + /** + * TODO(jmarantz): this variant is deprecated: use gaugeFromStatName. + * @param name The name, expressed as a string. * @return a gauge within the scope's namespace. */ virtual Gauge& gauge(const std::string& name) PURE; @@ -58,6 +74,14 @@ class Scope { virtual NullGaugeImpl& nullGauge(const std::string& name) PURE; /** + * @param name The name of the stat, obtained from the SymbolTable. + * @return a histogram within the scope's namespace with a particular value type. + */ + virtual Histogram& histogramFromStatName(StatName name) PURE; + + /** + * TODO(jmarantz): this variant is deprecated: use histogramFromStatName. + * @param name The name, expressed as a string. * @return a histogram within the scope's namespace with a particular value type. 
*/ virtual Histogram& histogram(const std::string& name) PURE; diff --git a/source/common/stats/BUILD b/source/common/stats/BUILD index 7efb91a9f9442..40d5a43d11cb4 100644 --- a/source/common/stats/BUILD +++ b/source/common/stats/BUILD @@ -45,8 +45,10 @@ envoy_cc_library( deps = [ ":fake_symbol_table_lib", ":histogram_lib", + ":scope_prefixer_lib", ":stats_lib", ":stats_options_lib", + ":store_impl_lib", "//include/envoy/stats:stats_macros", "//source/common/stats:heap_stat_data_lib", ], @@ -62,6 +64,15 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "store_impl_lib", + hdrs = ["store_impl.h"], + deps = [ + ":symbol_table_lib", + "//include/envoy/stats:stats_interface", + ], +) + envoy_cc_library( name = "raw_stat_data_lib", srcs = ["raw_stat_data.cc"], @@ -76,6 +87,17 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "scope_prefixer_lib", + srcs = ["scope_prefixer.cc"], + hdrs = ["scope_prefixer.h"], + deps = [ + ":symbol_table_lib", + ":utility_lib", + "//include/envoy/stats:stats_interface", + ], +) + envoy_cc_library( name = "source_impl_lib", srcs = ["source_impl.cc"], @@ -195,6 +217,7 @@ envoy_cc_library( hdrs = ["thread_local_store.h"], deps = [ ":heap_stat_data_lib", + ":scope_prefixer_lib", ":stats_lib", ":stats_matcher_lib", ":tag_producer_lib", diff --git a/source/common/stats/isolated_store_impl.cc b/source/common/stats/isolated_store_impl.cc index afec3df91aaa4..e10dc340986f4 100644 --- a/source/common/stats/isolated_store_impl.cc +++ b/source/common/stats/isolated_store_impl.cc @@ -8,6 +8,7 @@ #include "common/common/utility.h" #include "common/stats/fake_symbol_table_impl.h" #include "common/stats/histogram_impl.h" +#include "common/stats/scope_prefixer.h" #include "common/stats/utility.h" namespace Envoy { @@ -22,7 +23,7 @@ IsolatedStoreImpl::IsolatedStoreImpl(std::unique_ptr&& symbol_table } IsolatedStoreImpl::IsolatedStoreImpl(SymbolTable& symbol_table) - : symbol_table_(symbol_table), alloc_(symbol_table_), + : StoreImpl(symbol_table), 
alloc_(symbol_table), counters_([this](const std::string& name) -> CounterSharedPtr { std::string tag_extracted_name = name; std::vector tags; @@ -37,32 +38,8 @@ IsolatedStoreImpl::IsolatedStoreImpl(SymbolTable& symbol_table) return std::make_shared(name, *this, std::string(name), std::vector()); }) {} -struct IsolatedScopeImpl : public Scope { - IsolatedScopeImpl(IsolatedStoreImpl& parent, const std::string& prefix) - : parent_(parent), prefix_(Utility::sanitizeStatsName(prefix)) {} - - // Stats::Scope - ScopePtr createScope(const std::string& name) override { - return ScopePtr{new IsolatedScopeImpl(parent_, prefix_ + name)}; - } - void deliverHistogramToSinks(const Histogram&, uint64_t) override {} - Counter& counter(const std::string& name) override { return parent_.counter(prefix_ + name); } - Gauge& gauge(const std::string& name) override { return parent_.gauge(prefix_ + name); } - NullGaugeImpl& nullGauge(const std::string&) override { return null_gauge_; } - Histogram& histogram(const std::string& name) override { - return parent_.histogram(prefix_ + name); - } - const Stats::StatsOptions& statsOptions() const override { return parent_.statsOptions(); } - const SymbolTable& symbolTable() const override { return parent_.symbolTable(); } - SymbolTable& symbolTable() override { return parent_.symbolTable(); } - - IsolatedStoreImpl& parent_; - NullGaugeImpl null_gauge_; - const std::string prefix_; -}; - ScopePtr IsolatedStoreImpl::createScope(const std::string& name) { - return ScopePtr{new IsolatedScopeImpl(*this, name)}; + return std::make_unique(name, *this); } } // namespace Stats diff --git a/source/common/stats/isolated_store_impl.h b/source/common/stats/isolated_store_impl.h index 1537ffb9fdf10..0cf8207a23e83 100644 --- a/source/common/stats/isolated_store_impl.h +++ b/source/common/stats/isolated_store_impl.h @@ -12,6 +12,7 @@ #include "common/common/utility.h" #include "common/stats/heap_stat_data.h" #include "common/stats/stats_options_impl.h" 
+#include "common/stats/store_impl.h" #include "common/stats/symbol_table_impl.h" #include "common/stats/utility.h" @@ -55,7 +56,7 @@ template class IsolatedStatsCache { Allocator alloc_; }; -class IsolatedStoreImpl : public Store { +class IsolatedStoreImpl : public StoreImpl { public: IsolatedStoreImpl(); explicit IsolatedStoreImpl(SymbolTable& symbol_table); @@ -66,13 +67,8 @@ class IsolatedStoreImpl : public Store { void deliverHistogramToSinks(const Histogram&, uint64_t) override {} Gauge& gauge(const std::string& name) override { return gauges_.get(name); } NullGaugeImpl& nullGauge(const std::string&) override { return null_gauge_; } - Histogram& histogram(const std::string& name) override { - Histogram& histogram = histograms_.get(name); - return histogram; - } + Histogram& histogram(const std::string& name) override { return histograms_.get(name); } const Stats::StatsOptions& statsOptions() const override { return stats_options_; } - const SymbolTable& symbolTable() const override { return symbol_table_; } - virtual SymbolTable& symbolTable() override { return symbol_table_; } // Stats::Store std::vector counters() const override { return counters_.toVector(); } @@ -85,7 +81,6 @@ class IsolatedStoreImpl : public Store { IsolatedStoreImpl(std::unique_ptr&& symbol_table); std::unique_ptr symbol_table_storage_; - SymbolTable& symbol_table_; HeapStatDataAllocator alloc_; IsolatedStatsCache counters_; IsolatedStatsCache gauges_; diff --git a/source/common/stats/scope_prefixer.cc b/source/common/stats/scope_prefixer.cc new file mode 100644 index 0000000000000..96e1e8be7b0af --- /dev/null +++ b/source/common/stats/scope_prefixer.cc @@ -0,0 +1,35 @@ +#include "common/stats/scope_prefixer.h" + +#include "envoy/stats/scope.h" + +#include "common/stats/symbol_table_impl.h" +#include "common/stats/utility.h" + +namespace Envoy { +namespace Stats { + +ScopePrefixer::ScopePrefixer(absl::string_view prefix, Scope& scope) + : prefix_(Utility::sanitizeStatsName(prefix)), 
scope_(scope) {} + +ScopePtr ScopePrefixer::createScope(const std::string& name) { + return std::make_unique<ScopePrefixer>(prefix_ + name, scope_); +} + +Counter& ScopePrefixer::counterFromStatName(StatName name) { + return counter(symbolTable().toString(name)); +} + +Gauge& ScopePrefixer::gaugeFromStatName(StatName name) { + return gauge(symbolTable().toString(name)); +} + +Histogram& ScopePrefixer::histogramFromStatName(StatName name) { + return histogram(symbolTable().toString(name)); +} + +void ScopePrefixer::deliverHistogramToSinks(const Histogram& histograms, uint64_t val) { + scope_.deliverHistogramToSinks(histograms, val); +} + +} // namespace Stats +} // namespace Envoy diff --git a/source/common/stats/scope_prefixer.h b/source/common/stats/scope_prefixer.h new file mode 100644 index 0000000000000..4871840f549e3 --- /dev/null +++ b/source/common/stats/scope_prefixer.h @@ -0,0 +1,38 @@ +#include "envoy/stats/scope.h" + +#include "common/stats/symbol_table_impl.h" + +namespace Envoy { +namespace Stats { + +// Implements a Scope that delegates to a passed-in scope, prefixing all names +// prior to creation.
+class ScopePrefixer : public Scope { +public: + ScopePrefixer(absl::string_view prefix, Scope& scope); + + // Scope + ScopePtr createScope(const std::string& name) override; + Counter& counter(const std::string& name) override { return scope_.counter(prefix_ + name); } + Gauge& gauge(const std::string& name) override { return scope_.gauge(prefix_ + name); } + Histogram& histogram(const std::string& name) override { + return scope_.histogram(prefix_ + name); + } + void deliverHistogramToSinks(const Histogram& histograms, uint64_t val) override; + Counter& counterFromStatName(StatName name) override; + Gauge& gaugeFromStatName(StatName name) override; + Histogram& histogramFromStatName(StatName name) override; + + const Stats::StatsOptions& statsOptions() const override { return scope_.statsOptions(); } + const SymbolTable& symbolTable() const override { return scope_.symbolTable(); } + virtual SymbolTable& symbolTable() override { return scope_.symbolTable(); } + + NullGaugeImpl& nullGauge(const std::string& str) override { return scope_.nullGauge(str); } + +private: + std::string prefix_; + Scope& scope_; +}; + +} // namespace Stats +} // namespace Envoy diff --git a/source/common/stats/store_impl.h b/source/common/stats/store_impl.h new file mode 100644 index 0000000000000..94b2db6b06e1c --- /dev/null +++ b/source/common/stats/store_impl.h @@ -0,0 +1,36 @@ +#pragma once + +#include "envoy/stats/stats.h" +#include "envoy/stats/store.h" + +#include "common/stats/symbol_table_impl.h" + +namespace Envoy { +namespace Stats { + +/** + * Implements common parts of the Store API needed by multiple derivations of Store. 
+ */ +class StoreImpl : public Store { +public: + explicit StoreImpl(SymbolTable& symbol_table) : symbol_table_(symbol_table) {} + + Counter& counterFromStatName(StatName name) override { + return counter(symbol_table_.toString(name)); + } + + Gauge& gaugeFromStatName(StatName name) override { return gauge(symbol_table_.toString(name)); } + + Histogram& histogramFromStatName(StatName name) override { + return histogram(symbol_table_.toString(name)); + } + + SymbolTable& symbolTable() override { return symbol_table_; } + const SymbolTable& symbolTable() const override { return symbol_table_; } + +private: + SymbolTable& symbol_table_; +}; + +} // namespace Stats +} // namespace Envoy diff --git a/source/common/stats/thread_local_store.h b/source/common/stats/thread_local_store.h index 9180517387d42..afae6779edc0b 100644 --- a/source/common/stats/thread_local_store.h +++ b/source/common/stats/thread_local_store.h @@ -12,6 +12,7 @@ #include "common/stats/heap_stat_data.h" #include "common/stats/histogram_impl.h" #include "common/stats/source_impl.h" +#include "common/stats/symbol_table_impl.h" #include "common/stats/utility.h" #include "absl/container/flat_hash_map.h" @@ -139,16 +140,23 @@ class ThreadLocalStoreImpl : Logger::Loggable, public StoreRo ~ThreadLocalStoreImpl(); // Stats::Scope + Counter& counterFromStatName(StatName name) override { + return default_scope_->counterFromStatName(name); + } Counter& counter(const std::string& name) override { return default_scope_->counter(name); } ScopePtr createScope(const std::string& name) override; void deliverHistogramToSinks(const Histogram& histogram, uint64_t value) override { return default_scope_->deliverHistogramToSinks(histogram, value); } + Gauge& gaugeFromStatName(StatName name) override { + return default_scope_->gaugeFromStatName(name); + } Gauge& gauge(const std::string& name) override { return default_scope_->gauge(name); } + Histogram& histogramFromStatName(StatName name) override { + return 
default_scope_->histogramFromStatName(name); + } + Histogram& histogram(const std::string& name) override { return default_scope_->histogram(name); } NullGaugeImpl& nullGauge(const std::string&) override { return null_gauge_; } - Histogram& histogram(const std::string& name) override { - return default_scope_->histogram(name); - }; const SymbolTable& symbolTable() const override { return alloc_.symbolTable(); } SymbolTable& symbolTable() override { return alloc_.symbolTable(); } @@ -242,6 +250,16 @@ class ThreadLocalStoreImpl : Logger::Loggable, public StoreRo StatMap>* tls_cache, SharedStringSet* tls_rejected_stats, StatType& null_stat); + Counter& counterFromStatName(StatName name) override { + return counter(symbolTable().toString(name)); + } + + Gauge& gaugeFromStatName(StatName name) override { return gauge(symbolTable().toString(name)); } + + Histogram& histogramFromStatName(StatName name) override { + return histogram(symbolTable().toString(name)); + } + static std::atomic next_scope_id_; const uint64_t scope_id_; diff --git a/source/common/stats/utility.cc b/source/common/stats/utility.cc index afc4e8c4f4ca5..c5c980ee4e274 100644 --- a/source/common/stats/utility.cc +++ b/source/common/stats/utility.cc @@ -6,8 +6,8 @@ namespace Envoy { namespace Stats { -std::string Utility::sanitizeStatsName(const std::string& name) { - std::string stats_name = name; +std::string Utility::sanitizeStatsName(absl::string_view name) { + std::string stats_name = std::string(name); std::replace(stats_name.begin(), stats_name.end(), ':', '_'); std::replace(stats_name.begin(), stats_name.end(), '\0', '_'); return stats_name; diff --git a/source/common/stats/utility.h b/source/common/stats/utility.h index 18081360640c9..58872d0ce77e0 100644 --- a/source/common/stats/utility.h +++ b/source/common/stats/utility.h @@ -2,6 +2,8 @@ #include +#include "absl/strings/string_view.h" + namespace Envoy { namespace Stats { @@ -12,7 +14,7 @@ class Utility { public: // ':' is a reserved char in 
statsd. Do a character replacement to avoid costly inline // translations later. - static std::string sanitizeStatsName(const std::string& name); + static std::string sanitizeStatsName(const absl::string_view name); }; } // namespace Stats diff --git a/test/common/stats/isolated_store_impl_test.cc b/test/common/stats/isolated_store_impl_test.cc index b2aaa80693fdd..50c4aea7d0d95 100644 --- a/test/common/stats/isolated_store_impl_test.cc +++ b/test/common/stats/isolated_store_impl_test.cc @@ -11,11 +11,34 @@ namespace Envoy { namespace Stats { -TEST(StatsIsolatedStoreImplTest, All) { - IsolatedStoreImpl store; +class StatsIsolatedStoreImplTest : public testing::Test { +protected: + ~StatsIsolatedStoreImplTest() override { clearStorage(); } + + void clearStorage() { + for (auto& stat_name_storage : stat_name_storage_) { + stat_name_storage.free(store_.symbolTable()); + } + stat_name_storage_.clear(); + EXPECT_EQ(0, store_.symbolTable().numSymbols()); + } + + StatName makeStatName(absl::string_view name) { + stat_name_storage_.emplace_back(makeStatStorage(name)); + return stat_name_storage_.back().statName(); + } + + StatNameStorage makeStatStorage(absl::string_view name) { + return StatNameStorage(name, store_.symbolTable()); + } + + IsolatedStoreImpl store_; + std::vector stat_name_storage_; +}; - ScopePtr scope1 = store.createScope("scope1."); - Counter& c1 = store.counter("c1"); +TEST_F(StatsIsolatedStoreImplTest, All) { + ScopePtr scope1 = store_.createScope("scope1."); + Counter& c1 = store_.counter("c1"); Counter& c2 = scope1->counter("c2"); EXPECT_EQ("c1", c1.name()); EXPECT_EQ("scope1.c2", c2.name()); @@ -24,7 +47,7 @@ TEST(StatsIsolatedStoreImplTest, All) { EXPECT_EQ(0, c1.tags().size()); EXPECT_EQ(0, c1.tags().size()); - Gauge& g1 = store.gauge("g1"); + Gauge& g1 = store_.gauge("g1"); Gauge& g2 = scope1->gauge("g2"); EXPECT_EQ("g1", g1.name()); EXPECT_EQ("scope1.g2", g2.name()); @@ -33,7 +56,7 @@ TEST(StatsIsolatedStoreImplTest, All) { EXPECT_EQ(0, 
g1.tags().size()); EXPECT_EQ(0, g1.tags().size()); - Histogram& h1 = store.histogram("h1"); + Histogram& h1 = store_.histogram("h1"); Histogram& h2 = scope1->histogram("h2"); scope1->deliverHistogramToSinks(h2, 0); EXPECT_EQ("h1", h1.name()); @@ -52,16 +75,58 @@ TEST(StatsIsolatedStoreImplTest, All) { ScopePtr scope3 = scope1->createScope(std::string("foo:\0:.", 7)); EXPECT_EQ("scope1.foo___.bar", scope3->counter("bar").name()); - EXPECT_EQ(4UL, store.counters().size()); - EXPECT_EQ(2UL, store.gauges().size()); + EXPECT_EQ(4UL, store_.counters().size()); + EXPECT_EQ(2UL, store_.gauges().size()); +} + +TEST_F(StatsIsolatedStoreImplTest, AllWithSymbolTable) { + ScopePtr scope1 = store_.createScope("scope1."); + Counter& c1 = store_.counterFromStatName(makeStatName("c1")); + Counter& c2 = scope1->counterFromStatName(makeStatName("c2")); + EXPECT_EQ("c1", c1.name()); + EXPECT_EQ("scope1.c2", c2.name()); + EXPECT_EQ("c1", c1.tagExtractedName()); + EXPECT_EQ("scope1.c2", c2.tagExtractedName()); + EXPECT_EQ(0, c1.tags().size()); + EXPECT_EQ(0, c1.tags().size()); + + Gauge& g1 = store_.gaugeFromStatName(makeStatName("g1")); + Gauge& g2 = scope1->gaugeFromStatName(makeStatName("g2")); + EXPECT_EQ("g1", g1.name()); + EXPECT_EQ("scope1.g2", g2.name()); + EXPECT_EQ("g1", g1.tagExtractedName()); + EXPECT_EQ("scope1.g2", g2.tagExtractedName()); + EXPECT_EQ(0, g1.tags().size()); + EXPECT_EQ(0, g1.tags().size()); + + Histogram& h1 = store_.histogramFromStatName(makeStatName("h1")); + Histogram& h2 = scope1->histogramFromStatName(makeStatName("h2")); + scope1->deliverHistogramToSinks(h2, 0); + EXPECT_EQ("h1", h1.name()); + EXPECT_EQ("scope1.h2", h2.name()); + EXPECT_EQ("h1", h1.tagExtractedName()); + EXPECT_EQ("scope1.h2", h2.tagExtractedName()); + EXPECT_EQ(0, h1.tags().size()); + EXPECT_EQ(0, h2.tags().size()); + h1.recordValue(200); + h2.recordValue(200); + + ScopePtr scope2 = scope1->createScope("foo."); + EXPECT_EQ("scope1.foo.bar", 
scope2->counterFromStatName(makeStatName("bar")).name()); + + // Validate that we sanitize away bad characters in the stats prefix. + ScopePtr scope3 = scope1->createScope(std::string("foo:\0:.", 7)); + EXPECT_EQ("scope1.foo___.bar", scope3->counter("bar").name()); + + EXPECT_EQ(4UL, store_.counters().size()); + EXPECT_EQ(2UL, store_.gauges().size()); } -TEST(StatsIsolatedStoreImplTest, LongStatName) { - IsolatedStoreImpl store; +TEST_F(StatsIsolatedStoreImplTest, LongStatName) { Stats::StatsOptionsImpl stats_options; const std::string long_string(stats_options.maxNameLength() + 1, 'A'); - ScopePtr scope = store.createScope("scope."); + ScopePtr scope = store_.createScope("scope."); Counter& counter = scope->counter(long_string); EXPECT_EQ(absl::StrCat("scope.", long_string), counter.name()); } @@ -80,11 +145,10 @@ struct TestStats { ALL_TEST_STATS(GENERATE_COUNTER_STRUCT, GENERATE_GAUGE_STRUCT, GENERATE_HISTOGRAM_STRUCT) }; -TEST(StatsMacros, All) { - IsolatedStoreImpl stats_store; - TestStats test_stats{ALL_TEST_STATS(POOL_COUNTER_PREFIX(stats_store, "test."), - POOL_GAUGE_PREFIX(stats_store, "test."), - POOL_HISTOGRAM_PREFIX(stats_store, "test."))}; +TEST_F(StatsIsolatedStoreImplTest, StatsMacros) { + TestStats test_stats{ALL_TEST_STATS(POOL_COUNTER_PREFIX(store_, "test."), + POOL_GAUGE_PREFIX(store_, "test."), + POOL_HISTOGRAM_PREFIX(store_, "test."))}; Counter& counter = test_stats.test_counter_; EXPECT_EQ("test.test_counter", counter.name()); diff --git a/test/common/stats/thread_local_store_test.cc b/test/common/stats/thread_local_store_test.cc index 363a97d63a30e..c4b01a921aa3a 100644 --- a/test/common/stats/thread_local_store_test.cc +++ b/test/common/stats/thread_local_store_test.cc @@ -476,6 +476,81 @@ TEST_F(StatsThreadLocalStoreTest, HotRestartTruncation) { EXPECT_CALL(*alloc_, free(_)).Times(2); } +class LookupWithStatNameTest : public testing::Test { +public: + LookupWithStatNameTest() : alloc_(symbol_table_), store_(options_, alloc_) {} + 
~LookupWithStatNameTest() override { + store_.shutdownThreading(); + clearStorage(); + } + + void clearStorage() { + for (auto& stat_name_storage : stat_name_storage_) { + stat_name_storage.free(store_.symbolTable()); + } + stat_name_storage_.clear(); + EXPECT_EQ(0, store_.symbolTable().numSymbols()); + } + + StatName makeStatName(absl::string_view name) { + stat_name_storage_.emplace_back(makeStatStorage(name)); + return stat_name_storage_.back().statName(); + } + + StatNameStorage makeStatStorage(absl::string_view name) { + return StatNameStorage(name, store_.symbolTable()); + } + + Stats::FakeSymbolTableImpl symbol_table_; + HeapStatDataAllocator alloc_; + StatsOptionsImpl options_; + ThreadLocalStoreImpl store_; + std::vector stat_name_storage_; +}; + +TEST_F(LookupWithStatNameTest, All) { + ScopePtr scope1 = store_.createScope("scope1."); + Counter& c1 = store_.counterFromStatName(makeStatName("c1")); + Counter& c2 = scope1->counterFromStatName(makeStatName("c2")); + EXPECT_EQ("c1", c1.name()); + EXPECT_EQ("scope1.c2", c2.name()); + EXPECT_EQ("c1", c1.tagExtractedName()); + EXPECT_EQ("scope1.c2", c2.tagExtractedName()); + EXPECT_EQ(0, c1.tags().size()); + EXPECT_EQ(0, c1.tags().size()); + + Gauge& g1 = store_.gaugeFromStatName(makeStatName("g1")); + Gauge& g2 = scope1->gaugeFromStatName(makeStatName("g2")); + EXPECT_EQ("g1", g1.name()); + EXPECT_EQ("scope1.g2", g2.name()); + EXPECT_EQ("g1", g1.tagExtractedName()); + EXPECT_EQ("scope1.g2", g2.tagExtractedName()); + EXPECT_EQ(0, g1.tags().size()); + EXPECT_EQ(0, g1.tags().size()); + + Histogram& h1 = store_.histogramFromStatName(makeStatName("h1")); + Histogram& h2 = scope1->histogramFromStatName(makeStatName("h2")); + scope1->deliverHistogramToSinks(h2, 0); + EXPECT_EQ("h1", h1.name()); + EXPECT_EQ("scope1.h2", h2.name()); + EXPECT_EQ("h1", h1.tagExtractedName()); + EXPECT_EQ("scope1.h2", h2.tagExtractedName()); + EXPECT_EQ(0, h1.tags().size()); + EXPECT_EQ(0, h2.tags().size()); + h1.recordValue(200); + 
h2.recordValue(200); + + ScopePtr scope2 = scope1->createScope("foo."); + EXPECT_EQ("scope1.foo.bar", scope2->counterFromStatName(makeStatName("bar")).name()); + + // Validate that we sanitize away bad characters in the stats prefix. + ScopePtr scope3 = scope1->createScope(std::string("foo:\0:.", 7)); + EXPECT_EQ("scope1.foo___.bar", scope3->counter("bar").name()); + + EXPECT_EQ(5UL, store_.counters().size()); // The 4 objects created plus stats.overflow. + EXPECT_EQ(2UL, store_.gauges().size()); +} + class StatsMatcherTLSTest : public StatsThreadLocalStoreTest { public: envoy::config::metrics::v2::StatsConfig stats_config_; diff --git a/test/integration/server.h b/test/integration/server.h index 8f07317102c13..a33cab4d9e499 100644 --- a/test/integration/server.h +++ b/test/integration/server.h @@ -75,21 +75,35 @@ class TestScopeWrapper : public Scope { wrapped_scope_->deliverHistogramToSinks(histogram, value); } - Counter& counter(const std::string& name) override { + Counter& counterFromStatName(StatName name) override { Thread::LockGuard lock(lock_); - return wrapped_scope_->counter(name); + return wrapped_scope_->counterFromStatName(name); } - Gauge& gauge(const std::string& name) override { + Gauge& gaugeFromStatName(StatName name) override { Thread::LockGuard lock(lock_); - return wrapped_scope_->gauge(name); + return wrapped_scope_->gaugeFromStatName(name); } - NullGaugeImpl& nullGauge(const std::string&) override { return null_gauge_; } + Histogram& histogramFromStatName(StatName name) override { + Thread::LockGuard lock(lock_); + return wrapped_scope_->histogramFromStatName(name); + } + NullGaugeImpl& nullGauge(const std::string& str) override { + return wrapped_scope_->nullGauge(str); + } + Counter& counter(const std::string& name) override { + StatNameTempStorage storage(name, symbolTable()); + return counterFromStatName(storage.statName()); + } + Gauge& gauge(const std::string& name) override { + StatNameTempStorage storage(name, symbolTable()); + 
return gaugeFromStatName(storage.statName()); + } Histogram& histogram(const std::string& name) override { - Thread::LockGuard lock(lock_); - return wrapped_scope_->histogram(name); + StatNameTempStorage storage(name, symbolTable()); + return histogramFromStatName(storage.statName()); } const SymbolTable& symbolTable() const override { return wrapped_scope_->symbolTable(); } @@ -100,7 +114,6 @@ class TestScopeWrapper : public Scope { Thread::MutexBasicLockable& lock_; ScopePtr wrapped_scope_; StatsOptionsImpl stats_options_; - NullGaugeImpl null_gauge_; }; /** @@ -111,6 +124,10 @@ class TestIsolatedStoreImpl : public StoreRoot { public: TestIsolatedStoreImpl() : source_(*this) {} // Stats::Scope + Counter& counterFromStatName(StatName name) override { + Thread::LockGuard lock(lock_); + return store_.counterFromStatName(name); + } Counter& counter(const std::string& name) override { Thread::LockGuard lock(lock_); return store_.counter(name); @@ -120,16 +137,26 @@ class TestIsolatedStoreImpl : public StoreRoot { return ScopePtr{new TestScopeWrapper(lock_, store_.createScope(name))}; } void deliverHistogramToSinks(const Histogram&, uint64_t) override {} + Gauge& gaugeFromStatName(StatName name) override { + Thread::LockGuard lock(lock_); + return store_.gaugeFromStatName(name); + } Gauge& gauge(const std::string& name) override { Thread::LockGuard lock(lock_); return store_.gauge(name); } - NullGaugeImpl& nullGauge(const std::string&) override { return null_gauge_; } + Histogram& histogramFromStatName(StatName name) override { + Thread::LockGuard lock(lock_); + return store_.histogramFromStatName(name); + } + NullGaugeImpl& nullGauge(const std::string& name) override { return store_.nullGauge(name); } Histogram& histogram(const std::string& name) override { Thread::LockGuard lock(lock_); return store_.histogram(name); } const StatsOptions& statsOptions() const override { return stats_options_; } + const SymbolTable& symbolTable() const override { return 
store_.symbolTable(); } + SymbolTable& symbolTable() override { return store_.symbolTable(); } // Stats::Store std::vector counters() const override { @@ -155,15 +182,11 @@ class TestIsolatedStoreImpl : public StoreRoot { void mergeHistograms(PostMergeCb) override {} Source& source() override { return source_; } - const SymbolTable& symbolTable() const override { return store_.symbolTable(); } - SymbolTable& symbolTable() override { return store_.symbolTable(); } - private: mutable Thread::MutexBasicLockable lock_; IsolatedStoreImpl store_; SourceImpl source_; StatsOptionsImpl stats_options_; - NullGaugeImpl null_gauge_; }; } // namespace Stats diff --git a/test/mocks/stats/BUILD b/test/mocks/stats/BUILD index 711c90c01fc29..6539e818467bc 100644 --- a/test/mocks/stats/BUILD +++ b/test/mocks/stats/BUILD @@ -21,6 +21,7 @@ envoy_cc_mock( "//source/common/stats:histogram_lib", "//source/common/stats:isolated_store_lib", "//source/common/stats:stats_lib", + "//source/common/stats:store_impl_lib", "//test/mocks:common_lib", "//test/test_common:global_lib", ], diff --git a/test/mocks/stats/mocks.cc b/test/mocks/stats/mocks.cc index 1b132a3b378c9..9b1b4ea76904c 100644 --- a/test/mocks/stats/mocks.cc +++ b/test/mocks/stats/mocks.cc @@ -72,7 +72,7 @@ MockSource::~MockSource() {} MockSink::MockSink() {} MockSink::~MockSink() {} -MockStore::MockStore() { +MockStore::MockStore() : StoreImpl(*fake_symbol_table_) { ON_CALL(*this, counter(_)).WillByDefault(ReturnRef(counter_)); ON_CALL(*this, histogram(_)).WillByDefault(Invoke([this](const std::string& name) -> Histogram& { auto* histogram = new NiceMock; diff --git a/test/mocks/stats/mocks.h b/test/mocks/stats/mocks.h index ed628a5d8f815..a605c08bb5437 100644 --- a/test/mocks/stats/mocks.h +++ b/test/mocks/stats/mocks.h @@ -18,6 +18,7 @@ #include "common/stats/fake_symbol_table_impl.h" #include "common/stats/histogram_impl.h" #include "common/stats/isolated_store_impl.h" +#include "common/stats/store_impl.h" #include 
"test/test_common/global.h" @@ -150,7 +151,12 @@ class MockSink : public Sink { MOCK_METHOD2(onHistogramComplete, void(const Histogram& histogram, uint64_t value)); }; -class MockStore : public Store { +class SymbolTableProvider { +public: + Test::Global fake_symbol_table_; +}; + +class MockStore : public SymbolTableProvider, public StoreImpl { public: MockStore(); ~MockStore(); @@ -168,10 +174,6 @@ class MockStore : public Store { MOCK_CONST_METHOD0(histograms, std::vector()); MOCK_CONST_METHOD0(statsOptions, const StatsOptions&()); - SymbolTable& symbolTable() override { return symbol_table_.get(); } - const SymbolTable& symbolTable() const override { return symbol_table_.get(); } - - Test::Global symbol_table_; testing::NiceMock counter_; std::vector> histograms_; StatsOptionsImpl stats_options_; From ba1ecbb95b86aaf87b08b5d4624969fc4357d7c1 Mon Sep 17 00:00:00 2001 From: Jon Parise Date: Fri, 5 Apr 2019 11:52:28 -0700 Subject: [PATCH 065/165] watcher: notify when watched files are modified (#6215) The file system watcher previously only reported file movement events, but it's also useful to track changes to watched files (to trigger a reload, for example). This change adds support for tracking file modification events reports them using a new Watcher::Events::Modified. Both inotify (IN_MODIFY) and kqueue (NOTE_WRITE) are supported. This also reduces the set of watched inotify events to just the ones we need: IN_MODIFY | IN_MOVED_TO. 
Signed-off-by: Jon Parise --- include/envoy/filesystem/watcher.h | 3 ++- .../common/filesystem/inotify/watcher_impl.cc | 6 +++++- .../common/filesystem/kqueue/watcher_impl.cc | 5 ++++- test/common/filesystem/watcher_impl_test.cc | 19 +++++++++++++++++++ 4 files changed, 30 insertions(+), 3 deletions(-) diff --git a/include/envoy/filesystem/watcher.h b/include/envoy/filesystem/watcher.h index 25359be51c693..d50264254e4a6 100644 --- a/include/envoy/filesystem/watcher.h +++ b/include/envoy/filesystem/watcher.h @@ -20,6 +20,7 @@ class Watcher { struct Events { static const uint32_t MovedTo = 0x1; + static const uint32_t Modified = 0x2; }; virtual ~Watcher() {} @@ -36,4 +37,4 @@ class Watcher { using WatcherPtr = std::unique_ptr; } // namespace Filesystem -} // namespace Envoy \ No newline at end of file +} // namespace Envoy diff --git a/source/common/filesystem/inotify/watcher_impl.cc b/source/common/filesystem/inotify/watcher_impl.cc index f61ac242b5182..0bfac293b6dd8 100644 --- a/source/common/filesystem/inotify/watcher_impl.cc +++ b/source/common/filesystem/inotify/watcher_impl.cc @@ -40,7 +40,8 @@ void WatcherImpl::addWatch(const std::string& path, uint32_t events, OnChangedCb std::string directory = last_slash != 0 ? 
path.substr(0, last_slash) : "/"; std::string file = StringUtil::subspan(path, last_slash + 1, path.size()); - int watch_fd = inotify_add_watch(inotify_fd_, directory.c_str(), IN_ALL_EVENTS); + const uint32_t watch_mask = IN_MODIFY | IN_MOVED_TO; + int watch_fd = inotify_add_watch(inotify_fd_, directory.c_str(), watch_mask); if (watch_fd == -1) { throw EnvoyException( fmt::format("unable to add filesystem watch for file {}: {}", path, strerror(errno))); @@ -74,6 +75,9 @@ void WatcherImpl::onInotifyEvent() { file); uint32_t events = 0; + if (file_event->mask & IN_MODIFY) { + events |= Events::Modified; + } if (file_event->mask & IN_MOVED_TO) { events |= Events::MovedTo; } diff --git a/source/common/filesystem/kqueue/watcher_impl.cc b/source/common/filesystem/kqueue/watcher_impl.cc index f0e4ced2c2bfb..f5a030ba9b412 100644 --- a/source/common/filesystem/kqueue/watcher_impl.cc +++ b/source/common/filesystem/kqueue/watcher_impl.cc @@ -69,7 +69,7 @@ WatcherImpl::FileWatchPtr WatcherImpl::addWatch(const std::string& path, uint32_ watch->callback_ = cb; watch->watching_dir_ = watching_dir; - int flags = NOTE_DELETE | NOTE_RENAME; + u_int flags = NOTE_DELETE | NOTE_RENAME | NOTE_WRITE; if (watching_dir) { flags = NOTE_DELETE | NOTE_WRITE; } @@ -150,6 +150,9 @@ void WatcherImpl::onKqueueEvent() { if (event.fflags & NOTE_RENAME) { events |= Events::MovedTo; } + if (event.fflags & NOTE_WRITE) { + events |= Events::Modified; + } } ENVOY_LOG(debug, "notification: fd: {} flags: {:x} file: {}", file->fd_, event.fflags, diff --git a/test/common/filesystem/watcher_impl_test.cc b/test/common/filesystem/watcher_impl_test.cc index 6230759280a23..c911d46efe782 100644 --- a/test/common/filesystem/watcher_impl_test.cc +++ b/test/common/filesystem/watcher_impl_test.cc @@ -91,6 +91,25 @@ TEST_F(WatcherImplTest, Create) { dispatcher_->run(Event::Dispatcher::RunType::Block); } +TEST_F(WatcherImplTest, Modify) { + Filesystem::WatcherPtr watcher = dispatcher_->createFilesystemWatcher(); + + 
TestUtility::createDirectory(TestEnvironment::temporaryPath("envoy_test")); + std::ofstream file(TestEnvironment::temporaryPath("envoy_test/watcher_target")); + + WatchCallback callback; + watcher->addWatch(TestEnvironment::temporaryPath("envoy_test/watcher_target"), + Watcher::Events::Modified, [&](uint32_t events) -> void { + callback.called(events); + dispatcher_->exit(); + }); + dispatcher_->run(Event::Dispatcher::RunType::NonBlock); + + file << "text" << std::flush; + EXPECT_CALL(callback, called(Watcher::Events::Modified)); + dispatcher_->run(Event::Dispatcher::RunType::NonBlock); +} + TEST_F(WatcherImplTest, BadPath) { Filesystem::WatcherPtr watcher = dispatcher_->createFilesystemWatcher(); From d1cdd252601cd3c4d97106591b6206af505b0fbd Mon Sep 17 00:00:00 2001 From: Bin Wu <46450037+wu-bin@users.noreply.github.com> Date: Fri, 5 Apr 2019 14:53:02 -0400 Subject: [PATCH 066/165] ci: Make envoy_select_quiche no-op. (#6393) Remove envoy_select_quiche from envoy_build_system.bzl. Risk Level: none. build only. 
Testing: bazel test --test_output=all test/extensions/quic_listeners/quiche/platform:all @com_googlesource_quiche//:all bazel test --test_output=all --define quiche=enabled test/extensions/quic_listeners/quiche/platform:all @com_googlesource_quiche//:all Signed-off-by: Bin Wu --- bazel/envoy_build_system.bzl | 6 +- ci/do_ci.sh | 26 ++-- .../quiche/platform/quic_logging_impl.cc | 1 + test/coverage/gen_build.sh | 5 + .../quiche/platform/quic_platform_test.cc | 132 ++++++++---------- test/run_envoy_bazel_coverage.sh | 35 +++-- test/test_common/utility.h | 2 +- 7 files changed, 107 insertions(+), 100 deletions(-) diff --git a/bazel/envoy_build_system.bzl b/bazel/envoy_build_system.bzl index 61fdfbc557d94..c68fbe0d53262 100644 --- a/bazel/envoy_build_system.bzl +++ b/bazel/envoy_build_system.bzl @@ -500,6 +500,7 @@ def envoy_cc_test_infrastructure_library( tags = tags, alwayslink = 1, linkstatic = 1, + visibility = ["//visibility:public"], ) # Envoy C++ test related libraries (that want gtest, gmock) should be specified @@ -683,7 +684,4 @@ def envoy_select_boringssl(if_fips, default = None): # Selects the part of QUICHE that does not yet work with the current CI. def envoy_select_quiche(xs, repository = ""): - return select({ - repository + "//bazel:enable_quiche": xs, - "//conditions:default": [], - }) + return xs diff --git a/ci/do_ci.sh b/ci/do_ci.sh index c370cec2e3829..dff1ee8c1ceae 100755 --- a/ci/do_ci.sh +++ b/ci/do_ci.sh @@ -39,7 +39,7 @@ function bazel_with_collection() { function bazel_release_binary_build() { echo "Building..." 
- cd "${ENVOY_CI_DIR}" + pushd "${ENVOY_CI_DIR}" bazel build ${BAZEL_BUILD_OPTIONS} -c opt //source/exe:envoy-static collect_build_profile release_build # Copy the envoy-static binary somewhere that we can access outside of the @@ -54,6 +54,9 @@ function bazel_release_binary_build() { cp -f "${ENVOY_DELIVERY_DIR}"/envoy "${ENVOY_SRCDIR}"/build_release mkdir -p "${ENVOY_SRCDIR}"/build_release_stripped strip "${ENVOY_DELIVERY_DIR}"/envoy -o "${ENVOY_SRCDIR}"/build_release_stripped/envoy + # TODO(wu-bin): Remove once https://github.com/envoyproxy/envoy/pull/6229 is merged. + bazel clean + popd } function bazel_debug_binary_build() { @@ -231,11 +234,8 @@ elif [[ "$1" == "bazel.coverage" ]]; then # relocatable and hermetic-ish .par file. cd "${ENVOY_SRCDIR}" bazel build @com_github_gcovr_gcovr//:gcovr.par - export GCOVR="${ENVOY_SRCDIR}/bazel-bin/external/com_github_gcovr_gcovr/gcovr.par" - - export GCOVR_DIR="${ENVOY_BUILD_DIR}/bazel-envoy" - export TESTLOGS_DIR="${ENVOY_BUILD_DIR}/bazel-testlogs" - export WORKSPACE=ci + export GCOVR="/tmp/gcovr.par" + cp -f "${ENVOY_SRCDIR}/bazel-bin/external/com_github_gcovr_gcovr/gcovr.par" ${GCOVR} # Reduce the amount of memory and number of cores Bazel tries to use to # prevent it from launching too many subprocesses. This should prevent the @@ -245,21 +245,13 @@ elif [[ "$1" == "bazel.coverage" ]]; then # after 0.21. [ -z "$CIRCLECI" ] || export BAZEL_TEST_OPTIONS="${BAZEL_TEST_OPTIONS} --local_resources=12288,4,1" - # There is a bug in gcovr 3.3, where it takes the -r path, - # in our case /source, and does a regex replacement of various - # source file paths during HTML generation. It attempts to strip - # out the prefix (e.g. /source), but because it doesn't do a match - # and only strip at the start of the string, it removes /source from - # the middle of the string, corrupting the path. 
The workaround is - # to point -r in the gcovr invocation in run_envoy_bazel_coverage.sh at - # some Bazel created symlinks to the source directory in its output - # directory. Wow. - cd "${ENVOY_BUILD_DIR}" - SRCDIR="${GCOVR_DIR}" "${ENVOY_SRCDIR}"/test/run_envoy_bazel_coverage.sh + test/run_envoy_bazel_coverage.sh collect_build_profile coverage exit 0 elif [[ "$1" == "bazel.clang_tidy" ]]; then setup_clang_toolchain + # TODO(wu-bin): Remove once https://github.com/envoyproxy/envoy/pull/6229 is merged. + export BAZEL_BUILD_OPTIONS="${BAZEL_BUILD_OPTIONS} --linkopt='-Wl,--allow-multiple-definition'" cd "${ENVOY_CI_DIR}" ./run_clang_tidy.sh exit 0 diff --git a/source/extensions/quic_listeners/quiche/platform/quic_logging_impl.cc b/source/extensions/quic_listeners/quiche/platform/quic_logging_impl.cc index 578fbffc28335..895f1bbcb8d3c 100644 --- a/source/extensions/quic_listeners/quiche/platform/quic_logging_impl.cc +++ b/source/extensions/quic_listeners/quiche/platform/quic_logging_impl.cc @@ -40,6 +40,7 @@ QuicLogEmitter::~QuicLogEmitter() { } if (level_ == FATAL) { + GetLogger().flush(); #ifdef NDEBUG // Release mode. abort(); diff --git a/test/coverage/gen_build.sh b/test/coverage/gen_build.sh index 340a9a7a62ec3..a1274e96284ac 100755 --- a/test/coverage/gen_build.sh +++ b/test/coverage/gen_build.sh @@ -26,6 +26,10 @@ set -e rm -f "${BUILD_PATH}" TARGETS=$("${BAZEL_BIN}" query ${BAZEL_QUERY_OPTIONS} "attr('tags', 'coverage_test_lib', ${REPOSITORY}//test/...)" | grep "^//") + +# Run the QUICHE platform api tests for coverage. 
+TARGETS="$TARGETS $("${BAZEL_BIN}" query ${BAZEL_QUERY_OPTIONS} "attr('tags', 'coverage_test_lib', '@com_googlesource_quiche//:all')" | grep "^@com_googlesource_quiche")" + if [ -n "${EXTRA_QUERY_PATHS}" ]; then TARGETS="$TARGETS $("${BAZEL_BIN}" query ${BAZEL_QUERY_OPTIONS} "attr('tags', 'coverage_test_lib', ${EXTRA_QUERY_PATHS})" | grep "^//")" fi @@ -68,4 +72,5 @@ EOF ) > "${BUILD_PATH}" +echo "Generated coverage BUILD file at: ${BUILD_PATH}" "${BUILDIFIER_BIN}" "${BUILD_PATH}" diff --git a/test/extensions/quic_listeners/quiche/platform/quic_platform_test.cc b/test/extensions/quic_listeners/quiche/platform/quic_platform_test.cc index e22003edfeb52..8ec7107bf27da 100644 --- a/test/extensions/quic_listeners/quiche/platform/quic_platform_test.cc +++ b/test/extensions/quic_listeners/quiche/platform/quic_platform_test.cc @@ -50,16 +50,33 @@ using testing::HasSubstr; namespace quic { namespace { -TEST(QuicPlatformTest, QuicAlignOf) { EXPECT_LT(0, QUIC_ALIGN_OF(int)); } +class QuicPlatformTest : public testing::Test { +protected: + QuicPlatformTest() + : log_level_(GetLogger().level()), verbosity_log_threshold_(GetVerbosityLogThreshold()) { + SetVerbosityLogThreshold(0); + GetLogger().set_level(ERROR); + } + + ~QuicPlatformTest() { + SetVerbosityLogThreshold(verbosity_log_threshold_); + GetLogger().set_level(log_level_); + } + + const QuicLogLevel log_level_; + const int verbosity_log_threshold_; +}; + +TEST_F(QuicPlatformTest, QuicAlignOf) { EXPECT_LT(0, QUIC_ALIGN_OF(int)); } -TEST(QuicPlatformTest, QuicArraysize) { +TEST_F(QuicPlatformTest, QuicArraysize) { int array[] = {0, 1, 2, 3, 4}; EXPECT_EQ(5, QUIC_ARRAYSIZE(array)); } enum class TestEnum { ZERO = 0, ONE, TWO, COUNT }; -TEST(QuicPlatformTest, QuicBugTracker) { +TEST_F(QuicPlatformTest, QuicBugTracker) { EXPECT_DEBUG_DEATH(QUIC_BUG << "Here is a bug,", " bug"); EXPECT_DEBUG_DEATH(QUIC_BUG_IF(true) << "There is a bug,", " bug"); EXPECT_LOG_NOT_CONTAINS("error", "", QUIC_BUG_IF(false) << "A feature is not a 
bug."); @@ -69,7 +86,7 @@ TEST(QuicPlatformTest, QuicBugTracker) { EXPECT_LOG_NOT_CONTAINS("error", "", QUIC_PEER_BUG_IF(false) << "But not there."); } -TEST(QuicPlatformTest, QuicClientStats) { +TEST_F(QuicPlatformTest, QuicClientStats) { // Just make sure they compile. QUIC_CLIENT_HISTOGRAM_ENUM("my.enum.histogram", TestEnum::ONE, TestEnum::COUNT, "doc"); QUIC_CLIENT_HISTOGRAM_BOOL("my.bool.histogram", false, "doc"); @@ -80,7 +97,7 @@ TEST(QuicPlatformTest, QuicClientStats) { QuicClientSparseHistogram("my.sparse.histogram", 345); } -TEST(QuicPlatformTest, QuicExpectBug) { +TEST_F(QuicPlatformTest, QuicExpectBug) { auto bug = [](const char* error_message) { QUIC_BUG << error_message; }; auto peer_bug = [](const char* error_message) { QUIC_PEER_BUG << error_message; }; @@ -92,7 +109,7 @@ TEST(QuicPlatformTest, QuicExpectBug) { EXPECT_QUIC_PEER_BUG(peer_bug("peer_bug_2 is expected"), "peer_bug_2"); } -TEST(QuicPlatformTest, QuicExportedStats) { +TEST_F(QuicPlatformTest, QuicExportedStats) { // Just make sure they compile. 
QUIC_HISTOGRAM_ENUM("my.enum.histogram", TestEnum::ONE, TestEnum::COUNT, "doc"); QUIC_HISTOGRAM_BOOL("my.bool.histogram", false, "doc"); @@ -102,7 +119,7 @@ TEST(QuicPlatformTest, QuicExportedStats) { QUIC_HISTOGRAM_COUNTS("my.count.histogram", 123, 0, 1000, 100, "doc"); } -TEST(QuicPlatformTest, QuicHostnameUtils) { +TEST_F(QuicPlatformTest, QuicHostnameUtils) { EXPECT_FALSE(QuicHostnameUtils::IsValidSNI("!!")); EXPECT_FALSE(QuicHostnameUtils::IsValidSNI("envoyproxy")); EXPECT_TRUE(QuicHostnameUtils::IsValidSNI("www.envoyproxy.io")); @@ -111,48 +128,48 @@ TEST(QuicPlatformTest, QuicHostnameUtils) { EXPECT_EQ("quicwg.org", QuicHostnameUtils::NormalizeHostname("QUICWG.ORG")); } -TEST(QuicPlatformTest, QuicUnorderedMap) { +TEST_F(QuicPlatformTest, QuicUnorderedMap) { QuicUnorderedMap umap; umap.insert({"foo", 2}); EXPECT_EQ(2, umap["foo"]); } -TEST(QuicPlatformTest, QuicUnorderedSet) { +TEST_F(QuicPlatformTest, QuicUnorderedSet) { QuicUnorderedSet uset({"foo", "bar"}); EXPECT_EQ(1, uset.count("bar")); EXPECT_EQ(0, uset.count("qux")); } -TEST(QuicPlatformTest, QuicQueue) { +TEST_F(QuicPlatformTest, QuicQueue) { QuicQueue queue; queue.push(10); EXPECT_EQ(10, queue.back()); } -TEST(QuicPlatformTest, QuicDeque) { +TEST_F(QuicPlatformTest, QuicDeque) { QuicDeque deque; deque.push_back(10); EXPECT_EQ(10, deque.back()); } -TEST(QuicPlatformTest, QuicInlinedVector) { +TEST_F(QuicPlatformTest, QuicInlinedVector) { QuicInlinedVector vec; vec.push_back(3); EXPECT_EQ(3, vec[0]); } -TEST(QuicPlatformTest, QuicEndian) { +TEST_F(QuicPlatformTest, QuicEndian) { EXPECT_EQ(0x1234, QuicEndian::NetToHost16(QuicEndian::HostToNet16(0x1234))); EXPECT_EQ(0x12345678, QuicEndian::NetToHost32(QuicEndian::HostToNet32(0x12345678))); } -TEST(QuicPlatformTest, QuicEstimateMemoryUsage) { +TEST_F(QuicPlatformTest, QuicEstimateMemoryUsage) { std::string s = "foo"; // Stubbed out to always return 0. 
EXPECT_EQ(0, QuicEstimateMemoryUsage(s)); } -TEST(QuicPlatformTest, QuicMapUtil) { +TEST_F(QuicPlatformTest, QuicMapUtil) { std::map stdmap = {{"one", 1}, {"two", 2}, {"three", 3}}; EXPECT_TRUE(QuicContainsKey(stdmap, "one")); EXPECT_FALSE(QuicContainsKey(stdmap, "zero")); @@ -170,7 +187,7 @@ TEST(QuicPlatformTest, QuicMapUtil) { EXPECT_FALSE(QuicContainsValue(stdvec, 0)); } -TEST(QuicPlatformTest, QuicMockLog) { +TEST_F(QuicPlatformTest, QuicMockLog) { ASSERT_EQ(ERROR, GetLogger().level()); { @@ -199,7 +216,7 @@ TEST(QuicPlatformTest, QuicMockLog) { QUIC_LOG(ERROR) << "Outer log message should be captured."; } -TEST(QuicPlatformTest, QuicServerStats) { +TEST_F(QuicPlatformTest, QuicServerStats) { // Just make sure they compile. QUIC_SERVER_HISTOGRAM_ENUM("my.enum.histogram", TestEnum::ONE, TestEnum::COUNT, "doc"); QUIC_SERVER_HISTOGRAM_BOOL("my.bool.histogram", false, "doc"); @@ -209,19 +226,19 @@ TEST(QuicPlatformTest, QuicServerStats) { QUIC_SERVER_HISTOGRAM_COUNTS("my.count.histogram", 123, 0, 1000, 100, "doc"); } -TEST(QuicPlatformTest, QuicStackTraceTest) { +TEST_F(QuicPlatformTest, QuicStackTraceTest) { EXPECT_THAT(QuicStackTrace(), HasSubstr("QuicStackTraceTest")); } -TEST(QuicPlatformTest, QuicSleep) { QuicSleep(QuicTime::Delta::FromMilliseconds(20)); } +TEST_F(QuicPlatformTest, QuicSleep) { QuicSleep(QuicTime::Delta::FromMilliseconds(20)); } -TEST(QuicPlatformTest, QuicStringPiece) { +TEST_F(QuicPlatformTest, QuicStringPiece) { std::string s = "bar"; QuicStringPiece sp(s); EXPECT_EQ('b', sp[0]); } -TEST(QuicPlatformTest, QuicThread) { +TEST_F(QuicPlatformTest, QuicThread) { class AdderThread : public QuicThread { public: AdderThread(int* value, int increment) @@ -252,17 +269,17 @@ TEST(QuicPlatformTest, QuicThread) { EXPECT_EQ(1, value); // QuicThread will panic if it's started but not joined. 
- EXPECT_DEATH({ AdderThread(&value, 2).Start(); }, - "QuicThread should be joined before destruction"); + EXPECT_DEATH_LOG_TO_STDERR({ AdderThread(&value, 2).Start(); }, + "QuicThread should be joined before destruction"); } -TEST(QuicPlatformTest, QuicUint128) { +TEST_F(QuicPlatformTest, QuicUint128) { QuicUint128 i = MakeQuicUint128(16777216, 315); EXPECT_EQ(315, QuicUint128Low64(i)); EXPECT_EQ(16777216, QuicUint128High64(i)); } -TEST(QuicPlatformTest, QuicPtrUtil) { +TEST_F(QuicPlatformTest, QuicPtrUtil) { auto p = QuicMakeUnique("abc"); EXPECT_EQ("abc", *p); @@ -270,28 +287,7 @@ TEST(QuicPlatformTest, QuicPtrUtil) { EXPECT_EQ("aaa", *p); } -namespace { - -class QuicLogThresholdSaver { -public: - QuicLogThresholdSaver() - : level_(GetLogger().level()), verbosity_threshold_(GetVerbosityLogThreshold()) {} - - ~QuicLogThresholdSaver() { - SetVerbosityLogThreshold(verbosity_threshold_); - GetLogger().set_level(level_); - } - -private: - const QuicLogLevel level_; - const int verbosity_threshold_; -}; - -} // namespace - -TEST(QuicPlatformTest, QuicLog) { - QuicLogThresholdSaver saver; - +TEST_F(QuicPlatformTest, QuicLog) { // By default, tests emit logs at level ERROR or higher. 
ASSERT_EQ(ERROR, GetLogger().level()); @@ -335,9 +331,7 @@ TEST(QuicPlatformTest, QuicLog) { #define VALUE_BY_COMPILE_MODE(debug_mode_value, release_mode_value) debug_mode_value #endif -TEST(QuicPlatformTest, QuicDLog) { - QuicLogThresholdSaver saver; - +TEST_F(QuicPlatformTest, QuicDLog) { int i = 0; GetLogger().set_level(ERROR); @@ -375,7 +369,7 @@ TEST(QuicPlatformTest, QuicDLog) { #undef VALUE_BY_COMPILE_MODE -TEST(QuicPlatformTest, QuicCHECK) { +TEST_F(QuicPlatformTest, QuicCHECK) { CHECK(1 == 1); CHECK(1 == 1) << " 1 == 1 is forever true."; @@ -383,31 +377,31 @@ TEST(QuicPlatformTest, QuicCHECK) { "CHECK failed:.* Supposed to fail in debug mode."); EXPECT_DEBUG_DEATH({ DCHECK(false); }, "CHECK failed"); - EXPECT_DEATH({ CHECK(false) << " Supposed to fail in all modes."; }, - "CHECK failed:.* Supposed to fail in all modes."); - EXPECT_DEATH({ CHECK(false); }, "CHECK failed"); + EXPECT_DEATH_LOG_TO_STDERR({ CHECK(false) << " Supposed to fail in all modes."; }, + "CHECK failed:.* Supposed to fail in all modes."); + EXPECT_DEATH_LOG_TO_STDERR({ CHECK(false); }, "CHECK failed"); } // Test the behaviors of the cross products of // // {QUIC_LOG, QUIC_DLOG} x {FATAL, DFATAL} x {debug, release} -TEST(QuicPlatformTest, QuicFatalLog) { +TEST_F(QuicPlatformTest, QuicFatalLog) { #ifdef NDEBUG // Release build - EXPECT_DEATH(QUIC_LOG(FATAL) << "Should abort 0", "Should abort 0"); + EXPECT_DEATH_LOG_TO_STDERR(QUIC_LOG(FATAL) << "Should abort 0", "Should abort 0"); QUIC_LOG(DFATAL) << "Should not abort"; QUIC_DLOG(FATAL) << "Should compile out"; QUIC_DLOG(DFATAL) << "Should compile out"; #else // Debug build - EXPECT_DEATH(QUIC_LOG(FATAL) << "Should abort 1", "Should abort 1"); - EXPECT_DEATH(QUIC_LOG(DFATAL) << "Should abort 2", "Should abort 2"); - EXPECT_DEATH(QUIC_DLOG(FATAL) << "Should abort 3", "Should abort 3"); - EXPECT_DEATH(QUIC_DLOG(DFATAL) << "Should abort 4", "Should abort 4"); + EXPECT_DEATH_LOG_TO_STDERR(QUIC_LOG(FATAL) << "Should abort 1", "Should abort 1"); 
+ EXPECT_DEATH_LOG_TO_STDERR(QUIC_LOG(DFATAL) << "Should abort 2", "Should abort 2"); + EXPECT_DEATH_LOG_TO_STDERR(QUIC_DLOG(FATAL) << "Should abort 3", "Should abort 3"); + EXPECT_DEATH_LOG_TO_STDERR(QUIC_DLOG(DFATAL) << "Should abort 4", "Should abort 4"); #endif } -TEST(QuicPlatformTest, QuicBranchPrediction) { +TEST_F(QuicPlatformTest, QuicBranchPrediction) { GetLogger().set_level(INFO); if (QUIC_PREDICT_FALSE(rand() % RAND_MAX == 123456789)) { @@ -417,15 +411,15 @@ TEST(QuicPlatformTest, QuicBranchPrediction) { } } -TEST(QuicPlatformTest, QuicNotReached) { +TEST_F(QuicPlatformTest, QuicNotReached) { #ifdef NDEBUG QUIC_NOTREACHED(); // Expect no-op. #else - EXPECT_DEATH(QUIC_NOTREACHED(), "not reached"); + EXPECT_DEATH_LOG_TO_STDERR(QUIC_NOTREACHED(), "not reached"); #endif } -TEST(QuicPlatformTest, QuicMutex) { +TEST_F(QuicPlatformTest, QuicMutex) { QuicMutex mu; QuicWriterMutexLock wmu(&mu); @@ -438,7 +432,7 @@ TEST(QuicPlatformTest, QuicMutex) { mu.WriterLock(); } -TEST(QuicPlatformTest, QuicNotification) { +TEST_F(QuicPlatformTest, QuicNotification) { QuicNotification notification; EXPECT_FALSE(notification.HasBeenNotified()); notification.Notify(); @@ -446,7 +440,7 @@ TEST(QuicPlatformTest, QuicNotification) { EXPECT_TRUE(notification.HasBeenNotified()); } -TEST(QuicPlatformTest, QuicCertUtils) { +TEST_F(QuicPlatformTest, QuicCertUtils) { bssl::UniquePtr x509_cert = Envoy::Extensions::TransportSockets::Tls::readCertFromFile(Envoy::TestEnvironment::substitute( "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem")); @@ -467,9 +461,7 @@ TEST(QuicPlatformTest, QuicCertUtils) { OPENSSL_free(static_cast(der)); } -TEST(QuicPlatformTest, QuicTestOutput) { - QuicLogThresholdSaver saver; - +TEST_F(QuicPlatformTest, QuicTestOutput) { Envoy::TestEnvironment::setEnvVar("QUIC_TEST_OUTPUT_DIR", "/tmp", /*overwrite=*/false); // Set log level to INFO to see the test output path in log. 
@@ -485,7 +477,7 @@ TEST(QuicPlatformTest, QuicTestOutput) { class FileUtilsTest : public testing::Test { public: - FileUtilsTest() : dir_path_(Envoy::TestEnvironment::temporaryPath("envoy_test")) { + FileUtilsTest() : dir_path_(Envoy::TestEnvironment::temporaryPath("quic_file_util_test")) { files_to_remove_.push(dir_path_); } diff --git a/test/run_envoy_bazel_coverage.sh b/test/run_envoy_bazel_coverage.sh index 58845bcc9b9dc..21a10464b774b 100755 --- a/test/run_envoy_bazel_coverage.sh +++ b/test/run_envoy_bazel_coverage.sh @@ -10,20 +10,36 @@ set -e [[ -z "${WORKSPACE}" ]] && WORKSPACE=envoy [[ -z "${VALIDATE_COVERAGE}" ]] && VALIDATE_COVERAGE=true +echo "Starting run_envoy_bazel_coverage.sh..." +echo " PWD=$(pwd)" +echo " SRCDIR=${SRCDIR}" +echo " GCOVR_DIR=${GCOVR_DIR}" +echo " TESTLOGS_DIR=${TESTLOGS_DIR}" +echo " BAZEL_COVERAGE=${BAZEL_COVERAGE}" +echo " GCOVR=${GCOVR}" +echo " WORKSPACE=${WORKSPACE}" +echo " VALIDATE_COVERAGE=${VALIDATE_COVERAGE}" + # This is the target that will be run to generate coverage data. It can be overridden by consumer # projects that want to run coverage on a different/combined target. [[ -z "${COVERAGE_TARGET}" ]] && COVERAGE_TARGET="//test/coverage:coverage_tests" +# This is where we are going to copy the .gcno files into. +GCNO_ROOT=bazel-out/k8-dbg/bin/"${COVERAGE_TARGET/:/\/}".runfiles/"${WORKSPACE}" +echo " GCNO_ROOT=${GCNO_ROOT}" +rm -rf ${GCNO_ROOT} # Make sure ${COVERAGE_TARGET} is up-to-date. SCRIPT_DIR="$(realpath "$(dirname "$0")")" (BAZEL_BIN="${BAZEL_COVERAGE}" "${SCRIPT_DIR}"/coverage/gen_build.sh) echo "Cleaning .gcda/.gcov from previous coverage runs..." +NUM_PREVIOUS_GCOV_FILES=0 for f in $(find -L "${GCOVR_DIR}" -name "*.gcda" -o -name "*.gcov") do rm -f "${f}" + let NUM_PREVIOUS_GCOV_FILES=NUM_PREVIOUS_GCOV_FILES+1 done -echo "Cleanup completed." +echo "Cleanup completed. ${NUM_PREVIOUS_GCOV_FILES} files deleted." # Force dbg for path consistency later, don't include debug code in coverage. 
BAZEL_TEST_OPTIONS="${BAZEL_TEST_OPTIONS} -c dbg --copt=-DNDEBUG" @@ -54,19 +70,22 @@ COVERAGE_SUMMARY="${COVERAGE_DIR}/coverage_summary.txt" # Copy .gcno objects into the same location that we find the .gcda. # TODO(htuch): Should use rsync, but there are some symlink loops to fight. -pushd "${GCOVR_DIR}" +echo "Finding and copying .gcno files in GCOVR_DIR: ${GCOVR_DIR}" +mkdir -p ${GCNO_ROOT} +NUM_GCNO_FILES=0 for f in $(find -L bazel-out/ -name "*.gcno") do - cp --parents "$f" bazel-out/k8-dbg/bin/"${COVERAGE_TARGET/:/\/}".runfiles/"${WORKSPACE}" + cp --parents "$f" ${GCNO_ROOT}/ + let NUM_GCNO_FILES=NUM_GCNO_FILES+1 done -popd +echo "OK: copied ${NUM_GCNO_FILES} .gcno files" # gcovr is extremely picky about where it is run and where the paths of the # original source are relative to its execution location. -cd "${SRCDIR}" -echo "Running gcovr..." -time "${GCOVR}" --gcov-exclude="${GCOVR_EXCLUDE_REGEX}" \ - --exclude-directories="${GCOVR_EXCLUDE_DIR}" --object-directory="${GCOVR_DIR}" -r "${SRCDIR}" \ +cd -P "${GCOVR_DIR}" +echo "Running gcovr in $(pwd)..." +time "${GCOVR}" -v --gcov-exclude="${GCOVR_EXCLUDE_REGEX}" \ + --exclude-directories="${GCOVR_EXCLUDE_DIR}" -r . 
\ --html --html-details --exclude-unreachable-branches --print-summary \ -o "${COVERAGE_DIR}"/coverage.html > "${COVERAGE_SUMMARY}" diff --git a/test/test_common/utility.h b/test/test_common/utility.h index a2cb7548c2c84..2c1464bf05505 100644 --- a/test/test_common/utility.h +++ b/test/test_common/utility.h @@ -80,7 +80,7 @@ namespace Envoy { */ #define EXPECT_DEATH_LOG_TO_STDERR(statement, message) \ do { \ - Logger::StderrSinkDelegate stderr_sink(Logger::Registry::getSink()); \ + Envoy::Logger::StderrSinkDelegate stderr_sink(Envoy::Logger::Registry::getSink()); \ EXPECT_DEATH(statement, message); \ } while (false) From dcbe3fefc49c6ab845f0d186962c18f4b2853232 Mon Sep 17 00:00:00 2001 From: Snow Pettersen Date: Fri, 5 Apr 2019 11:54:32 -0700 Subject: [PATCH 067/165] subset lb: avoid partitioning host lists on worker threads (#6302) This changes the subset LB to not read host.health() on the worker thread, instead relying on the partitioning made on the main thread. This ensures consistency as host.health() might change due to health checks, config updates, etc. Also ensures consistent handling of metadata reads by caching the result of the predicate. This is likely to come with a perf improvement, as we're now making fewer metadata reads. 
Addresses #6301 Signed-off-by: Snow Pettersen --- source/common/upstream/subset_lb.cc | 98 ++++++++++++++------------ test/common/upstream/subset_lb_test.cc | 19 ++++- 2 files changed, 72 insertions(+), 45 deletions(-) diff --git a/source/common/upstream/subset_lb.cc b/source/common/upstream/subset_lb.cc index 02e231e58e6fe..19d68fb4b19be 100644 --- a/source/common/upstream/subset_lb.cc +++ b/source/common/upstream/subset_lb.cc @@ -513,70 +513,80 @@ SubsetLoadBalancer::PrioritySubsetImpl::PrioritySubsetImpl(const SubsetLoadBalan void SubsetLoadBalancer::HostSubsetImpl::update(const HostVector& hosts_added, const HostVector& hosts_removed, std::function predicate) { - std::unordered_set predicate_added; - - HostVector filtered_added; - for (const auto host : hosts_added) { + // We cache the result of matching the host against the predicate. This ensures + // that we maintain a consistent view of the metadata and saves on computation + // since metadata lookups can be expensive. + // + // We use an unordered_set because this can potentially be in the tens of thousands. + std::unordered_set matching_hosts; + + auto cached_predicate = [&matching_hosts](const auto& host) { + return matching_hosts.count(&host) == 1; + }; + + // TODO(snowp): If we had a unhealthyHosts() function we could avoid potentially traversing + // the list of hosts twice. 
+ auto hosts = std::make_shared(); + hosts->reserve(original_host_set_.hosts().size()); + for (const auto& host : original_host_set_.hosts()) { if (predicate(*host)) { - predicate_added.insert(host); - filtered_added.emplace_back(host); + matching_hosts.insert(host.get()); + hosts->emplace_back(host); } } - HostVector filtered_removed; - for (const auto host : hosts_removed) { - if (predicate(*host)) { - filtered_removed.emplace_back(host); + auto healthy_hosts = std::make_shared(); + healthy_hosts->reserve(original_host_set_.healthyHosts().size()); + for (const auto& host : original_host_set_.healthyHosts()) { + if (cached_predicate(*host)) { + healthy_hosts->emplace_back(host); } } - HostVectorSharedPtr hosts(new HostVector()); - HostVectorSharedPtr healthy_hosts(new HostVector()); - HostVectorSharedPtr degraded_hosts(new HostVector()); - - // It's possible that hosts_added == original_host_set_.hosts(), e.g.: when - // calling refreshSubsets() if only metadata change. If so, we can avoid the - // predicate() call. - for (const auto host : original_host_set_.hosts()) { - bool host_seen = predicate_added.count(host) == 1; - if (host_seen || predicate(*host)) { - hosts->emplace_back(host); - switch (host->health()) { - case Host::Health::Healthy: - healthy_hosts->emplace_back(host); - break; - case Host::Health::Degraded: - degraded_hosts->emplace_back(host); - break; - case Host::Health::Unhealthy: - break; - } + auto degraded_hosts = std::make_shared(); + degraded_hosts->reserve(original_host_set_.degradedHosts().size()); + for (const auto& host : original_host_set_.degradedHosts()) { + if (cached_predicate(*host)) { + degraded_hosts->emplace_back(host); } } - // Calling predicate() is expensive since it involves metadata lookups; so we - // avoid it in the 2nd call to filter() by using the result from the first call - // to filter() as the starting point. 
- // - // Also, if we only have one locality we can avoid the first call to filter() by + // If we only have one locality we can avoid the first call to filter() by // just creating a new HostsPerLocality from the list of all hosts. // // TODO(rgs1): merge these two filter() calls in one loop. HostsPerLocalityConstSharedPtr hosts_per_locality; if (original_host_set_.hostsPerLocality().get().size() == 1) { - hosts_per_locality.reset( - new HostsPerLocalityImpl(*hosts, original_host_set_.hostsPerLocality().hasLocalLocality())); + hosts_per_locality = std::make_shared( + *hosts, original_host_set_.hostsPerLocality().hasLocalLocality()); } else { - hosts_per_locality = original_host_set_.hostsPerLocality().filter(predicate); + hosts_per_locality = original_host_set_.hostsPerLocality().filter(cached_predicate); } - HostsPerLocalityConstSharedPtr healthy_hosts_per_locality = hosts_per_locality->filter( - [](const Host& host) { return host.health() == Host::Health::Healthy; }); - HostsPerLocalityConstSharedPtr degraded_hosts_per_locality = hosts_per_locality->filter( - [](const Host& host) { return host.health() == Host::Health::Degraded; }); + HostsPerLocalityConstSharedPtr healthy_hosts_per_locality = + original_host_set_.healthyHostsPerLocality().filter(cached_predicate); + HostsPerLocalityConstSharedPtr degraded_hosts_per_locality = + original_host_set_.degradedHostsPerLocality().filter(cached_predicate); + + // We can use the cached predicate here, since we trust that the hosts in hosts_added were also + // present in the list of all hosts. + HostVector filtered_added; + for (const auto& host : hosts_added) { + if (cached_predicate(*host)) { + filtered_added.emplace_back(host); + } + } + + // Since the removed hosts would not be present in the list of all hosts, we need to evaluate the + // predicate directly for these hosts. 
+ HostVector filtered_removed; + for (const auto& host : hosts_removed) { + if (predicate(*host)) { + filtered_removed.emplace_back(host); + } + } - // TODO(snowp): Use partitionHosts here. HostSetImpl::updateHosts(HostSetImpl::updateHostsParams( hosts, hosts_per_locality, healthy_hosts, healthy_hosts_per_locality, degraded_hosts, degraded_hosts_per_locality), diff --git a/test/common/upstream/subset_lb_test.cc b/test/common/upstream/subset_lb_test.cc index fc313ebcabcab..6dbd9b62c047e 100644 --- a/test/common/upstream/subset_lb_test.cc +++ b/test/common/upstream/subset_lb_test.cc @@ -198,7 +198,7 @@ class SubsetLoadBalancerTest : public testing::TestWithParam { host_set_.hosts_ = hosts; host_set_.hosts_per_locality_ = makeHostsPerLocality(std::move(hosts_per_locality)); - host_set_.healthy_hosts_ = host_set_.healthy_hosts_; + host_set_.healthy_hosts_ = host_set_.hosts_; host_set_.healthy_hosts_per_locality_ = host_set_.hosts_per_locality_; local_hosts_.reset(new HostVector()); @@ -1458,6 +1458,23 @@ TEST_F(SubsetLoadBalancerTest, DisabledLocalityWeightAwareness) { EXPECT_EQ(host_set_.healthy_hosts_per_locality_->get()[0][0], lb_->chooseHost(&context)); } +// Verifies that we do *not* invoke health() on hosts when constructing the load balancer. Since +// health is modified concurrently from multiple threads, it is not safe to call on the worker +// threads. 
+TEST_F(SubsetLoadBalancerTest, DoesNotCheckHostHealth) { + EXPECT_CALL(subset_info_, isEnabled()).WillRepeatedly(Return(true)); + + auto mock_host = std::make_shared(); + HostVector hosts{mock_host}; + host_set_.hosts_ = hosts; + + EXPECT_CALL(*mock_host, weight()).WillRepeatedly(Return(1)); + + lb_.reset(new SubsetLoadBalancer(lb_type_, priority_set_, nullptr, stats_, stats_store_, runtime_, + random_, subset_info_, ring_hash_lb_config_, + least_request_lb_config_, common_config_)); +} + TEST_F(SubsetLoadBalancerTest, EnabledLocalityWeightAwareness) { EXPECT_CALL(subset_info_, isEnabled()).WillRepeatedly(Return(true)); EXPECT_CALL(subset_info_, localityWeightAware()).WillRepeatedly(Return(true)); From 70c408b858f9b9824ed9109f2264eacc20724d4d Mon Sep 17 00:00:00 2001 From: Lizan Zhou Date: Fri, 5 Apr 2019 17:21:14 -0700 Subject: [PATCH 068/165] build: update PGV url (#6495) Signed-off-by: Lizan Zhou --- api/bazel/repository_locations.bzl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api/bazel/repository_locations.bzl b/api/bazel/repository_locations.bzl index 6d68524399fad..e4489eb3b17bd 100644 --- a/api/bazel/repository_locations.bzl +++ b/api/bazel/repository_locations.bzl @@ -25,7 +25,7 @@ REPOSITORY_LOCATIONS = dict( com_lyft_protoc_gen_validate = dict( sha256 = PGV_SHA256, strip_prefix = "protoc-gen-validate-" + PGV_RELEASE, - urls = ["https://github.com/lyft/protoc-gen-validate/archive/v" + PGV_RELEASE + ".tar.gz"], + urls = ["https://github.com/envoyproxy/protoc-gen-validate/archive/v" + PGV_RELEASE + ".tar.gz"], ), googleapis = dict( # TODO(dio): Consider writing a Skylark macro for importing Google API proto. 
From 7c627275f9de3e59990a85b24d240e67a8197131 Mon Sep 17 00:00:00 2001 From: soya3129 <43042079+soya3129@users.noreply.github.com> Date: Sat, 6 Apr 2019 13:57:07 -0400 Subject: [PATCH 069/165] Common: Introduce StopAllIteration filter status for decoding and encoding filters (#5954) Signed-off-by: Yang Song --- include/envoy/http/filter.h | 25 +- source/common/http/conn_manager_impl.cc | 175 ++++++++---- source/common/http/conn_manager_impl.h | 75 +++++- test/common/http/conn_manager_impl_test.cc | 142 ++++++++-- test/integration/BUILD | 3 + test/integration/filters/BUILD | 48 ++++ .../filters/call_decodedata_once_filter.cc | 48 ++++ .../decode_headers_return_stop_all_filter.cc | 120 +++++++++ .../encode_headers_return_stop_all_filter.cc | 93 +++++++ test/integration/protocol_integration_test.cc | 252 +++++++++++++++++- 10 files changed, 899 insertions(+), 82 deletions(-) create mode 100644 test/integration/filters/call_decodedata_once_filter.cc create mode 100644 test/integration/filters/decode_headers_return_stop_all_filter.cc create mode 100644 test/integration/filters/encode_headers_return_stop_all_filter.cc diff --git a/include/envoy/http/filter.h b/include/envoy/http/filter.h index c24a07e928b17..eec28670591ed 100644 --- a/include/envoy/http/filter.h +++ b/include/envoy/http/filter.h @@ -33,7 +33,30 @@ enum class FilterHeadersStatus { StopIteration, // Continue iteration to remaining filters, but ignore any subsequent data or trailers. This // results in creating a header only request/response. - ContinueAndEndStream + ContinueAndEndStream, + // Do not iterate for headers as well as data and trailers for the current filter and the filters + // following, and buffer body data for later dispatching. ContinueDecoding() MUST + // be called if continued filter iteration is desired. + // + // Used when a filter wants to stop iteration on data and trailers while waiting for headers' + // iteration to resume. 
+ // + // If buffering the request causes buffered data to exceed the configured buffer limit, a 413 will + // be sent to the user. On the response path exceeding buffer limits will result in a 500. + // + // TODO(soya3129): stop metadata parsing when StopAllIterationAndBuffer is set. + StopAllIterationAndBuffer, + // Do not iterate for headers as well as data and trailers for the current filter and the filters + // following, and buffer body data for later dispatching. continueDecoding() MUST + // be called if continued filter iteration is desired. + // + // Used when a filter wants to stop iteration on data and trailers while waiting for headers' + // iteration to resume. + // + // This will cause the flow of incoming data to cease until continueDecoding() function is called. + // + // TODO(soya3129): stop metadata parsing when StopAllIterationAndWatermark is set. + StopAllIterationAndWatermark, }; /** diff --git a/source/common/http/conn_manager_impl.cc b/source/common/http/conn_manager_impl.cc index 80932dc69bd11..1fe100755b71d 100644 --- a/source/common/http/conn_manager_impl.cc +++ b/source/common/http/conn_manager_impl.cc @@ -793,13 +793,10 @@ void ConnectionManagerImpl::ActiveStream::traceRequest() { void ConnectionManagerImpl::ActiveStream::decodeHeaders(ActiveStreamDecoderFilter* filter, HeaderMap& headers, bool end_stream) { - std::list::iterator entry; + // Headers filter iteration should always start with the next filter if available. 
+ std::list::iterator entry = + commonDecodePrefix(filter, FilterIterationStartState::AlwaysStartFromNext); std::list::iterator continue_data_entry = decoder_filters_.end(); - if (!filter) { - entry = decoder_filters_.begin(); - } else { - entry = std::next(filter->entry()); - } for (; entry != decoder_filters_.end(); entry++) { ASSERT(!(state_.filter_call_state_ & FilterCallState::DecodeHeaders)); @@ -830,9 +827,11 @@ void ConnectionManagerImpl::ActiveStream::decodeHeaders(ActiveStreamDecoderFilte if (continue_data_entry != decoder_filters_.end()) { // We use the continueDecoding() code since it will correctly handle not calling - // decodeHeaders() again. Fake setting stopped_ since the continueDecoding() code expects it. + // decodeHeaders() again. Fake setting StopSingleIteration since the continueDecoding() code + // expects it. ASSERT(buffered_request_data_); - (*continue_data_entry)->stopped_ = true; + (*continue_data_entry)->iteration_state_ = + ActiveStreamFilterBase::IterationState::StopSingleIteration; (*continue_data_entry)->continueDecoding(); } @@ -845,11 +844,12 @@ void ConnectionManagerImpl::ActiveStream::decodeData(Buffer::Instance& data, boo maybeEndDecode(end_stream); stream_info_.addBytesReceived(data.length()); - decodeData(nullptr, data, end_stream); + decodeData(nullptr, data, end_stream, FilterIterationStartState::CanStartFromCurrent); } -void ConnectionManagerImpl::ActiveStream::decodeData(ActiveStreamDecoderFilter* filter, - Buffer::Instance& data, bool end_stream) { +void ConnectionManagerImpl::ActiveStream::decodeData( + ActiveStreamDecoderFilter* filter, Buffer::Instance& data, bool end_stream, + FilterIterationStartState filter_iteration_start_state) { resetIdleTimer(); // If we previously decided to decode only the headers, do nothing here. 
@@ -863,16 +863,17 @@ void ConnectionManagerImpl::ActiveStream::decodeData(ActiveStreamDecoderFilter* return; } - std::list::iterator entry; auto trailers_added_entry = decoder_filters_.end(); const bool trailers_exists_at_start = request_trailers_ != nullptr; - if (!filter) { - entry = decoder_filters_.begin(); - } else { - entry = std::next(filter->entry()); - } + // Filter iteration may start at the current filter. + std::list::iterator entry = + commonDecodePrefix(filter, filter_iteration_start_state); for (; entry != decoder_filters_.end(); entry++) { + // If the filter pointed by entry has stopped for all frame types, return now. + if (handleDataIfStopAll(**entry, data, state_.decoder_filters_streaming_)) { + return; + } // If end_stream_ is marked for a filter, the data is not for this filter and filters after. // // In following case, ActiveStreamFilterBase::commonContinue() could be called recursively and @@ -979,7 +980,7 @@ void ConnectionManagerImpl::ActiveStream::addDecodedData(ActiveStreamDecoderFilt } else if (state_.filter_call_state_ & FilterCallState::DecodeTrailers) { // In this case we need to inline dispatch the data to further filters. If those filters // choose to buffer/stop iteration that's fine. - decodeData(&filter, data, false); + decodeData(&filter, data, false, FilterIterationStartState::AlwaysStartFromNext); } else { // TODO(mattklein123): Formalize error handling for filters and add tests. Should probably // throw an exception here. @@ -1006,14 +1007,16 @@ void ConnectionManagerImpl::ActiveStream::decodeTrailers(ActiveStreamDecoderFilt return; } - std::list::iterator entry; - if (!filter) { - entry = decoder_filters_.begin(); - } else { - entry = std::next(filter->entry()); - } + // Filter iteration may start at the current filter. 
+ std::list::iterator entry = + commonDecodePrefix(filter, FilterIterationStartState::CanStartFromCurrent); for (; entry != decoder_filters_.end(); entry++) { + // If the filter pointed by entry has stopped for all frame type, return now. + if ((*entry)->stoppedAll()) { + return; + } + ASSERT(!(state_.filter_call_state_ & FilterCallState::DecodeTrailers)); state_.filter_call_state_ |= FilterCallState::DecodeTrailers; FilterTrailersStatus status = (*entry)->handle_->decodeTrailers(trailers); @@ -1045,20 +1048,39 @@ void ConnectionManagerImpl::ActiveStream::disarmRequestTimeout() { } std::list::iterator -ConnectionManagerImpl::ActiveStream::commonEncodePrefix(ActiveStreamEncoderFilter* filter, - bool end_stream) { +ConnectionManagerImpl::ActiveStream::commonEncodePrefix( + ActiveStreamEncoderFilter* filter, bool end_stream, + FilterIterationStartState filter_iteration_start_state) { // Only do base state setting on the initial call. Subsequent calls for filtering do not touch // the base state. if (filter == nullptr) { ASSERT(!state_.local_complete_); state_.local_complete_ = end_stream; + return encoder_filters_.begin(); + } + + if (filter_iteration_start_state == FilterIterationStartState::CanStartFromCurrent && + (*(filter->entry()))->iterate_from_current_filter_) { + // The filter iteration has been stopped for all frame types, and now the iteration continues. + // The current filter's encoding callback has not be called. Call it now. 
+ return filter->entry(); } + return std::next(filter->entry()); +} +std::list::iterator +ConnectionManagerImpl::ActiveStream::commonDecodePrefix( + ActiveStreamDecoderFilter* filter, FilterIterationStartState filter_iteration_start_state) { if (!filter) { - return encoder_filters_.begin(); - } else { - return std::next(filter->entry()); + return decoder_filters_.begin(); } + if (filter_iteration_start_state == FilterIterationStartState::CanStartFromCurrent && + (*(filter->entry()))->iterate_from_current_filter_) { + // The filter iteration has been stopped for all frame types, and now the iteration continues. + // The current filter's callback function has not been called. Call it now. + return filter->entry(); + } + return std::next(filter->entry()); } void ConnectionManagerImpl::startDrainSequence() { @@ -1109,7 +1131,8 @@ void ConnectionManagerImpl::ActiveStream::sendLocalReply( [this](Buffer::Instance& data, bool end_stream) -> void { // TODO: Start encoding from the last decoder filter that saw the // request instead. - encodeData(nullptr, data, end_stream); + encodeData(nullptr, data, end_stream, + FilterIterationStartState::CanStartFromCurrent); }, state_.destroyed_, code, body, grpc_status, is_head_request); } @@ -1125,7 +1148,9 @@ void ConnectionManagerImpl::ActiveStream::encode100ContinueHeaders( // filter. This is simpler than that case because 100 continue implies no // end-stream, and because there are normal headers coming there's no need for // complex continuation logic. - std::list::iterator entry = commonEncodePrefix(filter, false); + // 100-continue filter iteration should always start with the next filter if available. 
+ std::list::iterator entry = + commonEncodePrefix(filter, false, FilterIterationStartState::AlwaysStartFromNext); for (; entry != encoder_filters_.end(); entry++) { ASSERT(!(state_.filter_call_state_ & FilterCallState::Encode100ContinueHeaders)); state_.filter_call_state_ |= FilterCallState::Encode100ContinueHeaders; @@ -1156,7 +1181,9 @@ void ConnectionManagerImpl::ActiveStream::encodeHeaders(ActiveStreamEncoderFilte resetIdleTimer(); disarmRequestTimeout(); - std::list::iterator entry = commonEncodePrefix(filter, end_stream); + // Headers filter iteration should always start with the next filter if available. + std::list::iterator entry = + commonEncodePrefix(filter, end_stream, FilterIterationStartState::AlwaysStartFromNext); std::list::iterator continue_data_entry = encoder_filters_.end(); for (; entry != encoder_filters_.end(); entry++) { @@ -1284,9 +1311,11 @@ void ConnectionManagerImpl::ActiveStream::encodeHeaders(ActiveStreamEncoderFilte encoding_headers_only_ || (end_stream && continue_data_entry == encoder_filters_.end())); if (continue_data_entry != encoder_filters_.end()) { // We use the continueEncoding() code since it will correctly handle not calling - // encodeHeaders() again. Fake setting stopped_ since the continueEncoding() code expects it. + // encodeHeaders() again. Fake setting StopSingleIteration since the continueEncoding() code + // expects it. ASSERT(buffered_response_data_); - (*continue_data_entry)->stopped_ = true; + (*continue_data_entry)->iteration_state_ = + ActiveStreamFilterBase::IterationState::StopSingleIteration; (*continue_data_entry)->continueEncoding(); } else { // End encoding if this is a header only response, either due to a filter converting it to one @@ -1342,7 +1371,7 @@ void ConnectionManagerImpl::ActiveStream::addEncodedData(ActiveStreamEncoderFilt } else if (state_.filter_call_state_ & FilterCallState::EncodeTrailers) { // In this case we need to inline dispatch the data to further filters. 
If those filters // choose to buffer/stop iteration that's fine. - encodeData(&filter, data, false); + encodeData(&filter, data, false, FilterIterationStartState::AlwaysStartFromNext); } else { // TODO(mattklein123): Formalize error handling for filters and add tests. Should probably // throw an exception here. @@ -1350,8 +1379,9 @@ void ConnectionManagerImpl::ActiveStream::addEncodedData(ActiveStreamEncoderFilt } } -void ConnectionManagerImpl::ActiveStream::encodeData(ActiveStreamEncoderFilter* filter, - Buffer::Instance& data, bool end_stream) { +void ConnectionManagerImpl::ActiveStream::encodeData( + ActiveStreamEncoderFilter* filter, Buffer::Instance& data, bool end_stream, + FilterIterationStartState filter_iteration_start_state) { resetIdleTimer(); // If we previously decided to encode only the headers, do nothing here. @@ -1359,11 +1389,17 @@ void ConnectionManagerImpl::ActiveStream::encodeData(ActiveStreamEncoderFilter* return; } - std::list::iterator entry = commonEncodePrefix(filter, end_stream); + // Filter iteration may start at the current filter. + std::list::iterator entry = + commonEncodePrefix(filter, end_stream, filter_iteration_start_state); auto trailers_added_entry = encoder_filters_.end(); const bool trailers_exists_at_start = response_trailers_ != nullptr; for (; entry != encoder_filters_.end(); entry++) { + // If the filter pointed by entry has stopped for all frame type, return now. + if (handleDataIfStopAll(**entry, data, state_.encoder_filters_streaming_)) { + return; + } // If end_stream_ is marked for a filter, the data is not for this filter and filters after. // For details, please see the comment in the ActiveStream::decodeData() function. if ((*entry)->end_stream_) { @@ -1428,8 +1464,14 @@ void ConnectionManagerImpl::ActiveStream::encodeTrailers(ActiveStreamEncoderFilt return; } - std::list::iterator entry = commonEncodePrefix(filter, true); + // Filter iteration may start at the current filter. 
+ std::list::iterator entry = + commonEncodePrefix(filter, true, FilterIterationStartState::CanStartFromCurrent); for (; entry != encoder_filters_.end(); entry++) { + // If the filter pointed by entry has stopped for all frame type, return now. + if ((*entry)->stoppedAll()) { + return; + } ASSERT(!(state_.filter_call_state_ & FilterCallState::EncodeTrailers)); state_.filter_call_state_ |= FilterCallState::EncodeTrailers; FilterTrailersStatus status = (*entry)->handle_->encodeTrailers(trailers); @@ -1457,6 +1499,19 @@ void ConnectionManagerImpl::ActiveStream::maybeEndEncode(bool end_stream) { } } +bool ConnectionManagerImpl::ActiveStream::handleDataIfStopAll(ActiveStreamFilterBase& filter, + Buffer::Instance& data, + bool& filter_streaming) { + if (filter.stoppedAll()) { + ASSERT(!filter.canIterate()); + filter_streaming = + filter.iteration_state_ == ActiveStreamFilterBase::IterationState::StopAllWatermark; + filter.commonHandleBufferData(data); + return true; + } + return false; +} + void ConnectionManagerImpl::ActiveStream::onResetStream(StreamResetReason, absl::string_view) { // NOTE: This function gets called in all of the following cases: // 1) We TX an app level reset @@ -1559,8 +1614,13 @@ void ConnectionManagerImpl::ActiveStreamFilterBase::commonContinue() { ENVOY_STREAM_LOG(trace, "continuing filter chain: filter={}", parent_, static_cast(this)); - ASSERT(stopped_); - stopped_ = false; + ASSERT(!canIterate()); + // If iteration has stopped for all frame types, set iterate_from_current_filter_ to true so the + // filter iteration starts with the current filter instead of the next one. + if (stoppedAll()) { + iterate_from_current_filter_ = true; + } + allowIteration(); // Only resume with do100ContinueHeaders() if we've actually seen a 100-Continue. 
if (parent_.has_continue_headers_ && !continue_headers_continued_) { @@ -1590,16 +1650,18 @@ void ConnectionManagerImpl::ActiveStreamFilterBase::commonContinue() { if (trailers()) { doTrailers(); } + + iterate_from_current_filter_ = false; } bool ConnectionManagerImpl::ActiveStreamFilterBase::commonHandleAfter100ContinueHeadersCallback( FilterHeadersStatus status) { ASSERT(parent_.has_continue_headers_); ASSERT(!continue_headers_continued_); - ASSERT(!stopped_); + ASSERT(canIterate()); if (status == FilterHeadersStatus::StopIteration) { - stopped_ = true; + iteration_state_ = IterationState::StopSingleIteration; return false; } else { ASSERT(status == FilterHeadersStatus::Continue); @@ -1611,10 +1673,16 @@ bool ConnectionManagerImpl::ActiveStreamFilterBase::commonHandleAfter100Continue bool ConnectionManagerImpl::ActiveStreamFilterBase::commonHandleAfterHeadersCallback( FilterHeadersStatus status, bool& headers_only) { ASSERT(!headers_continued_); - ASSERT(!stopped_); + ASSERT(canIterate()); if (status == FilterHeadersStatus::StopIteration) { - stopped_ = true; + iteration_state_ = IterationState::StopSingleIteration; + return false; + } else if (status == FilterHeadersStatus::StopAllIterationAndBuffer) { + iteration_state_ = IterationState::StopAllBuffer; + return false; + } else if (status == FilterHeadersStatus::StopAllIterationAndWatermark) { + iteration_state_ = IterationState::StopAllWatermark; return false; } else if (status == FilterHeadersStatus::ContinueAndEndStream) { // Set headers_only to true so we know to end early if necessary, @@ -1650,7 +1718,7 @@ bool ConnectionManagerImpl::ActiveStreamFilterBase::commonHandleAfterDataCallbac FilterDataStatus status, Buffer::Instance& provided_data, bool& buffer_was_streaming) { if (status == FilterDataStatus::Continue) { - if (stopped_) { + if (iteration_state_ == IterationState::StopSingleIteration) { commonHandleBufferData(provided_data); commonContinue(); return false; @@ -1658,7 +1726,7 @@ bool 
ConnectionManagerImpl::ActiveStreamFilterBase::commonHandleAfterDataCallbac ASSERT(headers_continued_); } } else { - stopped_ = true; + iteration_state_ = IterationState::StopSingleIteration; if (status == FilterDataStatus::StopIterationAndBuffer || status == FilterDataStatus::StopIterationAndWatermark) { buffer_was_streaming = status == FilterDataStatus::StopIterationAndWatermark; @@ -1675,7 +1743,7 @@ bool ConnectionManagerImpl::ActiveStreamFilterBase::commonHandleAfterTrailersCal FilterTrailersStatus status) { if (status == FilterTrailersStatus::Continue) { - if (stopped_) { + if (iteration_state_ == IterationState::StopSingleIteration) { commonContinue(); return false; } else { @@ -1751,7 +1819,8 @@ void ConnectionManagerImpl::ActiveStreamDecoderFilter::addDecodedData(Buffer::In void ConnectionManagerImpl::ActiveStreamDecoderFilter::injectDecodedDataToFilterChain( Buffer::Instance& data, bool end_stream) { - parent_.decodeData(this, data, end_stream); + parent_.decodeData(this, data, end_stream, + ActiveStream::FilterIterationStartState::CanStartFromCurrent); } void ConnectionManagerImpl::ActiveStreamDecoderFilter::continueDecoding() { commonContinue(); } @@ -1775,7 +1844,8 @@ void ConnectionManagerImpl::ActiveStreamDecoderFilter::encodeHeaders(HeaderMapPt void ConnectionManagerImpl::ActiveStreamDecoderFilter::encodeData(Buffer::Instance& data, bool end_stream) { - parent_.encodeData(nullptr, data, end_stream); + parent_.encodeData(nullptr, data, end_stream, + ActiveStream::FilterIterationStartState::CanStartFromCurrent); } void ConnectionManagerImpl::ActiveStreamDecoderFilter::encodeTrailers(HeaderMapPtr&& trailers) { @@ -1872,7 +1942,8 @@ void ConnectionManagerImpl::ActiveStreamEncoderFilter::addEncodedData(Buffer::In void ConnectionManagerImpl::ActiveStreamEncoderFilter::injectEncodedDataToFilterChain( Buffer::Instance& data, bool end_stream) { - parent_.encodeData(this, data, end_stream); + parent_.encodeData(this, data, end_stream, + 
ActiveStream::FilterIterationStartState::CanStartFromCurrent); } HeaderMap& ConnectionManagerImpl::ActiveStreamEncoderFilter::addEncodedTrailers() { @@ -1903,7 +1974,7 @@ void ConnectionManagerImpl::ActiveStreamEncoderFilter::responseDataTooLarge() { if (!headers_continued_) { // Make sure we won't end up with nested watermark calls from the body buffer. parent_.state_.encoder_filters_streaming_ = true; - stopped_ = false; + allowIteration(); Http::Utility::sendLocalReply( Grpc::Common::hasGrpcContentType(*parent_.request_headers_), diff --git a/source/common/http/conn_manager_impl.h b/source/common/http/conn_manager_impl.h index 7b4f486144434..a968d1109f7a4 100644 --- a/source/common/http/conn_manager_impl.h +++ b/source/common/http/conn_manager_impl.h @@ -95,16 +95,26 @@ class ConnectionManagerImpl : Logger::Loggable, */ struct ActiveStreamFilterBase : public virtual StreamFilterCallbacks { ActiveStreamFilterBase(ActiveStream& parent, bool dual_filter) - : parent_(parent), headers_continued_(false), continue_headers_continued_(false), - stopped_(false), end_stream_(false), dual_filter_(dual_filter) {} + : iteration_state_(IterationState::Continue), iterate_from_current_filter_(false), + parent_(parent), headers_continued_(false), continue_headers_continued_(false), + end_stream_(false), dual_filter_(dual_filter) {} + // Functions in the following block are called after the filter finishes processing + // corresponding data. Those functions handle state updates and data storage (if needed) + // according to the status returned by filter's callback functions. 
bool commonHandleAfter100ContinueHeadersCallback(FilterHeadersStatus status); bool commonHandleAfterHeadersCallback(FilterHeadersStatus status, bool& headers_only); - void commonHandleBufferData(Buffer::Instance& provided_data); bool commonHandleAfterDataCallback(FilterDataStatus status, Buffer::Instance& provided_data, bool& buffer_was_streaming); bool commonHandleAfterTrailersCallback(FilterTrailersStatus status); + // Buffers provided_data. + void commonHandleBufferData(Buffer::Instance& provided_data); + + // If iteration has stopped for all frame types, calls this function to buffer the data before + // the filter processes data. The function also updates streaming state. + void commonBufferDataIfStopAll(Buffer::Instance& provided_data, bool& buffer_was_streaming); + void commonContinue(); virtual bool canContinue() PURE; virtual Buffer::WatermarkBufferPtr createBuffer() PURE; @@ -128,10 +138,35 @@ class ConnectionManagerImpl : Logger::Loggable, Tracing::Span& activeSpan() override; Tracing::Config& tracingConfig() override; + // Functions to set or get iteration state. + bool canIterate() { return iteration_state_ == IterationState::Continue; } + bool stoppedAll() { + return iteration_state_ == IterationState::StopAllBuffer || + iteration_state_ == IterationState::StopAllWatermark; + } + void allowIteration() { + ASSERT(iteration_state_ != IterationState::Continue); + iteration_state_ = IterationState::Continue; + } + + // The state of iteration. + enum class IterationState { + Continue, // Iteration has not stopped for any frame type. + StopSingleIteration, // Iteration has stopped for headers, 100-continue, or data. + StopAllBuffer, // Iteration has stopped for all frame types, and following data should + // be buffered. + StopAllWatermark, // Iteration has stopped for all frame types, and following data should + // be buffered until high watermark is reached. 
+ }; + IterationState iteration_state_; + // If the filter resumes iteration from a StopAllBuffer/Watermark state, the current filter + // hasn't parsed data and trailers. As a result, the filter iteration should start with the + // current filter instead of the next one. If true, filter iteration starts with the current + // filter. Otherwise, starts with the next filter in the chain. + bool iterate_from_current_filter_; ActiveStream& parent_; bool headers_continued_ : 1; bool continue_headers_continued_ : 1; - bool stopped_ : 1; // If true, end_stream is called for this filter. bool end_stream_ : 1; const bool dual_filter_ : 1; @@ -164,7 +199,8 @@ class ConnectionManagerImpl : Logger::Loggable, parent_.decodeHeaders(this, *parent_.request_headers_, end_stream); } void doData(bool end_stream) override { - parent_.decodeData(this, *parent_.buffered_request_data_, end_stream); + parent_.decodeData(this, *parent_.buffered_request_data_, end_stream, + ActiveStream::FilterIterationStartState::CanStartFromCurrent); } void doTrailers() override { parent_.decodeTrailers(this, *parent_.request_trailers_); } const HeaderMapPtr& trailers() override { return parent_.request_trailers_; } @@ -247,7 +283,8 @@ class ConnectionManagerImpl : Logger::Loggable, parent_.encodeHeaders(this, *parent_.response_headers_, end_stream); } void doData(bool end_stream) override { - parent_.encodeData(this, *parent_.buffered_response_data_, end_stream); + parent_.encodeData(this, *parent_.buffered_response_data_, end_stream, + ActiveStream::FilterIterationStartState::CanStartFromCurrent); } void doTrailers() override { parent_.encodeTrailers(this, *parent_.response_trailers_); } const HeaderMapPtr& trailers() override { return parent_.response_trailers_; } @@ -290,16 +327,28 @@ class ConnectionManagerImpl : Logger::Loggable, ActiveStream(ConnectionManagerImpl& connection_manager); ~ActiveStream(); + // Indicates which filter to start the iteration with. 
+ enum class FilterIterationStartState { AlwaysStartFromNext, CanStartFromCurrent }; + void addStreamDecoderFilterWorker(StreamDecoderFilterSharedPtr filter, bool dual_filter); void addStreamEncoderFilterWorker(StreamEncoderFilterSharedPtr filter, bool dual_filter); void chargeStats(const HeaderMap& headers); + // Returns the encoder filter to start iteration with. std::list::iterator - commonEncodePrefix(ActiveStreamEncoderFilter* filter, bool end_stream); + commonEncodePrefix(ActiveStreamEncoderFilter* filter, bool end_stream, + FilterIterationStartState filter_iteration_start_state); + // Returns the decoder filter to start iteration with. + std::list::iterator + commonDecodePrefix(ActiveStreamDecoderFilter* filter, + FilterIterationStartState filter_iteration_start_state); const Network::Connection* connection(); void addDecodedData(ActiveStreamDecoderFilter& filter, Buffer::Instance& data, bool streaming); HeaderMap& addDecodedTrailers(); void decodeHeaders(ActiveStreamDecoderFilter* filter, HeaderMap& headers, bool end_stream); - void decodeData(ActiveStreamDecoderFilter* filter, Buffer::Instance& data, bool end_stream); + // Sends data through decoding filter chains. filter_iteration_start_state indicates which + // filter to start the iteration with. 
+ void decodeData(ActiveStreamDecoderFilter* filter, Buffer::Instance& data, bool end_stream, + FilterIterationStartState filter_iteration_start_state); void decodeTrailers(ActiveStreamDecoderFilter* filter, HeaderMap& trailers); void disarmRequestTimeout(); void maybeEndDecode(bool end_stream); @@ -311,11 +360,19 @@ class ConnectionManagerImpl : Logger::Loggable, const absl::optional grpc_status); void encode100ContinueHeaders(ActiveStreamEncoderFilter* filter, HeaderMap& headers); void encodeHeaders(ActiveStreamEncoderFilter* filter, HeaderMap& headers, bool end_stream); - void encodeData(ActiveStreamEncoderFilter* filter, Buffer::Instance& data, bool end_stream); + // Sends data through encoding filter chains. filter_iteration_start_state indicates which + // filter to start the iteration with. + void encodeData(ActiveStreamEncoderFilter* filter, Buffer::Instance& data, bool end_stream, + FilterIterationStartState filter_iteration_start_state); void encodeTrailers(ActiveStreamEncoderFilter* filter, HeaderMap& trailers); void encodeMetadata(ActiveStreamEncoderFilter* filter, MetadataMapPtr&& metadata_map_ptr); void maybeEndEncode(bool end_stream); uint64_t streamId() { return stream_id_; } + // Returns true if filter has stopped iteration for all frame types. Otherwise, returns false. + // filter_streaming is the variable to indicate if stream is streaming, and its value may be + // changed by the function. 
+ bool handleDataIfStopAll(ActiveStreamFilterBase& filter, Buffer::Instance& data, + bool& filter_streaming); // Http::StreamCallbacks void onResetStream(StreamResetReason reason, diff --git a/test/common/http/conn_manager_impl_test.cc b/test/common/http/conn_manager_impl_test.cc index 05a9796879fa2..a282fc88cda50 100644 --- a/test/common/http/conn_manager_impl_test.cc +++ b/test/common/http/conn_manager_impl_test.cc @@ -168,22 +168,40 @@ class HttpConnectionManagerImplTest : public testing::Test, public ConnectionMan EXPECT_CALL(stream_, bufferLimit()).WillOnce(Return(initial_buffer_limit_)); } - void setUpEncoderAndDecoder() { + // If request_with_data_and_trailers is true, includes data and trailers in the request. If + // decode_headers_stop_all is true, decoder_filters_[0]'s callback decodeHeaders() returns + // StopAllIterationAndBuffer. + void setUpEncoderAndDecoder(bool request_with_data_and_trailers, bool decode_headers_stop_all) { setUpBufferLimits(); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { - StreamDecoder* decoder = &conn_manager_->newStream(response_encoder_); - HeaderMapPtr headers{ - new TestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; - decoder->decodeHeaders(std::move(headers), true); - })); + EXPECT_CALL(*codec_, dispatch(_)) + .WillOnce(Invoke([&, request_with_data_and_trailers](Buffer::Instance&) -> void { + StreamDecoder* decoder = &conn_manager_->newStream(response_encoder_); + HeaderMapPtr headers{ + new TestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; + if (request_with_data_and_trailers) { + decoder->decodeHeaders(std::move(headers), false); + + Buffer::OwnedImpl fake_data("12345"); + decoder->decodeData(fake_data, false); + + HeaderMapPtr trailers{new TestHeaderMapImpl{{"foo", "bar"}}}; + decoder->decodeTrailers(std::move(trailers)); + } else { + decoder->decodeHeaders(std::move(headers), true); + } + })); setupFilterChain(2, 2); - 
EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, true)) - .WillOnce(InvokeWithoutArgs([&]() -> FilterHeadersStatus { + EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, _)) + .WillOnce(InvokeWithoutArgs([&, decode_headers_stop_all]() -> FilterHeadersStatus { Buffer::OwnedImpl data("hello"); decoder_filters_[0]->callbacks_->addDecodedData(data, true); - return FilterHeadersStatus::Continue; + if (decode_headers_stop_all) { + return FilterHeadersStatus::StopAllIterationAndBuffer; + } else { + return FilterHeadersStatus::Continue; + } })); EXPECT_CALL(*decoder_filters_[0], decodeComplete()); } @@ -436,7 +454,7 @@ TEST_F(HttpConnectionManagerImplTest, 100ContinueResponse) { TEST_F(HttpConnectionManagerImplTest, 100ContinueResponseWithEncoderFiltersProxyingDisabled) { proxy_100_continue_ = false; setup(false, "envoy-custom-server", false); - setUpEncoderAndDecoder(); + setUpEncoderAndDecoder(false, false); sendRequestHeadersAndData(); // Akin to 100ContinueResponseWithEncoderFilters below, but with @@ -460,7 +478,7 @@ TEST_F(HttpConnectionManagerImplTest, 100ContinueResponseWithEncoderFiltersProxy TEST_F(HttpConnectionManagerImplTest, 100ContinueResponseWithEncoderFilters) { proxy_100_continue_ = true; setup(false, "envoy-custom-server", false); - setUpEncoderAndDecoder(); + setUpEncoderAndDecoder(false, false); sendRequestHeadersAndData(); EXPECT_CALL(*encoder_filters_[0], encode100ContinueHeaders(_)) @@ -483,7 +501,7 @@ TEST_F(HttpConnectionManagerImplTest, 100ContinueResponseWithEncoderFilters) { TEST_F(HttpConnectionManagerImplTest, PauseResume100Continue) { proxy_100_continue_ = true; setup(false, "envoy-custom-server", false); - setUpEncoderAndDecoder(); + setUpEncoderAndDecoder(false, false); sendRequestHeadersAndData(); // Stop the 100-Continue at encoder filter 1. 
Encoder filter 0 should not yet receive the @@ -2853,7 +2871,7 @@ TEST_F(HttpConnectionManagerImplTest, FilterClearRouteCache) { TEST_F(HttpConnectionManagerImplTest, UpstreamWatermarkCallbacks) { setup(false, ""); - setUpEncoderAndDecoder(); + setUpEncoderAndDecoder(false, false); sendRequestHeadersAndData(); // Mimic the upstream connection backing up. The router would call @@ -3018,7 +3036,7 @@ TEST_F(HttpConnectionManagerImplTest, UnderlyingConnectionWatermarksUnwoundWithL TEST_F(HttpConnectionManagerImplTest, AlterFilterWatermarkLimits) { initial_buffer_limit_ = 100; setup(false, ""); - setUpEncoderAndDecoder(); + setUpEncoderAndDecoder(false, false); sendRequestHeadersAndData(); // Check initial limits. @@ -3047,7 +3065,7 @@ TEST_F(HttpConnectionManagerImplTest, HitFilterWatermarkLimits) { initial_buffer_limit_ = 1; streaming_filter_ = true; setup(false, ""); - setUpEncoderAndDecoder(); + setUpEncoderAndDecoder(false, false); // The filter is a streaming filter. Sending 4 bytes should hit the // watermark limit and disable reads on the stream. @@ -3087,7 +3105,7 @@ TEST_F(HttpConnectionManagerImplTest, HitRequestBufferLimits) { initial_buffer_limit_ = 10; streaming_filter_ = false; setup(false, ""); - setUpEncoderAndDecoder(); + setUpEncoderAndDecoder(false, false); sendRequestHeadersAndData(); // Set the filter to be a buffering filter. 
Sending any data will hit the @@ -3148,7 +3166,7 @@ TEST_F(HttpConnectionManagerImplTest, HitRequestBufferLimitsIntermediateFilter) TEST_F(HttpConnectionManagerImplTest, HitResponseBufferLimitsBeforeHeaders) { initial_buffer_limit_ = 10; setup(false, ""); - setUpEncoderAndDecoder(); + setUpEncoderAndDecoder(false, false); sendRequestHeadersAndData(); // Start the response without processing the request headers through all @@ -3182,7 +3200,7 @@ TEST_F(HttpConnectionManagerImplTest, HitResponseBufferLimitsBeforeHeaders) { TEST_F(HttpConnectionManagerImplTest, HitResponseBufferLimitsAfterHeaders) { initial_buffer_limit_ = 10; setup(false, ""); - setUpEncoderAndDecoder(); + setUpEncoderAndDecoder(false, false); sendRequestHeadersAndData(); // Start the response, and make sure the request headers are fully processed. @@ -4016,5 +4034,91 @@ TEST_F(HttpConnectionManagerImplTest, OverlyLongHeadersAcceptedIfConfigured) { Buffer::OwnedImpl fake_input("1234"); conn_manager_->onData(fake_input, false); // kick off request } + +TEST_F(HttpConnectionManagerImplTest, TestStopAllIterationAndBufferOnDecodingPathFirstFilter) { + setup(false, "envoy-custom-server", false); + setUpEncoderAndDecoder(true, true); + + // Kick off the incoming data. + Buffer::OwnedImpl fake_input("1234"); + conn_manager_->onData(fake_input, false); + + // Verify that once the decoder_filters_[0]'s contineDecoding() is called, decoder_filters_[1]'s + // decodeHeaders() is called, and both filters receive data and trailers consequently. 
+ EXPECT_CALL(*decoder_filters_[1], decodeHeaders(_, _)) + .WillOnce(Return(FilterHeadersStatus::Continue)); + EXPECT_CALL(*decoder_filters_[0], decodeData(_, _)).WillOnce(Return(FilterDataStatus::Continue)); + EXPECT_CALL(*decoder_filters_[1], decodeData(_, _)).WillOnce(Return(FilterDataStatus::Continue)); + EXPECT_CALL(*decoder_filters_[0], decodeTrailers(_)) + .WillOnce(Return(FilterTrailersStatus::Continue)); + EXPECT_CALL(*decoder_filters_[1], decodeTrailers(_)) + .WillOnce(Return(FilterTrailersStatus::Continue)); + EXPECT_CALL(*decoder_filters_[1], decodeComplete()); + decoder_filters_[0]->callbacks_->continueDecoding(); +} + +TEST_F(HttpConnectionManagerImplTest, TestStopAllIterationAndBufferOnDecodingPathSecondFilter) { + setup(false, "envoy-custom-server", false); + setUpEncoderAndDecoder(true, false); + + // Verify headers go through both filters, and data and trailers go through the first filter only. + EXPECT_CALL(*decoder_filters_[1], decodeHeaders(_, _)) + .WillOnce(Return(FilterHeadersStatus::StopAllIterationAndBuffer)); + EXPECT_CALL(*decoder_filters_[0], decodeData(_, _)).WillOnce(Return(FilterDataStatus::Continue)); + EXPECT_CALL(*decoder_filters_[0], decodeTrailers(_)) + .WillOnce(Return(FilterTrailersStatus::Continue)); + // Kick off the incoming data. + Buffer::OwnedImpl fake_input("1234"); + conn_manager_->onData(fake_input, false); + + // Verify that once the decoder_filters_[1]'s contineDecoding() is called, both data and trailers + // go through the second filter. 
+ EXPECT_CALL(*decoder_filters_[1], decodeData(_, _)).WillOnce(Return(FilterDataStatus::Continue)); + EXPECT_CALL(*decoder_filters_[1], decodeTrailers(_)) + .WillOnce(Return(FilterTrailersStatus::Continue)); + EXPECT_CALL(*decoder_filters_[1], decodeComplete()); + decoder_filters_[1]->callbacks_->continueDecoding(); +} + +TEST_F(HttpConnectionManagerImplTest, TestStopAllIterationAndBufferOnEncodingPath) { + setup(false, "envoy-custom-server", false); + setUpEncoderAndDecoder(false, false); + sendRequestHeadersAndData(); + + // encoder_filters_[1] is the first filter in the chain. + EXPECT_CALL(*encoder_filters_[1], encodeHeaders(_, false)) + .WillOnce(Invoke([&](HeaderMap&, bool) -> FilterHeadersStatus { + return FilterHeadersStatus::StopAllIterationAndBuffer; + })); + HeaderMapPtr response_headers{new TestHeaderMapImpl{{":status", "200"}}}; + decoder_filters_[0]->callbacks_->encodeHeaders(std::move(response_headers), false); + + // Invoke encodeData while all iteration is stopped and make sure the filters do not have + // encodeData called. + EXPECT_CALL(*encoder_filters_[0], encodeData(_, _)).Times(0); + EXPECT_CALL(*encoder_filters_[1], encodeData(_, _)).Times(0); + Buffer::OwnedImpl response_body("response"); + decoder_filters_[0]->callbacks_->encodeData(response_body, false); + decoder_filters_[0]->callbacks_->encodeTrailers( + HeaderMapPtr{new TestHeaderMapImpl{{"some", "trailer"}}}); + + // Verify that once encoder_filters_[1]'s continueEncoding() is called, encoder_filters_[0]'s + // encodeHeaders() is called, and both filters receive data and trailers consequently. 
+ EXPECT_CALL(*encoder_filters_[0], encodeHeaders(_, _)) + .WillOnce(Return(FilterHeadersStatus::Continue)); + EXPECT_CALL(response_encoder_, encodeHeaders(_, false)); + EXPECT_CALL(*encoder_filters_[1], encodeData(_, _)).WillOnce(Return(FilterDataStatus::Continue)); + EXPECT_CALL(*encoder_filters_[0], encodeData(_, _)).WillOnce(Return(FilterDataStatus::Continue)); + EXPECT_CALL(response_encoder_, encodeData(_, _)); + EXPECT_CALL(*encoder_filters_[1], encodeTrailers(_)) + .WillOnce(Return(FilterTrailersStatus::Continue)); + EXPECT_CALL(*encoder_filters_[0], encodeTrailers(_)) + .WillOnce(Return(FilterTrailersStatus::Continue)); + EXPECT_CALL(response_encoder_, encodeTrailers(_)); + EXPECT_CALL(*encoder_filters_[0], encodeComplete()); + EXPECT_CALL(*encoder_filters_[1], encodeComplete()); + expectOnDestroy(); + encoder_filters_[1]->callbacks_->continueEncoding(); +} } // namespace Http } // namespace Envoy diff --git a/test/integration/BUILD b/test/integration/BUILD index f7103c90ee9b9..6c547da012143 100644 --- a/test/integration/BUILD +++ b/test/integration/BUILD @@ -251,6 +251,9 @@ envoy_cc_test_library( "//source/extensions/filters/network/http_connection_manager:config", "//test/common/upstream:utility_lib", "//test/integration/filters:add_trailers_filter_config_lib", + "//test/integration/filters:call_decodedata_once_filter_config_lib", + "//test/integration/filters:decode_headers_return_stop_all_filter_config_lib", + "//test/integration/filters:encode_headers_return_stop_all_filter_config_lib", "//test/integration/filters:headers_only_filter_config_lib", "//test/integration/filters:modify_buffer_filter_config_lib", "//test/integration/filters:passthrough_filter_config_lib", diff --git a/test/integration/filters/BUILD b/test/integration/filters/BUILD index 5ce3a41914912..a2edd3ed537ef 100644 --- a/test/integration/filters/BUILD +++ b/test/integration/filters/BUILD @@ -121,3 +121,51 @@ envoy_cc_test_library( 
"//source/extensions/filters/http/common:pass_through_filter_lib", ], ) + +envoy_cc_test_library( + name = "decode_headers_return_stop_all_filter_config_lib", + srcs = [ + "decode_headers_return_stop_all_filter.cc", + ], + deps = [ + ":common_lib", + "//include/envoy/event:timer_interface", + "//include/envoy/http:filter_interface", + "//include/envoy/registry", + "//include/envoy/server:filter_config_interface", + "//source/extensions/filters/http/common:empty_http_filter_config_lib", + "//source/extensions/filters/http/common:pass_through_filter_lib", + ], +) + +envoy_cc_test_library( + name = "call_decodedata_once_filter_config_lib", + srcs = [ + "call_decodedata_once_filter.cc", + ], + deps = [ + ":common_lib", + "//include/envoy/event:timer_interface", + "//include/envoy/http:filter_interface", + "//include/envoy/registry", + "//include/envoy/server:filter_config_interface", + "//source/extensions/filters/http/common:empty_http_filter_config_lib", + "//source/extensions/filters/http/common:pass_through_filter_lib", + ], +) + +envoy_cc_test_library( + name = "encode_headers_return_stop_all_filter_config_lib", + srcs = [ + "encode_headers_return_stop_all_filter.cc", + ], + deps = [ + ":common_lib", + "//include/envoy/event:timer_interface", + "//include/envoy/http:filter_interface", + "//include/envoy/registry", + "//include/envoy/server:filter_config_interface", + "//source/extensions/filters/http/common:empty_http_filter_config_lib", + "//source/extensions/filters/http/common:pass_through_filter_lib", + ], +) diff --git a/test/integration/filters/call_decodedata_once_filter.cc b/test/integration/filters/call_decodedata_once_filter.cc new file mode 100644 index 0000000000000..15abc0658fd05 --- /dev/null +++ b/test/integration/filters/call_decodedata_once_filter.cc @@ -0,0 +1,48 @@ +#include "envoy/registry/registry.h" +#include "envoy/server/filter_config.h" + +#include "extensions/filters/http/common/empty_http_filter_config.h" +#include 
"extensions/filters/http/common/pass_through_filter.h" + +#include "test/integration/filters/common.h" + +#include "gtest/gtest.h" + +namespace Envoy { + +// A filter that only allows decodeData() to be called once with fixed data length. +class CallDecodeDataOnceFilter : public Http::PassThroughFilter { +public: + constexpr static char name[] = "call-decodedata-once-filter"; + + Http::FilterHeadersStatus decodeHeaders(Http::HeaderMap& header_map, bool) override { + Http::HeaderEntry* entry_content = header_map.get(Envoy::Http::LowerCaseString("content_size")); + Http::HeaderEntry* entry_added = header_map.get(Envoy::Http::LowerCaseString("added_size")); + ASSERT(entry_content != nullptr && entry_added != nullptr); + content_size_ = std::stoul(std::string(entry_content->value().getStringView())); + added_size_ = std::stoul(std::string(entry_added->value().getStringView())); + return Http::FilterHeadersStatus::Continue; + } + + Http::FilterDataStatus decodeData(Buffer::Instance& data, bool) override { + // Request data length (size 5000) + data from addDecodedData() called in dataDecode (size 1). 
+ // Or data from addDecodedData() called in dataTrailers (size 1) + EXPECT_TRUE(data.length() == content_size_ + added_size_ || data.length() == added_size_); + return Http::FilterDataStatus::Continue; + } + + Http::FilterTrailersStatus decodeTrailers(Http::HeaderMap&) override { + return Http::FilterTrailersStatus::Continue; + } + +private: + size_t content_size_ = 0; + size_t added_size_ = 0; +}; + +constexpr char CallDecodeDataOnceFilter::name[]; +static Registry::RegisterFactory, + Server::Configuration::NamedHttpFilterConfigFactory> + register_; + +} // namespace Envoy diff --git a/test/integration/filters/decode_headers_return_stop_all_filter.cc b/test/integration/filters/decode_headers_return_stop_all_filter.cc new file mode 100644 index 0000000000000..e2049155ef98a --- /dev/null +++ b/test/integration/filters/decode_headers_return_stop_all_filter.cc @@ -0,0 +1,120 @@ +#include +#include + +#include "envoy/event/timer.h" +#include "envoy/http/filter.h" +#include "envoy/registry/registry.h" +#include "envoy/server/filter_config.h" + +#include "common/buffer/buffer_impl.h" + +#include "extensions/filters/http/common/empty_http_filter_config.h" +#include "extensions/filters/http/common/pass_through_filter.h" + +#include "test/integration/filters/common.h" + +#include "gtest/gtest.h" + +namespace Envoy { + +// A filter returns StopAllIterationAndBuffer or StopAllIterationAndWatermark for headers. How the +// filter acts depends on the headers received. +class DecodeHeadersReturnStopAllFilter : public Http::PassThroughFilter { +public: + constexpr static char name[] = "decode-headers-return-stop-all-filter"; + + // Returns Http::FilterHeadersStatus::StopAllIterationAndBuffer or + // Http::FilterHeadersStatus::StopAllIterationAndWatermark for headers. Triggers a timer to + // continue iteration after 5s. 
+ Http::FilterHeadersStatus decodeHeaders(Http::HeaderMap& header_map, bool) override { + Http::HeaderEntry* entry_content = header_map.get(Envoy::Http::LowerCaseString("content_size")); + Http::HeaderEntry* entry_added = header_map.get(Envoy::Http::LowerCaseString("added_size")); + ASSERT(entry_content != nullptr && entry_added != nullptr); + content_size_ = std::stoul(std::string(entry_content->value().getStringView())); + added_size_ = std::stoul(std::string(entry_added->value().getStringView())); + Http::HeaderEntry* entry_is_first_trigger = + header_map.get(Envoy::Http::LowerCaseString("is_first_trigger")); + is_first_trigger_ = entry_is_first_trigger != nullptr; + // Remove "first_trigger" headers so that if the filter is registered twice in a filter chain, + // it would act differently. + header_map.remove(Http::LowerCaseString("is_first_trigger")); + + createTimerForContinue(); + + Http::HeaderEntry* entry_buffer = header_map.get(Envoy::Http::LowerCaseString("buffer_limit")); + if (entry_buffer == nullptr || !is_first_trigger_) { + return Http::FilterHeadersStatus::StopAllIterationAndBuffer; + } else { + watermark_enabled_ = true; + buffer_limit_ = std::stoul(std::string(entry_buffer->value().getStringView())); + decoder_callbacks_->setDecoderBufferLimit(buffer_limit_); + header_map.remove(Http::LowerCaseString("buffer_limit")); + return Http::FilterHeadersStatus::StopAllIterationAndWatermark; + } + } + + Http::FilterDataStatus decodeData(Buffer::Instance& data, bool) override { + ASSERT(timer_triggered_); + if (is_first_trigger_) { + if (watermark_enabled_) { + // High watermark reached before all data are received. The rest of the data is sent after + // iteration resumes. + EXPECT_LT(data.length(), content_size_); + } else { + // decodeData will only be called once after iteration resumes. 
+ EXPECT_EQ(data.length(), content_size_); + } + Buffer::OwnedImpl added_data(std::string(added_size_, 'a')); + decoder_callbacks_->addDecodedData(added_data, false); + } else { + EXPECT_TRUE(data.length() == content_size_ + added_size_ || + data.length() == content_size_ + added_size_ * 2); + } + return Http::FilterDataStatus::Continue; + } + + Http::FilterTrailersStatus decodeTrailers(Http::HeaderMap&) override { + ASSERT(timer_triggered_); + if (is_first_trigger_) { + Buffer::OwnedImpl data(std::string(added_size_, 'a')); + decoder_callbacks_->addDecodedData(data, false); + } + return Http::FilterTrailersStatus::Continue; + } + +private: + // Creates a timer to continue iteration after conditions meet. + void createTimerForContinue() { + delay_timer_ = decoder_callbacks_->dispatcher().createTimer([this]() -> void { + // If decodeHeaders() returns StopAllIterationAndBuffer, triggers the timer when all the + // request data has been received. If decodeHeaders() returns StopAllIterationAndWatermark, + // triggers the timer when received data exceed buffer limit. + if ((content_size_ > 0 && + decoder_callbacks_->streamInfo().bytesReceived() >= content_size_) || + (watermark_enabled_ && buffer_limit_ > 0 && + decoder_callbacks_->streamInfo().bytesReceived() >= buffer_limit_)) { + timer_triggered_ = true; + decoder_callbacks_->continueDecoding(); + } else { + // Create a new timer to try again later. 
+ createTimerForContinue(); + } + }); + delay_timer_->enableTimer(std::chrono::milliseconds(500)); + } + + Event::TimerPtr delay_timer_; + bool timer_triggered_ = false; + size_t content_size_ = 0; + size_t added_size_ = 0; + size_t buffer_limit_ = 0; + bool watermark_enabled_ = false; + bool is_first_trigger_ = false; +}; + +constexpr char DecodeHeadersReturnStopAllFilter::name[]; +static Registry::RegisterFactory, + Server::Configuration::NamedHttpFilterConfigFactory> + register_; + +} // namespace Envoy diff --git a/test/integration/filters/encode_headers_return_stop_all_filter.cc b/test/integration/filters/encode_headers_return_stop_all_filter.cc new file mode 100644 index 0000000000000..559121efea37e --- /dev/null +++ b/test/integration/filters/encode_headers_return_stop_all_filter.cc @@ -0,0 +1,93 @@ +#include +#include + +#include "envoy/event/timer.h" +#include "envoy/http/filter.h" +#include "envoy/registry/registry.h" +#include "envoy/server/filter_config.h" + +#include "common/buffer/buffer_impl.h" + +#include "extensions/filters/http/common/empty_http_filter_config.h" +#include "extensions/filters/http/common/pass_through_filter.h" + +#include "test/integration/filters/common.h" + +#include "gtest/gtest.h" + +namespace Envoy { + +// A filter returns StopAllIterationAndBuffer or StopAllIterationAndWatermark for headers. The +// iteration continues after 5s. +class EncodeHeadersReturnStopAllFilter : public Http::PassThroughFilter { +public: + constexpr static char name[] = "encode-headers-return-stop-all-filter"; + + // Returns Http::FilterHeadersStatus::StopAllIterationAndBuffer or + // Http::FilterHeadersStatus::StopAllIterationAndWatermark for headers. Triggers a timer to + // continue iteration after 5s. 
+ Http::FilterHeadersStatus encodeHeaders(Http::HeaderMap& header_map, bool) override { + Http::HeaderEntry* entry_content = header_map.get(Envoy::Http::LowerCaseString("content_size")); + Http::HeaderEntry* entry_added = header_map.get(Envoy::Http::LowerCaseString("added_size")); + ASSERT(entry_content != nullptr && entry_added != nullptr); + content_size_ = std::stoul(std::string(entry_content->value().getStringView())); + added_size_ = std::stoul(std::string(entry_added->value().getStringView())); + + createTimerForContinue(); + + Http::HeaderEntry* entry_buffer = header_map.get(Envoy::Http::LowerCaseString("buffer_limit")); + if (entry_buffer == nullptr) { + return Http::FilterHeadersStatus::StopAllIterationAndBuffer; + } else { + watermark_enabled_ = true; + encoder_callbacks_->setEncoderBufferLimit( + std::stoul(std::string(entry_buffer->value().getStringView()))); + return Http::FilterHeadersStatus::StopAllIterationAndWatermark; + } + } + + Http::FilterDataStatus encodeData(Buffer::Instance& data, bool) override { + ASSERT(timer_triggered_); + if (watermark_enabled_) { + // High watermark reached before all data are received. The rest of the data is sent after + // iteration resumes. + EXPECT_LT(data.length(), content_size_); + } else { + // encodeData will only be called once after iteration resumes. + EXPECT_EQ(data.length(), content_size_); + } + Buffer::OwnedImpl added_data(std::string(added_size_, 'a')); + encoder_callbacks_->addEncodedData(added_data, false); + return Http::FilterDataStatus::Continue; + } + + Http::FilterTrailersStatus encodeTrailers(Http::HeaderMap&) override { + ASSERT(timer_triggered_); + Buffer::OwnedImpl data(std::string(added_size_, 'a')); + encoder_callbacks_->addEncodedData(data, false); + return Http::FilterTrailersStatus::Continue; + } + +private: + // Creates a timer to continue iteration after 5s. 
+ void createTimerForContinue() { + delay_timer_ = encoder_callbacks_->dispatcher().createTimer([this]() -> void { + timer_triggered_ = true; + encoder_callbacks_->continueEncoding(); + }); + delay_timer_->enableTimer(std::chrono::seconds(5)); + } + + Event::TimerPtr delay_timer_; + bool timer_triggered_ = false; + size_t added_size_ = 0; + size_t content_size_ = 0; + bool watermark_enabled_ = false; +}; + +constexpr char EncodeHeadersReturnStopAllFilter::name[]; +static Registry::RegisterFactory, + Server::Configuration::NamedHttpFilterConfigFactory> + register_; + +} // namespace Envoy diff --git a/test/integration/protocol_integration_test.cc b/test/integration/protocol_integration_test.cc index 33b9e1ac84823..5515391c010a8 100644 --- a/test/integration/protocol_integration_test.cc +++ b/test/integration/protocol_integration_test.cc @@ -53,7 +53,33 @@ void setDoNotValidateRouteConfig( // This is useful for things which will likely not differ based on upstream // behavior, for example "how does Envoy handle duplicate content lengths from // downstream"? -typedef HttpProtocolIntegrationTest DownstreamProtocolIntegrationTest; +class DownstreamProtocolIntegrationTest : public HttpProtocolIntegrationTest { +protected: + void changeHeadersForStopAllTests(Http::TestHeaderMapImpl& headers, bool set_buffer_limit) { + headers.addCopy("content_size", std::to_string(count_ * size_)); + headers.addCopy("added_size", std::to_string(added_decoded_data_size_)); + headers.addCopy("is_first_trigger", "value"); + if (set_buffer_limit) { + headers.addCopy("buffer_limit", std::to_string(buffer_limit_)); + } + } + + void verifyUpStreamRequestAfterStopAllFilter() { + if (downstreamProtocol() == Http::CodecClient::Type::HTTP2) { + // decode-headers-return-stop-all-filter calls addDecodedData in decodeData and + // decodeTrailers. 2 decoded data were added. 
+ EXPECT_EQ(count_ * size_ + added_decoded_data_size_ * 2, upstream_request_->bodyLength()); + } else { + EXPECT_EQ(count_ * size_ + added_decoded_data_size_ * 1, upstream_request_->bodyLength()); + } + EXPECT_EQ(true, upstream_request_->complete()); + } + + const int count_ = 70; + const int size_ = 1000; + const int added_decoded_data_size_ = 1; + const int buffer_limit_ = 100; +}; // Tests for ProtocolIntegrationTest will be run with the full mesh of H1/H2 // downstream and H1/H2 upstreams. @@ -755,6 +781,230 @@ TEST_P(DownstreamProtocolIntegrationTest, LargeRequestHeadersAccepted) { testLargeRequestHeaders(95, 96); } +// Tests StopAllIterationAndBuffer. Verifies decode-headers-return-stop-all-filter calls decodeData +// once after iteration is resumed. +TEST_P(DownstreamProtocolIntegrationTest, testDecodeHeadersReturnsStopAll) { + config_helper_.addFilter(R"EOF( +name: call-decodedata-once-filter +)EOF"); + config_helper_.addFilter(R"EOF( +name: decode-headers-return-stop-all-filter +)EOF"); + config_helper_.addFilter(R"EOF( +name: passthrough-filter +)EOF"); + + initialize(); + codec_client_ = makeHttpConnection(lookupPort("http")); + + // Sends a request with headers and data. + changeHeadersForStopAllTests(default_request_headers_, false); + auto encoder_decoder = codec_client_->startRequest(default_request_headers_); + request_encoder_ = &encoder_decoder.first; + auto response = std::move(encoder_decoder.second); + for (int i = 0; i < count_ - 1; i++) { + codec_client_->sendData(*request_encoder_, size_, false); + } + // Sleeps for 1s in order to be consistent with testDecodeHeadersReturnsStopAllWatermark. 
+ sleep(1); + codec_client_->sendData(*request_encoder_, size_, true); + waitForNextUpstreamRequest(); + + upstream_request_->encodeHeaders(default_response_headers_, true); + response->waitForEndStream(); + ASSERT_TRUE(response->complete()); + EXPECT_EQ(count_ * size_ + added_decoded_data_size_, upstream_request_->bodyLength()); + EXPECT_EQ(true, upstream_request_->complete()); + + // Sends a request with headers, data, and trailers. + auto encoder_decoder_2 = codec_client_->startRequest(default_request_headers_); + request_encoder_ = &encoder_decoder_2.first; + response = std::move(encoder_decoder_2.second); + for (int i = 0; i < count_; i++) { + codec_client_->sendData(*request_encoder_, size_, false); + } + Http::TestHeaderMapImpl request_trailers{{"trailer", "trailer"}}; + codec_client_->sendTrailers(*request_encoder_, request_trailers); + waitForNextUpstreamRequest(); + + upstream_request_->encodeHeaders(default_response_headers_, true); + response->waitForEndStream(); + verifyUpStreamRequestAfterStopAllFilter(); +} + +// Tests StopAllIterationAndWatermark. decode-headers-return-stop-all-watermark-filter sets buffer +// limit to 100. Verifies data pause when limit is reached, and resume after iteration continues. +TEST_P(DownstreamProtocolIntegrationTest, testDecodeHeadersReturnsStopAllWatermark) { + config_helper_.addFilter(R"EOF( +name: decode-headers-return-stop-all-filter +)EOF"); + config_helper_.addFilter(R"EOF( +name: passthrough-filter +)EOF"); + + // Sets initial stream window to min value to make the client sensitive to a low watermark. + config_helper_.addConfigModifier( + [&](envoy::config::filter::network::http_connection_manager::v2::HttpConnectionManager& hcm) + -> void { + hcm.mutable_http2_protocol_options()->mutable_initial_stream_window_size()->set_value( + Http::Http2Settings::MIN_INITIAL_STREAM_WINDOW_SIZE); + }); + + initialize(); + codec_client_ = makeHttpConnection(lookupPort("http")); + + // Sends a request with headers and data. 
+ changeHeadersForStopAllTests(default_request_headers_, true); + auto encoder_decoder = codec_client_->startRequest(default_request_headers_); + request_encoder_ = &encoder_decoder.first; + auto response = std::move(encoder_decoder.second); + for (int i = 0; i < count_ - 1; i++) { + codec_client_->sendData(*request_encoder_, size_, false); + } + // Gives buffer 1s to react to buffer limit. + sleep(1); + codec_client_->sendData(*request_encoder_, size_, true); + waitForNextUpstreamRequest(); + + upstream_request_->encodeHeaders(default_response_headers_, true); + response->waitForEndStream(); + ASSERT_TRUE(response->complete()); + EXPECT_EQ(count_ * size_ + added_decoded_data_size_, upstream_request_->bodyLength()); + EXPECT_EQ(true, upstream_request_->complete()); + + // Sends a request with headers, data, and trailers. + auto encoder_decoder_2 = codec_client_->startRequest(default_request_headers_); + request_encoder_ = &encoder_decoder_2.first; + response = std::move(encoder_decoder_2.second); + for (int i = 0; i < count_ - 1; i++) { + codec_client_->sendData(*request_encoder_, size_, false); + } + // Gives buffer 1s to react to buffer limit. + sleep(1); + codec_client_->sendData(*request_encoder_, size_, false); + Http::TestHeaderMapImpl request_trailers{{"trailer", "trailer"}}; + codec_client_->sendTrailers(*request_encoder_, request_trailers); + waitForNextUpstreamRequest(); + + upstream_request_->encodeHeaders(default_response_headers_, true); + response->waitForEndStream(); + verifyUpStreamRequestAfterStopAllFilter(); +} + +// Test two filters that return StopAllIterationAndBuffer back-to-back. 
+TEST_P(DownstreamProtocolIntegrationTest, testTwoFiltersDecodeHeadersReturnsStopAll) { + config_helper_.addFilter(R"EOF( +name: decode-headers-return-stop-all-filter +)EOF"); + config_helper_.addFilter(R"EOF( +name: decode-headers-return-stop-all-filter +)EOF"); + config_helper_.addFilter(R"EOF( +name: passthrough-filter +)EOF"); + + initialize(); + codec_client_ = makeHttpConnection(lookupPort("http")); + + // Sends a request with headers and data. + changeHeadersForStopAllTests(default_request_headers_, false); + auto encoder_decoder = codec_client_->startRequest(default_request_headers_); + request_encoder_ = &encoder_decoder.first; + auto response = std::move(encoder_decoder.second); + for (int i = 0; i < count_ - 1; i++) { + codec_client_->sendData(*request_encoder_, size_, false); + } + codec_client_->sendData(*request_encoder_, size_, true); + waitForNextUpstreamRequest(); + + upstream_request_->encodeHeaders(default_response_headers_, true); + response->waitForEndStream(); + ASSERT_TRUE(response->complete()); + EXPECT_EQ(count_ * size_ + added_decoded_data_size_, upstream_request_->bodyLength()); + EXPECT_EQ(true, upstream_request_->complete()); + + // Sends a request with headers, data, and trailers. + auto encoder_decoder_2 = codec_client_->startRequest(default_request_headers_); + request_encoder_ = &encoder_decoder_2.first; + response = std::move(encoder_decoder_2.second); + for (int i = 0; i < count_; i++) { + codec_client_->sendData(*request_encoder_, size_, false); + } + Http::TestHeaderMapImpl request_trailers{{"trailer", "trailer"}}; + codec_client_->sendTrailers(*request_encoder_, request_trailers); + waitForNextUpstreamRequest(); + + upstream_request_->encodeHeaders(default_response_headers_, true); + response->waitForEndStream(); + verifyUpStreamRequestAfterStopAllFilter(); +} + +// Tests encodeHeaders() returns StopAllIterationAndBuffer. 
+TEST_P(DownstreamProtocolIntegrationTest, testEncodeHeadersReturnsStopAll) { + config_helper_.addFilter(R"EOF( +name: encode-headers-return-stop-all-filter +)EOF"); + + initialize(); + codec_client_ = makeHttpConnection(lookupPort("http")); + + // Upstream responds with headers, data and trailers. + auto response = codec_client_->makeRequestWithBody(default_request_headers_, 10); + waitForNextUpstreamRequest(); + + changeHeadersForStopAllTests(default_response_headers_, false); + upstream_request_->encodeHeaders(default_response_headers_, false); + for (int i = 0; i < count_ - 1; i++) { + upstream_request_->encodeData(size_, false); + } + // Sleeps for 1s in order to be consistent with testEncodeHeadersReturnsStopAllWatermark. + sleep(1); + upstream_request_->encodeData(size_, false); + Http::TestHeaderMapImpl response_trailers{{"response", "trailer"}}; + upstream_request_->encodeTrailers(response_trailers); + + response->waitForEndStream(); + ASSERT_TRUE(response->complete()); + EXPECT_EQ(count_ * size_ + added_decoded_data_size_, response->body().size()); +} + +// Tests encodeHeaders() returns StopAllIterationAndWatermark. +TEST_P(DownstreamProtocolIntegrationTest, testEncodeHeadersReturnsStopAllWatermark) { + config_helper_.addFilter(R"EOF( +name: encode-headers-return-stop-all-filter +)EOF"); + + // Sets initial stream window to min value to make the upstream sensitive to a low watermark. + config_helper_.addConfigModifier( + [&](envoy::config::filter::network::http_connection_manager::v2::HttpConnectionManager& hcm) + -> void { + hcm.mutable_http2_protocol_options()->mutable_initial_stream_window_size()->set_value( + Http::Http2Settings::MIN_INITIAL_STREAM_WINDOW_SIZE); + }); + + initialize(); + codec_client_ = makeHttpConnection(lookupPort("http")); + + // Upstream responds with headers, data and trailers. 
+ auto response = codec_client_->makeRequestWithBody(default_request_headers_, 10); + waitForNextUpstreamRequest(); + + changeHeadersForStopAllTests(default_response_headers_, true); + upstream_request_->encodeHeaders(default_response_headers_, false); + for (int i = 0; i < count_ - 1; i++) { + upstream_request_->encodeData(size_, false); + } + // Gives buffer 1s to react to buffer limit. + sleep(1); + upstream_request_->encodeData(size_, false); + Http::TestHeaderMapImpl response_trailers{{"response", "trailer"}}; + upstream_request_->encodeTrailers(response_trailers); + + response->waitForEndStream(); + ASSERT_TRUE(response->complete()); + EXPECT_EQ(count_ * size_ + added_decoded_data_size_, response->body().size()); +} + // For tests which focus on downstream-to-Envoy behavior, and don't need to be // run with both HTTP/1 and HTTP/2 upstreams. INSTANTIATE_TEST_SUITE_P(Protocols, DownstreamProtocolIntegrationTest, From c398e4e6abc6fc58731dc1575e1ad10e2606f123 Mon Sep 17 00:00:00 2001 From: Douglas Reid Date: Sat, 6 Apr 2019 10:58:00 -0700 Subject: [PATCH 070/165] fix(tracing): allow 256 chars in path tag (#6492) Signed-off-by: Douglas Reid --- source/common/tracing/http_tracer_impl.cc | 2 +- test/common/tracing/http_tracer_impl_test.cc | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/source/common/tracing/http_tracer_impl.cc b/source/common/tracing/http_tracer_impl.cc index debad7d5b848e..1070ac3165cb2 100644 --- a/source/common/tracing/http_tracer_impl.cc +++ b/source/common/tracing/http_tracer_impl.cc @@ -30,7 +30,7 @@ static std::string buildUrl(const Http::HeaderMap& request_headers) { std::string path = request_headers.EnvoyOriginalPath() ? 
request_headers.EnvoyOriginalPath()->value().c_str() : request_headers.Path()->value().c_str(); - static const size_t max_path_length = 128; + static const size_t max_path_length = 256; if (path.length() > max_path_length) { path = path.substr(0, max_path_length); } diff --git a/test/common/tracing/http_tracer_impl_test.cc b/test/common/tracing/http_tracer_impl_test.cc index 6c1c117d5b081..b939c516483a5 100644 --- a/test/common/tracing/http_tracer_impl_test.cc +++ b/test/common/tracing/http_tracer_impl_test.cc @@ -114,7 +114,7 @@ TEST(HttpTracerUtilityTest, IsTracing) { TEST(HttpConnManFinalizerImpl, OriginalAndLongPath) { const std::string path(300, 'a'); const std::string path_prefix = "http://"; - const std::string expected_path(128, 'a'); + const std::string expected_path(256, 'a'); NiceMock span; Http::TestHeaderMapImpl request_headers{{"x-request-id", "id"}, @@ -142,7 +142,7 @@ TEST(HttpConnManFinalizerImpl, OriginalAndLongPath) { TEST(HttpConnManFinalizerImpl, NoGeneratedId) { const std::string path(300, 'a'); const std::string path_prefix = "http://"; - const std::string expected_path(128, 'a'); + const std::string expected_path(256, 'a'); NiceMock span; Http::TestHeaderMapImpl request_headers{ From 0d087da79cf0d888e1658df40e04bfd39887482c Mon Sep 17 00:00:00 2001 From: Lizan Zhou Date: Sat, 6 Apr 2019 10:58:21 -0700 Subject: [PATCH 071/165] coverage: exclude chromium_url (#6498) Signed-off-by: Lizan Zhou --- test/run_envoy_bazel_coverage.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/run_envoy_bazel_coverage.sh b/test/run_envoy_bazel_coverage.sh index 21a10464b774b..bed4baa803c48 100755 --- a/test/run_envoy_bazel_coverage.sh +++ b/test/run_envoy_bazel_coverage.sh @@ -61,7 +61,7 @@ BAZEL_TEST_OPTIONS="${BAZEL_TEST_OPTIONS} -c dbg --copt=-DNDEBUG" # stats. 
The #foo# pattern is because gcov produces files such as # bazel-out#local-fastbuild#bin#external#spdlog_git#_virtual_includes#spdlog#spdlog#details#pattern_formatter_impl.h.gcov. # To find these while modifying this regex, perform a gcov run with -k set. -[[ -z "${GCOVR_EXCLUDE_REGEX}" ]] && GCOVR_EXCLUDE_REGEX=".*pb.h.gcov|.*#genfiles#.*|test#.*|external#.*|.*#external#.*|.*#prebuilt#.*|.*#config_validation#.*" +[[ -z "${GCOVR_EXCLUDE_REGEX}" ]] && GCOVR_EXCLUDE_REGEX=".*pb.h.gcov|.*#genfiles#.*|test#.*|external#.*|.*#external#.*|.*#prebuilt#.*|.*#config_validation#.*|.*#chromium_url#.*" [[ -z "${GCOVR_EXCLUDE_DIR}" ]] && GCOVR_EXCLUDE_DIR=".*/external/.*" COVERAGE_DIR="${SRCDIR}"/generated/coverage From 8cef5e32805324c7942653928deb28c9b9851b52 Mon Sep 17 00:00:00 2001 From: James Synge Date: Sun, 7 Apr 2019 23:03:29 -0400 Subject: [PATCH 072/165] build: Change namespace of chromium_url. (#6506) Rename namespace url to chromium_url. Avoids link-time collisions in binaries which link in Envoy and the original chromium URL library. 
Fix format of chromium_url/README.md Signed-off-by: James Synge --- source/common/chromium_url/README.md | 1 + source/common/chromium_url/url_canon.cc | 4 ++-- source/common/chromium_url/url_canon.h | 4 ++-- source/common/chromium_url/url_canon_internal.cc | 4 ++-- source/common/chromium_url/url_canon_internal.h | 4 ++-- source/common/chromium_url/url_canon_path.cc | 4 ++-- source/common/chromium_url/url_canon_stdstring.cc | 4 ++-- source/common/chromium_url/url_canon_stdstring.h | 4 ++-- source/common/chromium_url/url_parse.h | 4 ++-- source/common/chromium_url/url_parse_internal.h | 4 ++-- source/common/http/path_utility.cc | 9 +++++---- 11 files changed, 24 insertions(+), 22 deletions(-) diff --git a/source/common/chromium_url/README.md b/source/common/chromium_url/README.md index 64d28b315dd20..32e251c82d4d2 100644 --- a/source/common/chromium_url/README.md +++ b/source/common/chromium_url/README.md @@ -5,6 +5,7 @@ to support a security release fix for CVE-2019-9901. Long term we need this to be moved to absl or QUICHE for upgrades and long-term support. Some specific transforms of interest: +* The namespace `url` was changed to `chromium_url`. * `url_parse.h` is minified to just `Component` and flattened back into the URL directory. It does not contain any non-Chromium authored code any longer and so does not have a separate LICENSE. 
diff --git a/source/common/chromium_url/url_canon.cc b/source/common/chromium_url/url_canon.cc index 91926b6f237b6..b9ad1b829726c 100644 --- a/source/common/chromium_url/url_canon.cc +++ b/source/common/chromium_url/url_canon.cc @@ -9,8 +9,8 @@ #include "common/chromium_url/envoy_shim.h" -namespace url { +namespace chromium_url { template class EXPORT_TEMPLATE_DEFINE(COMPONENT_EXPORT(URL)) CanonOutputT; -} // namespace url +} // namespace chromium_url diff --git a/source/common/chromium_url/url_canon.h b/source/common/chromium_url/url_canon.h index 0f66374c60c4f..89a11bb0418b7 100644 --- a/source/common/chromium_url/url_canon.h +++ b/source/common/chromium_url/url_canon.h @@ -14,7 +14,7 @@ #include "common/chromium_url/envoy_shim.h" #include "common/chromium_url/url_parse.h" -namespace url { +namespace chromium_url { // Canonicalizer output ------------------------------------------------------- @@ -181,6 +181,6 @@ COMPONENT_EXPORT(URL) bool CanonicalizePath(const char* spec, const Component& path, CanonOutput* output, Component* out_path); -} // namespace url +} // namespace chromium_url #endif // URL_URL_CANON_H_ diff --git a/source/common/chromium_url/url_canon_internal.cc b/source/common/chromium_url/url_canon_internal.cc index 7aeb4f3de1b88..38c932cad5b47 100644 --- a/source/common/chromium_url/url_canon_internal.cc +++ b/source/common/chromium_url/url_canon_internal.cc @@ -7,7 +7,7 @@ #include "common/chromium_url/url_canon_internal.h" -namespace url { +namespace chromium_url { // See the header file for this array's declaration. 
const unsigned char kSharedCharTypeTable[0x100] = { @@ -292,4 +292,4 @@ const char kCharToHexLookup[8] = { 0, // 0xE0 - 0xFF }; -} // namespace url +} // namespace chromium_url diff --git a/source/common/chromium_url/url_canon_internal.h b/source/common/chromium_url/url_canon_internal.h index 63960665fc682..bffff5c12f4a0 100644 --- a/source/common/chromium_url/url_canon_internal.h +++ b/source/common/chromium_url/url_canon_internal.h @@ -19,7 +19,7 @@ #include "common/chromium_url/envoy_shim.h" #include "common/chromium_url/url_canon.h" -namespace url { +namespace chromium_url { // Character type handling ----------------------------------------------------- @@ -241,6 +241,6 @@ inline bool DecodeEscaped(const CHAR* spec, int* begin, int end, unsigned char* return true; } -} // namespace url +} // namespace chromium_url #endif // URL_URL_CANON_INTERNAL_H_ diff --git a/source/common/chromium_url/url_canon_path.cc b/source/common/chromium_url/url_canon_path.cc index 2e13dc0cf8c87..f8c803a9c5f5a 100644 --- a/source/common/chromium_url/url_canon_path.cc +++ b/source/common/chromium_url/url_canon_path.cc @@ -11,7 +11,7 @@ #include "common/chromium_url/url_canon_internal.h" #include "common/chromium_url/url_parse_internal.h" -namespace url { +namespace chromium_url { namespace { @@ -414,4 +414,4 @@ bool CanonicalizePath(const char* spec, const Component& path, CanonOutput* outp return DoPath(spec, path, output, out_path); } -} // namespace url +} // namespace chromium_url diff --git a/source/common/chromium_url/url_canon_stdstring.cc b/source/common/chromium_url/url_canon_stdstring.cc index dc501d66ec26b..0c61831e5f1ac 100644 --- a/source/common/chromium_url/url_canon_stdstring.cc +++ b/source/common/chromium_url/url_canon_stdstring.cc @@ -7,7 +7,7 @@ #include "common/chromium_url/url_canon_stdstring.h" -namespace url { +namespace chromium_url { StdStringCanonOutput::StdStringCanonOutput(std::string* str) : CanonOutput(), str_(str) { cur_len_ = static_cast(str_->size()); 
// Append to existing data. @@ -30,4 +30,4 @@ void StdStringCanonOutput::Resize(int sz) { buffer_len_ = sz; } -} // namespace url +} // namespace chromium_url diff --git a/source/common/chromium_url/url_canon_stdstring.h b/source/common/chromium_url/url_canon_stdstring.h index e502b1a3e6f55..e14d6c22e74e8 100644 --- a/source/common/chromium_url/url_canon_stdstring.h +++ b/source/common/chromium_url/url_canon_stdstring.h @@ -21,7 +21,7 @@ TypeName(const TypeName&) = delete; \ TypeName& operator=(const TypeName&) = delete -namespace url { +namespace chromium_url { // Write into a std::string given in the constructor. This object does not own // the string itself, and the user must ensure that the string stays alive @@ -53,6 +53,6 @@ class COMPONENT_EXPORT(URL) StdStringCanonOutput : public CanonOutput { DISALLOW_COPY_AND_ASSIGN(StdStringCanonOutput); }; -} // namespace url +} // namespace chromium_url #endif // URL_URL_CANON_STDSTRING_H_ diff --git a/source/common/chromium_url/url_parse.h b/source/common/chromium_url/url_parse.h index 31d7d3f16c1e5..b840af60438d1 100644 --- a/source/common/chromium_url/url_parse.h +++ b/source/common/chromium_url/url_parse.h @@ -8,7 +8,7 @@ #ifndef URL_PARSE_H_ #define URL_PARSE_H_ -namespace url { +namespace chromium_url { // Component ------------------------------------------------------------------ @@ -44,6 +44,6 @@ struct Component { // points. The ending point is non-inclusive. 
inline Component MakeRange(int begin, int end) { return Component(begin, end - begin); } -} // namespace url +} // namespace chromium_url #endif // URL_PARSE_H_ diff --git a/source/common/chromium_url/url_parse_internal.h b/source/common/chromium_url/url_parse_internal.h index a8c15819048be..0ca47bc488461 100644 --- a/source/common/chromium_url/url_parse_internal.h +++ b/source/common/chromium_url/url_parse_internal.h @@ -8,11 +8,11 @@ #ifndef URL_URL_PARSE_INTERNAL_H_ #define URL_URL_PARSE_INTERNAL_H_ -namespace url { +namespace chromium_url { // We treat slashes and backslashes the same for IE compatibility. inline bool IsURLSlash(char ch) { return ch == '/' || ch == '\\'; } -} // namespace url +} // namespace chromium_url #endif // URL_URL_PARSE_INTERNAL_H_ diff --git a/source/common/http/path_utility.cc b/source/common/http/path_utility.cc index 796c2c1cbd52b..74af7039a4bd4 100644 --- a/source/common/http/path_utility.cc +++ b/source/common/http/path_utility.cc @@ -13,10 +13,11 @@ namespace Http { namespace { absl::optional canonicalizePath(absl::string_view original_path) { std::string canonical_path; - url::Component in_component(0, original_path.size()); - url::Component out_component; - url::StdStringCanonOutput output(&canonical_path); - if (!url::CanonicalizePath(original_path.data(), in_component, &output, &out_component)) { + chromium_url::Component in_component(0, original_path.size()); + chromium_url::Component out_component; + chromium_url::StdStringCanonOutput output(&canonical_path); + if (!chromium_url::CanonicalizePath(original_path.data(), in_component, &output, + &out_component)) { return absl::nullopt; } else { output.Complete(); From 93c0fbb0db67e02427905c20aa325b587c4afe51 Mon Sep 17 00:00:00 2001 From: Rama Chavali Date: Mon, 8 Apr 2019 08:35:35 +0530 Subject: [PATCH 073/165] remove idle timeout validation (#6500) Signed-off-by: Rama Chavali --- api/envoy/api/v2/route/route.proto | 3 +-- test/common/router/config_impl_test.cc | 6 ++++-- 2 
files changed, 5 insertions(+), 4 deletions(-) diff --git a/api/envoy/api/v2/route/route.proto b/api/envoy/api/v2/route/route.proto index 2d2b56ae67d80..f0bda5fd8ace6 100644 --- a/api/envoy/api/v2/route/route.proto +++ b/api/envoy/api/v2/route/route.proto @@ -574,8 +574,7 @@ message RouteAction { // fires, the stream is terminated with a 408 Request Timeout error code if no // upstream response header has been received, otherwise a stream reset // occurs. - google.protobuf.Duration idle_timeout = 24 - [(validate.rules).duration.gt = {}, (gogoproto.stdduration) = true]; + google.protobuf.Duration idle_timeout = 24 [(gogoproto.stdduration) = true]; // Indicates that the route has a retry policy. Note that if this is set, // it'll take precedence over the virtual host level retry policy entirely diff --git a/test/common/router/config_impl_test.cc b/test/common/router/config_impl_test.cc index f17562c6e3e2b..dc2eaf2f178e6 100644 --- a/test/common/router/config_impl_test.cc +++ b/test/common/router/config_impl_test.cc @@ -5036,8 +5036,10 @@ name: ZeroIdleTimeout idle_timeout: 0s )EOF"; - EXPECT_THROW_WITH_REGEX(parseRouteConfigurationFromV2Yaml(ZeroIdleTimeout), EnvoyException, - "value must be greater than \" \"0s"); + TestConfigImpl config(parseRouteConfigurationFromV2Yaml(ZeroIdleTimeout), factory_context_, true); + Http::TestHeaderMapImpl headers = genRedirectHeaders("idle.lyft.com", "/regex", true, false); + const RouteEntry* route_entry = config.route(headers, 0)->routeEntry(); + EXPECT_EQ(0, route_entry->idleTimeout().value().count()); } TEST_F(RouteConfigurationV2, ExplicitIdleTimeout) { From 22a9b8c57187925a82b46b922bad3dbb9b7c8166 Mon Sep 17 00:00:00 2001 From: Derek Argueta Date: Sun, 7 Apr 2019 20:25:52 -0700 Subject: [PATCH 074/165] test: router upstream log to v2 config stubs (#6499) Signed-off-by: Derek Argueta --- test/common/router/BUILD | 1 - .../common/router/router_upstream_log_test.cc | 34 +++++++++---------- 2 files changed, 17 insertions(+), 18 
deletions(-) diff --git a/test/common/router/BUILD b/test/common/router/BUILD index 08811d0e06653..de2a36450bfa3 100644 --- a/test/common/router/BUILD +++ b/test/common/router/BUILD @@ -179,7 +179,6 @@ envoy_cc_test( external_deps = ["abseil_optional"], deps = [ "//source/common/buffer:buffer_lib", - "//source/common/config:filter_json_lib", "//source/common/network:utility_lib", "//source/common/router:router_lib", "//source/common/upstream:upstream_includes", diff --git a/test/common/router/router_upstream_log_test.cc b/test/common/router/router_upstream_log_test.cc index e9e849a38c2cd..0e41d0442996f 100644 --- a/test/common/router/router_upstream_log_test.cc +++ b/test/common/router/router_upstream_log_test.cc @@ -1,7 +1,6 @@ #include #include -#include "common/config/filter_json.h" #include "common/network/utility.h" #include "common/router/router.h" #include "common/upstream/upstream_impl.h" @@ -34,17 +33,18 @@ namespace { absl::optional testUpstreamLog() { // Custom format without timestamps or durations. 
- const std::string json_string = R"EOF( - { - "path": "/dev/null", - "format": "%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL% %RESPONSE_CODE% %RESPONSE_FLAGS% %BYTES_RECEIVED% %BYTES_SENT% %REQ(:AUTHORITY)% %UPSTREAM_HOST% %RESP(X-UPSTREAM-HEADER)% %TRAILER(X-TRAILER)%\n" - } + const std::string yaml = R"EOF( +name: envoy.file_access_log +typed_config: + "@type": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog + format: "%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL% %RESPONSE_CODE% + %RESPONSE_FLAGS% %BYTES_RECEIVED% %BYTES_SENT% %REQ(:AUTHORITY)% %UPSTREAM_HOST% + %RESP(X-UPSTREAM-HEADER)% %TRAILER(X-TRAILER)%\n" + path: "/dev/null" )EOF"; - auto json_object_ptr = Json::Factory::loadFromString(json_string); - envoy::config::filter::accesslog::v2::AccessLog upstream_log; - Envoy::Config::FilterJson::translateAccessLog(*json_object_ptr, upstream_log); + MessageUtil::loadFromYaml(yaml, upstream_log); return absl::optional(upstream_log); } @@ -256,17 +256,17 @@ TEST_F(RouterUpstreamLogTest, LogHeaders) { // Test timestamps and durations are emitted. 
TEST_F(RouterUpstreamLogTest, LogTimestampsAndDurations) { - const std::string json_string = R"EOF( - { - "path": "/dev/null", - "format": "[%START_TIME%] %REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL% %DURATION% %RESPONSE_DURATION% %REQUEST_DURATION%" - } + const std::string yaml = R"EOF( +name: envoy.file_access_log +typed_config: + "@type": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog + format: "[%START_TIME%] %REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL% + %DURATION% %RESPONSE_DURATION% %REQUEST_DURATION%" + path: "/dev/null" )EOF"; - auto json_object_ptr = Json::Factory::loadFromString(json_string); - envoy::config::filter::accesslog::v2::AccessLog upstream_log; - Envoy::Config::FilterJson::translateAccessLog(*json_object_ptr, upstream_log); + MessageUtil::loadFromYaml(yaml, upstream_log); init(absl::optional(upstream_log)); run(200, {{"x-envoy-original-path", "/foo"}}, {}, {}); From 378d59b43e71e07c36c9ef60b0ee7318ca7f510f Mon Sep 17 00:00:00 2001 From: Mitch Sukalski Date: Mon, 8 Apr 2019 10:09:49 -0700 Subject: [PATCH 075/165] redis: add moved/ask redirection support (#6294) - Redis requests are now redirected to a new upstream connection upon receipt of a MOVED or ASK error response. If the request cannot be redirected, then the error is passed downstream unmodified. The redirection IP (IPv4 or IPv6) address and TCP port specified in the Redis server error does not need to reference a known host of the cluster associated with the redis_proxy filter. - added an enable_redirection boolean to the redis proxy connection pool settings to control whether or not server redirection errors are honored or passed downstream unchanged. - RespValue copy constructor, copy assignment, and equality testing methods for easier manipulation of RespValues.
- added cluster statistics, upstream_internal_redirect_succeeded_total and upstream_internal_redirect_failed_total in ClientImpl::onRespValue() callback - extended unit tests for Redis connection pool, client, command splitter, and RespValue copying and equality testing. - new basic integration test for redis_proxy: simple request and response, and invalid request testing (enable_redirection enabled). Signed-off-by: Mitch Sukalski --- .../network/redis_proxy/v2/redis_proxy.proto | 7 + .../filters/network/common/redis/client.h | 13 + .../network/common/redis/client_impl.cc | 30 +- .../network/common/redis/client_impl.h | 10 + .../filters/network/common/redis/codec.h | 5 + .../network/common/redis/codec_impl.cc | 77 +++ .../filters/network/redis_proxy/BUILD | 2 + .../network/redis_proxy/command_splitter.h | 6 +- .../redis_proxy/command_splitter_impl.cc | 278 ++++++-- .../redis_proxy/command_splitter_impl.h | 43 +- .../filters/network/redis_proxy/conn_pool.h | 14 + .../network/redis_proxy/conn_pool_impl.cc | 98 +++ .../network/redis_proxy/conn_pool_impl.h | 10 + .../network/redis_proxy/proxy_filter.cc | 2 +- .../extensions/health_checkers/redis/redis.cc | 8 + .../extensions/health_checkers/redis/redis.h | 4 + test/config/utility.cc | 20 +- .../network/common/redis/client_impl_test.cc | 234 +++++++ .../network/common/redis/codec_impl_test.cc | 99 +++ .../filters/network/common/redis/mocks.cc | 34 - .../filters/network/common/redis/mocks.h | 2 +- .../filters/network/common/redis/test_utils.h | 8 +- .../redis_proxy/command_lookup_speed_test.cc | 18 +- .../redis_proxy/command_splitter_impl_test.cc | 621 ++++++++++++++++-- .../redis_proxy/conn_pool_impl_test.cc | 121 +++- .../filters/network/redis_proxy/mocks.h | 8 +- .../redis_proxy_integration_test.cc | 316 +++++++-- .../health_checkers/redis/redis_test.cc | 41 ++ 28 files changed, 1909 insertions(+), 220 deletions(-) diff --git a/api/envoy/config/filter/network/redis_proxy/v2/redis_proxy.proto
b/api/envoy/config/filter/network/redis_proxy/v2/redis_proxy.proto index cd8c18b128755..23448eff903f9 100644 --- a/api/envoy/config/filter/network/redis_proxy/v2/redis_proxy.proto +++ b/api/envoy/config/filter/network/redis_proxy/v2/redis_proxy.proto @@ -46,6 +46,13 @@ message RedisProxy { // * '{user1000}.following' and '{user1000}.followers' **will** be sent to the same upstream // * '{user1000}.following' and '{user1001}.following' **might** be sent to the same upstream bool enable_hashtagging = 2; + + // Accept `moved and ask redirection + // `_ errors from upstream + // redis servers, and retry commands to the specified target server. The target server does not + // need to be known to the cluster manager. If the command cannot be redirected, then the + // original error is passed downstream unchanged. By default, this support is not enabled. + bool enable_redirection = 3; } // Network settings for the connection pool to the upstream cluster. diff --git a/source/extensions/filters/network/common/redis/client.h b/source/extensions/filters/network/common/redis/client.h index e7dde1a2f0b92..59c5c88080c9b 100644 --- a/source/extensions/filters/network/common/redis/client.h +++ b/source/extensions/filters/network/common/redis/client.h @@ -41,6 +41,13 @@ class PoolCallbacks { * Called when a network/protocol error occurs and there is no response. */ virtual void onFailure() PURE; + + /** + * Called when a MOVED or ASK redirection error is received, and the request must be retried. + * @param value supplies the MOVED error response + * @return bool true if the request is successfully redirected, false otherwise + */ + virtual bool onRedirection(const Common::Redis::RespValue& value) PURE; }; /** @@ -97,6 +104,12 @@ class Config { * same hash tag will be forwarded to the same upstream. */ virtual bool enableHashtagging() const PURE; + + /** + * @return when enabled, moved/ask redirection errors from upstream redis servers will be + * processed. 
+ */ + virtual bool enableRedirection() const PURE; }; /** diff --git a/source/extensions/filters/network/common/redis/client_impl.cc b/source/extensions/filters/network/common/redis/client_impl.cc index a7d778f8d8b30..1040036560488 100644 --- a/source/extensions/filters/network/common/redis/client_impl.cc +++ b/source/extensions/filters/network/common/redis/client_impl.cc @@ -10,7 +10,8 @@ namespace Client { ConfigImpl::ConfigImpl( const envoy::config::filter::network::redis_proxy::v2::RedisProxy::ConnPoolSettings& config) : op_timeout_(PROTOBUF_GET_MS_REQUIRED(config, op_timeout)), - enable_hashtagging_(config.enable_hashtagging()) {} + enable_hashtagging_(config.enable_hashtagging()), + enable_redirection_(config.enable_redirection()) {} ClientPtr ClientImpl::create(Upstream::HostConstSharedPtr host, Event::Dispatcher& dispatcher, EncoderPtr&& encoder, DecoderFactory& decoder_factory, @@ -137,15 +138,34 @@ void ClientImpl::onEvent(Network::ConnectionEvent event) { void ClientImpl::onRespValue(RespValuePtr&& value) { ASSERT(!pending_requests_.empty()); PendingRequest& request = pending_requests_.front(); - if (!request.canceled_) { - request.callbacks_.onResponse(std::move(value)); - } else { + + if (request.canceled_) { host_->cluster().stats().upstream_rq_cancelled_.inc(); + } else if (config_.enableRedirection() && (value->type() == Common::Redis::RespType::Error)) { + std::vector err = StringUtil::splitToken(value->asString(), " ", false); + bool redirected = false; + if (err.size() == 3) { + if (err[0] == RedirectionResponse::get().MOVED || err[0] == RedirectionResponse::get().ASK) { + redirected = request.callbacks_.onRedirection(*value); + if (redirected) { + host_->cluster().stats().upstream_internal_redirect_succeeded_total_.inc(); + } else { + host_->cluster().stats().upstream_internal_redirect_failed_total_.inc(); + } + } + } + if (!redirected) { + request.callbacks_.onResponse(std::move(value)); + } + } else { + 
request.callbacks_.onResponse(std::move(value)); } + pending_requests_.pop_front(); // If there are no remaining ops in the pipeline we need to disable the timer. - // Otherwise we boost the timer since we are receiving responses and there are more to flush out. + // Otherwise we boost the timer since we are receiving responses and there are more to flush + // out. if (pending_requests_.empty()) { connect_or_op_timer_->disableTimer(); } else { diff --git a/source/extensions/filters/network/common/redis/client_impl.h b/source/extensions/filters/network/common/redis/client_impl.h index 8fa9e817df9df..5a44d39e82687 100644 --- a/source/extensions/filters/network/common/redis/client_impl.h +++ b/source/extensions/filters/network/common/redis/client_impl.h @@ -10,6 +10,7 @@ #include "common/common/hash.h" #include "common/network/filter_impl.h" #include "common/protobuf/utility.h" +#include "common/singleton/const_singleton.h" #include "common/upstream/load_balancer_impl.h" #include "extensions/filters/network/common/redis/client.h" @@ -24,6 +25,13 @@ namespace Client { // TODO(mattklein123): Circuit breaking // TODO(rshriram): Fault injection +struct RedirectionValues { + const std::string ASK = "ASK"; + const std::string MOVED = "MOVED"; +}; + +typedef ConstSingleton RedirectionResponse; + class ConfigImpl : public Config { public: ConfigImpl( @@ -32,10 +40,12 @@ class ConfigImpl : public Config { bool disableOutlierEvents() const override { return false; } std::chrono::milliseconds opTimeout() const override { return op_timeout_; } bool enableHashtagging() const override { return enable_hashtagging_; } + bool enableRedirection() const override { return enable_redirection_; } private: const std::chrono::milliseconds op_timeout_; const bool enable_hashtagging_; + const bool enable_redirection_; }; class ClientImpl : public Client, public DecoderCallbacks, public Network::ConnectionCallbacks { diff --git a/source/extensions/filters/network/common/redis/codec.h 
b/source/extensions/filters/network/common/redis/codec.h index 8774ff454c299..dda00888c8008 100644 --- a/source/extensions/filters/network/common/redis/codec.h +++ b/source/extensions/filters/network/common/redis/codec.h @@ -27,6 +27,11 @@ class RespValue { RespValue() : type_(RespType::Null) {} ~RespValue() { cleanup(); } + RespValue(const RespValue& other); // copy constructor + RespValue& operator=(const RespValue& other); // copy assignment + bool operator==(const RespValue& other) const; // test for equality, unit tests + bool operator!=(const RespValue& other) const { return !(*this == other); } + /** * Convert a RESP value to a string for debugging purposes. */ diff --git a/source/extensions/filters/network/common/redis/codec_impl.cc b/source/extensions/filters/network/common/redis/codec_impl.cc index 3f57533d9d437..92faca71da2a9 100644 --- a/source/extensions/filters/network/common/redis/codec_impl.cc +++ b/source/extensions/filters/network/common/redis/codec_impl.cc @@ -116,6 +116,83 @@ void RespValue::type(RespType type) { } } +RespValue::RespValue(const RespValue& other) : type_(RespType::Null) { + this->type(other.type()); + switch (type_) { + case RespType::Array: { + this->asArray() = other.asArray(); + break; + } + case RespType::SimpleString: + case RespType::BulkString: + case RespType::Error: { + this->asString() = other.asString(); + break; + } + case RespType::Integer: { + this->asInteger() = other.asInteger(); + break; + } + case RespType::Null: + break; + } +} + +RespValue& RespValue::operator=(const RespValue& other) { + if (&other == this) { + return *this; + } + this->type(other.type()); + switch (type_) { + case RespType::Array: { + this->asArray() = other.asArray(); + break; + } + case RespType::SimpleString: + case RespType::BulkString: + case RespType::Error: { + this->asString() = other.asString(); + break; + } + case RespType::Integer: { + this->asInteger() = other.asInteger(); + break; + } + case RespType::Null: + break; + } + return 
*this; +} + +bool RespValue::operator==(const RespValue& other) const { + bool result = false; + if (type_ != other.type()) { + return result; + } + + switch (type_) { + case RespType::Array: { + result = (this->asArray() == other.asArray()); + break; + } + case RespType::SimpleString: + case RespType::BulkString: + case RespType::Error: { + result = (this->asString() == other.asString()); + break; + } + case RespType::Integer: { + result = (this->asInteger() == other.asInteger()); + break; + } + case RespType::Null: { + result = true; + break; + } + } + return result; +} + void DecoderImpl::decode(Buffer::Instance& data) { uint64_t num_slices = data.getRawSlices(nullptr, 0); STACK_ARRAY(slices, Buffer::RawSlice, num_slices); diff --git a/source/extensions/filters/network/redis_proxy/BUILD b/source/extensions/filters/network/redis_proxy/BUILD index 8cd0a234462e0..4c56109ada4cd 100644 --- a/source/extensions/filters/network/redis_proxy/BUILD +++ b/source/extensions/filters/network/redis_proxy/BUILD @@ -59,9 +59,11 @@ envoy_cc_library( "//include/envoy/upstream:cluster_manager_interface", "//source/common/buffer:buffer_lib", "//source/common/common:assert_lib", + "//source/common/network:address_lib", "//source/common/network:filter_lib", "//source/common/protobuf:utility_lib", "//source/common/upstream:load_balancer_lib", + "//source/common/upstream:upstream_lib", "//source/extensions/filters/network/common/redis:client_lib", "@envoy_api//envoy/config/filter/network/redis_proxy/v2:redis_proxy_cc", ], diff --git a/source/extensions/filters/network/redis_proxy/command_splitter.h b/source/extensions/filters/network/redis_proxy/command_splitter.h index 6f517c4cb42c6..678a9e9807907 100644 --- a/source/extensions/filters/network/redis_proxy/command_splitter.h +++ b/source/extensions/filters/network/redis_proxy/command_splitter.h @@ -50,14 +50,14 @@ class Instance { virtual ~Instance() {} /** - * Make a split redis request. 
- * @param request supplies the split request to make. + * Make a split redis request capable of being retried/redirected. + * @param request supplies the split request to make (ownership transferred to call). * @param callbacks supplies the split request completion callbacks. * @return SplitRequestPtr a handle to the active request or nullptr if the request has already * been satisfied (via onResponse() being called). The splitter ALWAYS calls * onResponse() for a given request. */ - virtual SplitRequestPtr makeRequest(const Common::Redis::RespValue& request, + virtual SplitRequestPtr makeRequest(Common::Redis::RespValuePtr&& request, SplitCallbacks& callbacks) PURE; }; diff --git a/source/extensions/filters/network/redis_proxy/command_splitter_impl.cc b/source/extensions/filters/network/redis_proxy/command_splitter_impl.cc index beea0fbaa32ee..136ad69169cc8 100644 --- a/source/extensions/filters/network/redis_proxy/command_splitter_impl.cc +++ b/source/extensions/filters/network/redis_proxy/command_splitter_impl.cc @@ -25,6 +25,43 @@ Common::Redis::RespValuePtr Utility::makeError(const std::string& error) { return response; } +namespace { + +/** + * Validate the received moved/ask redirection error and the original redis request. + * @param[in] original_request supplies the incoming request associated with the command splitter + * request. + * @param[in] error_response supplies the moved/ask redirection response from the upstream Redis + * server. + * @param[out] error_substrings the non-whitespace substrings of error_response. + * @param[out] ask_redirection true if error_response is an ASK redirection error, false otherwise. + * @return bool true if the original_request or error_response are not valid, false otherwise. 
+ */ +bool redirectionArgsInvalid(const Common::Redis::RespValue* original_request, + const Common::Redis::RespValue& error_response, + std::vector& error_substrings, + bool& ask_redirection) { + if ((original_request == nullptr) || (error_response.type() != Common::Redis::RespType::Error)) { + return true; + } + error_substrings = StringUtil::splitToken(error_response.asString(), " ", false); + if (error_substrings.size() != 3) { + return true; + } + if (error_substrings[0] == "ASK") { + ask_redirection = true; + } else if (error_substrings[0] == "MOVED") { + ask_redirection = false; + } else { + // The first substring must be MOVED or ASK. + return true; + } + // Other validation done later to avoid duplicate processing. + return false; +} + +} // namespace + void SplitRequestBase::onWrongNumberOfArguments(SplitCallbacks& callbacks, const Common::Redis::RespValue& request) { callbacks.onResponse(Utility::makeError( @@ -54,51 +91,86 @@ void SingleServerRequest::onFailure() { callbacks_.onResponse(Utility::makeError(Response::get().UpstreamFailure)); } +void SingleServerRequest::recreate(Common::Redis::RespValue& request, bool prepend_asking) { + if (!prepend_asking) { + request = *incoming_request_; + return; + } + + Common::Redis::RespValue asking_cmd; + asking_cmd.type(Common::Redis::RespType::BulkString); + asking_cmd.asString() = "asking"; + + request.type(Common::Redis::RespType::Array); + request.asArray().push_back(asking_cmd); + request.asArray().insert(request.asArray().end(), incoming_request_->asArray().begin(), + incoming_request_->asArray().end()); +} + +bool SingleServerRequest::onRedirection(const Common::Redis::RespValue& value) { + std::vector err; + bool ask_redirection = false; + if (redirectionArgsInvalid(incoming_request_.get(), value, err, ask_redirection) || !conn_pool_) { + return false; + } + + Common::Redis::RespValue request; + recreate(request, ask_redirection); + + const std::string host_address = std::string(err[2]); // ip:port + 
handle_ = conn_pool_->makeRequestToHost(host_address, request, *this); + return (handle_ != nullptr); +} + void SingleServerRequest::cancel() { handle_->cancel(); handle_ = nullptr; } SplitRequestPtr SimpleRequest::create(ConnPool::Instance& conn_pool, - const Common::Redis::RespValue& incoming_request, + Common::Redis::RespValuePtr&& incoming_request, SplitCallbacks& callbacks, CommandStats& command_stats, TimeSource& time_source, bool latency_in_micros) { std::unique_ptr request_ptr{ new SimpleRequest(callbacks, command_stats, time_source, latency_in_micros)}; - request_ptr->handle_ = conn_pool.makeRequest(incoming_request.asArray()[1].asString(), - incoming_request, *request_ptr); + request_ptr->conn_pool_ = &conn_pool; + request_ptr->handle_ = conn_pool.makeRequest(incoming_request->asArray()[1].asString(), + *incoming_request, *request_ptr); if (!request_ptr->handle_) { - request_ptr->callbacks_.onResponse(Utility::makeError(Response::get().NoUpstreamHost)); + callbacks.onResponse(Utility::makeError(Response::get().NoUpstreamHost)); return nullptr; } + request_ptr->incoming_request_ = std::move(incoming_request); return std::move(request_ptr); } SplitRequestPtr EvalRequest::create(ConnPool::Instance& conn_pool, - const Common::Redis::RespValue& incoming_request, + Common::Redis::RespValuePtr&& incoming_request, SplitCallbacks& callbacks, CommandStats& command_stats, TimeSource& time_source, bool latency_in_micros) { - // EVAL looks like: EVAL script numkeys key [key ...] arg [arg ...] // Ensure there are at least three args to the command or it cannot be hashed. 
- if (incoming_request.asArray().size() < 4) { - onWrongNumberOfArguments(callbacks, incoming_request); + if (incoming_request->asArray().size() < 4) { + onWrongNumberOfArguments(callbacks, *incoming_request); command_stats.error_.inc(); return nullptr; } std::unique_ptr request_ptr{ new EvalRequest(callbacks, command_stats, time_source, latency_in_micros)}; - request_ptr->handle_ = conn_pool.makeRequest(incoming_request.asArray()[3].asString(), - incoming_request, *request_ptr); + + request_ptr->conn_pool_ = &conn_pool; + request_ptr->handle_ = conn_pool.makeRequest(incoming_request->asArray()[3].asString(), + *incoming_request, *request_ptr); if (!request_ptr->handle_) { command_stats.error_.inc(); - request_ptr->callbacks_.onResponse(Utility::makeError(Response::get().NoUpstreamHost)); + callbacks.onResponse(Utility::makeError(Response::get().NoUpstreamHost)); return nullptr; } + request_ptr->incoming_request_ = std::move(incoming_request); return std::move(request_ptr); } @@ -124,13 +196,13 @@ void FragmentedRequest::onChildFailure(uint32_t index) { } SplitRequestPtr MGETRequest::create(ConnPool::Instance& conn_pool, - const Common::Redis::RespValue& incoming_request, + Common::Redis::RespValuePtr&& incoming_request, SplitCallbacks& callbacks, CommandStats& command_stats, TimeSource& time_source, bool latency_in_micros) { std::unique_ptr request_ptr{ new MGETRequest(callbacks, command_stats, time_source, latency_in_micros)}; - request_ptr->num_pending_responses_ = incoming_request.asArray().size() - 1; + request_ptr->num_pending_responses_ = incoming_request->asArray().size() - 1; request_ptr->pending_requests_.reserve(request_ptr->num_pending_responses_); request_ptr->pending_response_ = std::make_unique(); @@ -146,20 +218,26 @@ SplitRequestPtr MGETRequest::create(ConnPool::Instance& conn_pool, single_mget.type(Common::Redis::RespType::Array); single_mget.asArray().swap(values); - for (uint64_t i = 1; i < incoming_request.asArray().size(); i++) { + for 
(uint64_t i = 1; i < incoming_request->asArray().size(); i++) { request_ptr->pending_requests_.emplace_back(*request_ptr, i - 1); PendingRequest& pending_request = request_ptr->pending_requests_.back(); - single_mget.asArray()[1].asString() = incoming_request.asArray()[i].asString(); + single_mget.asArray()[1].asString() = incoming_request->asArray()[i].asString(); ENVOY_LOG(debug, "redis: parallel get: '{}'", single_mget.toString()); - pending_request.handle_ = conn_pool.makeRequest(incoming_request.asArray()[i].asString(), + pending_request.conn_pool_ = &conn_pool; + pending_request.handle_ = conn_pool.makeRequest(incoming_request->asArray()[i].asString(), single_mget, pending_request); if (!pending_request.handle_) { pending_request.onResponse(Utility::makeError(Response::get().NoUpstreamHost)); } } - return request_ptr->num_pending_responses_ > 0 ? std::move(request_ptr) : nullptr; + if (request_ptr->num_pending_responses_ > 0) { + request_ptr->incoming_request_ = std::move(incoming_request); + return std::move(request_ptr); + } + + return nullptr; } void MGETRequest::onChildResponse(Common::Redis::RespValuePtr&& value, uint32_t index) { @@ -195,19 +273,53 @@ void MGETRequest::onChildResponse(Common::Redis::RespValuePtr&& value, uint32_t } } +void MGETRequest::recreate(Common::Redis::RespValue& request, uint32_t index, bool prepend_asking) { + static const uint32_t GET_COMMAND_SUBSTRINGS = 2; + uint32_t num_values = prepend_asking ? 
(GET_COMMAND_SUBSTRINGS + 1) : GET_COMMAND_SUBSTRINGS; + std::vector values(num_values); + + for (uint32_t i = 0; i < num_values; i++) { + values[i].type(Common::Redis::RespType::BulkString); + } + values[--num_values].asString() = incoming_request_->asArray()[index + 1].asString(); + values[--num_values].asString() = "get"; + if (prepend_asking) { + values[--num_values].asString() = "asking"; + } + + request.type(Common::Redis::RespType::Array); + request.asArray().swap(values); +} + +bool MGETRequest::onChildRedirection(const Common::Redis::RespValue& value, uint32_t index, + ConnPool::Instance* conn_pool) { + std::vector err; + bool ask_redirection = false; + if (redirectionArgsInvalid(incoming_request_.get(), value, err, ask_redirection) || !conn_pool) { + return false; + } + + Common::Redis::RespValue request; + recreate(request, index, ask_redirection); + + this->pending_requests_[index].handle_ = + conn_pool->makeRequestToHost(std::string(err[2]), request, this->pending_requests_[index]); + return (this->pending_requests_[index].handle_ != nullptr); +} + SplitRequestPtr MSETRequest::create(ConnPool::Instance& conn_pool, - const Common::Redis::RespValue& incoming_request, + Common::Redis::RespValuePtr&& incoming_request, SplitCallbacks& callbacks, CommandStats& command_stats, TimeSource& time_source, bool latency_in_micros) { - if ((incoming_request.asArray().size() - 1) % 2 != 0) { - onWrongNumberOfArguments(callbacks, incoming_request); + if ((incoming_request->asArray().size() - 1) % 2 != 0) { + onWrongNumberOfArguments(callbacks, *incoming_request); command_stats.error_.inc(); return nullptr; } std::unique_ptr request_ptr{ new MSETRequest(callbacks, command_stats, time_source, latency_in_micros)}; - request_ptr->num_pending_responses_ = (incoming_request.asArray().size() - 1) / 2; + request_ptr->num_pending_responses_ = (incoming_request->asArray().size() - 1) / 2; request_ptr->pending_requests_.reserve(request_ptr->num_pending_responses_); 
request_ptr->pending_response_ = std::make_unique(); @@ -223,22 +335,28 @@ SplitRequestPtr MSETRequest::create(ConnPool::Instance& conn_pool, single_mset.asArray().swap(values); uint64_t fragment_index = 0; - for (uint64_t i = 1; i < incoming_request.asArray().size(); i += 2) { + for (uint64_t i = 1; i < incoming_request->asArray().size(); i += 2) { request_ptr->pending_requests_.emplace_back(*request_ptr, fragment_index++); PendingRequest& pending_request = request_ptr->pending_requests_.back(); - single_mset.asArray()[1].asString() = incoming_request.asArray()[i].asString(); - single_mset.asArray()[2].asString() = incoming_request.asArray()[i + 1].asString(); + single_mset.asArray()[1].asString() = incoming_request->asArray()[i].asString(); + single_mset.asArray()[2].asString() = incoming_request->asArray()[i + 1].asString(); ENVOY_LOG(debug, "redis: parallel set: '{}'", single_mset.toString()); - pending_request.handle_ = conn_pool.makeRequest(incoming_request.asArray()[i].asString(), + pending_request.conn_pool_ = &conn_pool; + pending_request.handle_ = conn_pool.makeRequest(incoming_request->asArray()[i].asString(), single_mset, pending_request); if (!pending_request.handle_) { pending_request.onResponse(Utility::makeError(Response::get().NoUpstreamHost)); } } - return request_ptr->num_pending_responses_ > 0 ? std::move(request_ptr) : nullptr; + if (request_ptr->num_pending_responses_ > 0) { + request_ptr->incoming_request_ = std::move(incoming_request); + return std::move(request_ptr); + } + + return nullptr; } void MSETRequest::onChildResponse(Common::Redis::RespValuePtr&& value, uint32_t index) { @@ -270,15 +388,50 @@ void MSETRequest::onChildResponse(Common::Redis::RespValuePtr&& value, uint32_t } } +void MSETRequest::recreate(Common::Redis::RespValue& request, uint32_t index, bool prepend_asking) { + static const uint32_t SET_COMMAND_SUBSTRINGS = 3; + uint32_t num_values = prepend_asking ? 
(SET_COMMAND_SUBSTRINGS + 1) : SET_COMMAND_SUBSTRINGS; + std::vector values(num_values); + + for (uint32_t i = 0; i < num_values; i++) { + values[i].type(Common::Redis::RespType::BulkString); + } + values[--num_values].asString() = incoming_request_->asArray()[(index * 2) + 2].asString(); + values[--num_values].asString() = incoming_request_->asArray()[(index * 2) + 1].asString(); + values[--num_values].asString() = "set"; + if (prepend_asking) { + values[--num_values].asString() = "asking"; + } + + request.type(Common::Redis::RespType::Array); + request.asArray().swap(values); +} + +bool MSETRequest::onChildRedirection(const Common::Redis::RespValue& value, uint32_t index, + ConnPool::Instance* conn_pool) { + std::vector err; + bool ask_redirection = false; + if (redirectionArgsInvalid(incoming_request_.get(), value, err, ask_redirection) || !conn_pool) { + return false; + } + + Common::Redis::RespValue request; + recreate(request, index, ask_redirection); + + this->pending_requests_[index].handle_ = + conn_pool->makeRequestToHost(std::string(err[2]), request, this->pending_requests_[index]); + return (this->pending_requests_[index].handle_ != nullptr); +} + SplitRequestPtr SplitKeysSumResultRequest::create(ConnPool::Instance& conn_pool, - const Common::Redis::RespValue& incoming_request, + Common::Redis::RespValuePtr&& incoming_request, SplitCallbacks& callbacks, CommandStats& command_stats, TimeSource& time_source, bool latency_in_micros) { std::unique_ptr request_ptr{ new SplitKeysSumResultRequest(callbacks, command_stats, time_source, latency_in_micros)}; - request_ptr->num_pending_responses_ = incoming_request.asArray().size() - 1; + request_ptr->num_pending_responses_ = incoming_request->asArray().size() - 1; request_ptr->pending_requests_.reserve(request_ptr->num_pending_responses_); request_ptr->pending_response_ = std::make_unique(); @@ -286,27 +439,33 @@ SplitRequestPtr SplitKeysSumResultRequest::create(ConnPool::Instance& conn_pool, std::vector 
values(2); values[0].type(Common::Redis::RespType::BulkString); - values[0].asString() = incoming_request.asArray()[0].asString(); + values[0].asString() = incoming_request->asArray()[0].asString(); values[1].type(Common::Redis::RespType::BulkString); Common::Redis::RespValue single_fragment; single_fragment.type(Common::Redis::RespType::Array); single_fragment.asArray().swap(values); - for (uint64_t i = 1; i < incoming_request.asArray().size(); i++) { + for (uint64_t i = 1; i < incoming_request->asArray().size(); i++) { request_ptr->pending_requests_.emplace_back(*request_ptr, i - 1); PendingRequest& pending_request = request_ptr->pending_requests_.back(); - single_fragment.asArray()[1].asString() = incoming_request.asArray()[i].asString(); - ENVOY_LOG(debug, "redis: parallel {}: '{}'", incoming_request.asArray()[0].asString(), + single_fragment.asArray()[1].asString() = incoming_request->asArray()[i].asString(); + ENVOY_LOG(debug, "redis: parallel {}: '{}'", incoming_request->asArray()[0].asString(), single_fragment.toString()); - pending_request.handle_ = conn_pool.makeRequest(incoming_request.asArray()[i].asString(), + pending_request.conn_pool_ = &conn_pool; + pending_request.handle_ = conn_pool.makeRequest(incoming_request->asArray()[i].asString(), single_fragment, pending_request); if (!pending_request.handle_) { pending_request.onResponse(Utility::makeError(Response::get().NoUpstreamHost)); } } - return request_ptr->num_pending_responses_ > 0 ? 
std::move(request_ptr) : nullptr; + if (request_ptr->num_pending_responses_ > 0) { + request_ptr->incoming_request_ = std::move(incoming_request); + return std::move(request_ptr); + } + + return nullptr; } void SplitKeysSumResultRequest::onChildResponse(Common::Redis::RespValuePtr&& value, @@ -337,6 +496,41 @@ void SplitKeysSumResultRequest::onChildResponse(Common::Redis::RespValuePtr&& va } } +void SplitKeysSumResultRequest::recreate(Common::Redis::RespValue& request, uint32_t index, + bool prepend_asking) { + static const uint32_t BASE_COMMAND_SUBSTRINGS = 2; + uint32_t num_values = prepend_asking ? (BASE_COMMAND_SUBSTRINGS + 1) : BASE_COMMAND_SUBSTRINGS; + std::vector values(num_values); + + for (uint32_t i = 0; i < num_values; i++) { + values[i].type(Common::Redis::RespType::BulkString); + } + values[--num_values].asString() = incoming_request_->asArray()[index + 1].asString(); + values[--num_values].asString() = incoming_request_->asArray()[0].asString(); + if (prepend_asking) { + values[--num_values].asString() = "asking"; + } + + request.type(Common::Redis::RespType::Array); + request.asArray().swap(values); +} + +bool SplitKeysSumResultRequest::onChildRedirection(const Common::Redis::RespValue& value, + uint32_t index, ConnPool::Instance* conn_pool) { + std::vector err; + bool ask_redirection = false; + if (redirectionArgsInvalid(incoming_request_.get(), value, err, ask_redirection) || !conn_pool) { + return false; + } + + Common::Redis::RespValue request; + recreate(request, index, ask_redirection); + + this->pending_requests_[index].handle_ = + conn_pool->makeRequestToHost(std::string(err[2]), request, this->pending_requests_[index]); + return (this->pending_requests_[index].handle_ != nullptr); +} + InstanceImpl::InstanceImpl(ConnPool::InstancePtr&& conn_pool, Stats::Scope& scope, const std::string& stat_prefix, TimeSource& time_source, bool latency_in_micros) @@ -362,14 +556,14 @@ InstanceImpl::InstanceImpl(ConnPool::InstancePtr&& conn_pool, 
Stats::Scope& scop addHandler(scope, stat_prefix, Common::Redis::SupportedCommands::mset(), mset_handler_); } -SplitRequestPtr InstanceImpl::makeRequest(const Common::Redis::RespValue& request, +SplitRequestPtr InstanceImpl::makeRequest(Common::Redis::RespValuePtr&& request, SplitCallbacks& callbacks) { - if (request.type() != Common::Redis::RespType::Array) { + if (request->type() != Common::Redis::RespType::Array) { onInvalidRequest(callbacks); return nullptr; } - std::string to_lower_string(request.asArray()[0].asString()); + std::string to_lower_string(request->asArray()[0].asString()); to_lower_table_.toLowerCase(to_lower_string); if (to_lower_string == Common::Redis::SupportedCommands::ping()) { @@ -381,13 +575,13 @@ SplitRequestPtr InstanceImpl::makeRequest(const Common::Redis::RespValue& reques return nullptr; } - if (request.asArray().size() < 2) { + if (request->asArray().size() < 2) { // Commands other than PING all have at least two arguments. onInvalidRequest(callbacks); return nullptr; } - for (const Common::Redis::RespValue& value : request.asArray()) { + for (const Common::Redis::RespValue& value : request->asArray()) { if (value.type() != Common::Redis::RespType::BulkString) { onInvalidRequest(callbacks); return nullptr; @@ -398,13 +592,13 @@ SplitRequestPtr InstanceImpl::makeRequest(const Common::Redis::RespValue& reques if (handler == nullptr) { stats_.unsupported_command_.inc(); callbacks.onResponse(Utility::makeError( - fmt::format("unsupported command '{}'", request.asArray()[0].asString()))); + fmt::format("unsupported command '{}'", request->asArray()[0].asString()))); return nullptr; } - ENVOY_LOG(debug, "redis: splitting '{}'", request.toString()); + ENVOY_LOG(debug, "redis: splitting '{}'", request->toString()); handler->command_stats_.total_.inc(); SplitRequestPtr request_ptr = handler->handler_.get().startRequest( - request, callbacks, handler->command_stats_, time_source_, latency_in_micros_); + std::move(request), callbacks, 
handler->command_stats_, time_source_, latency_in_micros_); return request_ptr; } diff --git a/source/extensions/filters/network/redis_proxy/command_splitter_impl.h b/source/extensions/filters/network/redis_proxy/command_splitter_impl.h index b7ac2b90f409b..e6b1d475464e3 100644 --- a/source/extensions/filters/network/redis_proxy/command_splitter_impl.h +++ b/source/extensions/filters/network/redis_proxy/command_splitter_impl.h @@ -61,7 +61,7 @@ class CommandHandler { public: virtual ~CommandHandler() {} - virtual SplitRequestPtr startRequest(const Common::Redis::RespValue& request, + virtual SplitRequestPtr startRequest(Common::Redis::RespValuePtr&& request, SplitCallbacks& callbacks, CommandStats& command_stats, TimeSource& time_source, bool latency_in_micros) PURE; }; @@ -103,6 +103,7 @@ class SingleServerRequest : public SplitRequestBase, public Common::Redis::Clien // Common::Redis::Client::PoolCallbacks void onResponse(Common::Redis::RespValuePtr&& response) override; void onFailure() override; + bool onRedirection(const Common::Redis::RespValue& value) override; // RedisProxy::CommandSplitter::SplitRequest void cancel() override; @@ -112,8 +113,12 @@ class SingleServerRequest : public SplitRequestBase, public Common::Redis::Clien TimeSource& time_source, bool latency_in_micros) : SplitRequestBase(command_stats, time_source, latency_in_micros), callbacks_(callbacks) {} + void recreate(Common::Redis::RespValue& request, bool prepend_asking); + SplitCallbacks& callbacks_; + ConnPool::Instance* conn_pool_{}; Common::Redis::Client::PoolRequest* handle_{}; + Common::Redis::RespValuePtr incoming_request_; }; /** @@ -122,7 +127,7 @@ class SingleServerRequest : public SplitRequestBase, public Common::Redis::Clien class SimpleRequest : public SingleServerRequest { public: static SplitRequestPtr create(ConnPool::Instance& conn_pool, - const Common::Redis::RespValue& incoming_request, + Common::Redis::RespValuePtr&& incoming_request, SplitCallbacks& callbacks, 
CommandStats& command_stats, TimeSource& time_source, bool latency_in_micros); @@ -138,7 +143,7 @@ class SimpleRequest : public SingleServerRequest { class EvalRequest : public SingleServerRequest { public: static SplitRequestPtr create(ConnPool::Instance& conn_pool, - const Common::Redis::RespValue& incoming_request, + Common::Redis::RespValuePtr&& incoming_request, SplitCallbacks& callbacks, CommandStats& command_stats, TimeSource& time_source, bool latency_in_micros); @@ -174,15 +179,24 @@ class FragmentedRequest : public SplitRequestBase { } void onFailure() override { parent_.onChildFailure(index_); } + bool onRedirection(const Common::Redis::RespValue& value) override { + return parent_.onChildRedirection(value, index_, conn_pool_); + } + FragmentedRequest& parent_; const uint32_t index_; Common::Redis::Client::PoolRequest* handle_{}; + ConnPool::Instance* conn_pool_{}; }; virtual void onChildResponse(Common::Redis::RespValuePtr&& value, uint32_t index) PURE; void onChildFailure(uint32_t index); + virtual bool onChildRedirection(const Common::Redis::RespValue& value, uint32_t index, + ConnPool::Instance* conn_pool) PURE; SplitCallbacks& callbacks_; + + Common::Redis::RespValuePtr incoming_request_; Common::Redis::RespValuePtr pending_response_; std::vector pending_requests_; uint32_t num_pending_responses_; @@ -196,7 +210,7 @@ class FragmentedRequest : public SplitRequestBase { class MGETRequest : public FragmentedRequest, Logger::Loggable { public: static SplitRequestPtr create(ConnPool::Instance& conn_pool, - const Common::Redis::RespValue& incoming_request, + Common::Redis::RespValuePtr&& incoming_request, SplitCallbacks& callbacks, CommandStats& command_stats, TimeSource& time_source, bool latency_in_micros); @@ -207,6 +221,9 @@ class MGETRequest : public FragmentedRequest, Logger::Loggable { public: static SplitRequestPtr create(ConnPool::Instance& conn_pool, - const Common::Redis::RespValue& incoming_request, + Common::Redis::RespValuePtr&& 
incoming_request, SplitCallbacks& callbacks, CommandStats& command_stats, TimeSource& time_source, bool latency_in_micros); @@ -229,6 +246,9 @@ class SplitKeysSumResultRequest : public FragmentedRequest, Logger::Loggable { public: static SplitRequestPtr create(ConnPool::Instance& conn_pool, - const Common::Redis::RespValue& incoming_request, + Common::Redis::RespValuePtr&& incoming_request, SplitCallbacks& callbacks, CommandStats& command_stats, TimeSource& time_source, bool latency_in_micros); @@ -252,6 +272,9 @@ class MSETRequest : public FragmentedRequest, Logger::Loggable class CommandHandlerFactory : public CommandHandler, CommandHandlerBase { public: CommandHandlerFactory(ConnPool::Instance& conn_pool) : CommandHandlerBase(conn_pool) {} - SplitRequestPtr startRequest(const Common::Redis::RespValue& request, SplitCallbacks& callbacks, + SplitRequestPtr startRequest(Common::Redis::RespValuePtr&& request, SplitCallbacks& callbacks, CommandStats& command_stats, TimeSource& time_source, bool latency_in_micros) { - return RequestClass::create(conn_pool_, request, callbacks, command_stats, time_source, - latency_in_micros); + return RequestClass::create(conn_pool_, std::move(request), callbacks, command_stats, + time_source, latency_in_micros); } }; @@ -292,7 +315,7 @@ class InstanceImpl : public Instance, Logger::Loggable { const std::string& stat_prefix, TimeSource& time_source, bool latency_in_micros); // RedisProxy::CommandSplitter::Instance - SplitRequestPtr makeRequest(const Common::Redis::RespValue& request, + SplitRequestPtr makeRequest(Common::Redis::RespValuePtr&& request, SplitCallbacks& callbacks) override; private: diff --git a/source/extensions/filters/network/redis_proxy/conn_pool.h b/source/extensions/filters/network/redis_proxy/conn_pool.h index 442219e79b547..44ec83c76779b 100644 --- a/source/extensions/filters/network/redis_proxy/conn_pool.h +++ b/source/extensions/filters/network/redis_proxy/conn_pool.h @@ -34,6 +34,20 @@ class Instance { virtual 
Common::Redis::Client::PoolRequest* makeRequest(const std::string& hash_key, const Common::Redis::RespValue& request, Common::Redis::Client::PoolCallbacks& callbacks) PURE; + + /** + * Makes a redis request based on IP address and TCP port of the upstream host (e.g., moved/ask + * cluster redirection). + * @param host_address supplies the IP address and TCP port of the upstream host to receive the + * request. + * @param request supplies the Redis request to make. + * @param callbacks supplies the request completion callbacks. + * @return PoolRequest* a handle to the active request or nullptr if the request could not be made + * for some reason. + */ + virtual Common::Redis::Client::PoolRequest* + makeRequestToHost(const std::string& host_address, const Common::Redis::RespValue& request, + Common::Redis::Client::PoolCallbacks& callbacks) PURE; }; typedef std::unique_ptr InstancePtr; diff --git a/source/extensions/filters/network/redis_proxy/conn_pool_impl.cc b/source/extensions/filters/network/redis_proxy/conn_pool_impl.cc index edc12ac5c3a88..deaa1147bf869 100644 --- a/source/extensions/filters/network/redis_proxy/conn_pool_impl.cc +++ b/source/extensions/filters/network/redis_proxy/conn_pool_impl.cc @@ -30,6 +30,13 @@ InstanceImpl::makeRequest(const std::string& key, const Common::Redis::RespValue return tls_->getTyped().makeRequest(key, value, callbacks); } +Common::Redis::Client::PoolRequest* +InstanceImpl::makeRequestToHost(const std::string& host_address, + const Common::Redis::RespValue& value, + Common::Redis::Client::PoolCallbacks& callbacks) { + return tls_->getTyped().makeRequestToHost(host_address, value, callbacks); +} + InstanceImpl::ThreadLocalPool::ThreadLocalPool(InstanceImpl& parent, Event::Dispatcher& dispatcher, std::string cluster_name) : parent_(parent), dispatcher_(dispatcher), cluster_name_(std::move(cluster_name)) { @@ -69,6 +76,13 @@ void InstanceImpl::ThreadLocalPool::onClusterAddOrUpdateNonVirtual( const std::vector& hosts_removed) -> 
void { onHostsRemoved(hosts_removed); }); + + ASSERT(host_address_map_.empty()); + for (uint32_t i = 0; i < cluster_->prioritySet().hostSetsPerPriority().size(); i++) { + for (auto& host : cluster_->prioritySet().hostSetsPerPriority()[i]->hosts()) { + host_address_map_[host->address()->asString()] = host; + } + } } void InstanceImpl::ThreadLocalPool::onClusterRemoval(const std::string& cluster_name) { @@ -84,6 +98,7 @@ void InstanceImpl::ThreadLocalPool::onClusterRemoval(const std::string& cluster_ cluster_ = nullptr; host_set_member_update_cb_handle_ = nullptr; + host_address_map_.clear(); } void InstanceImpl::ThreadLocalPool::onHostsRemoved( @@ -95,6 +110,7 @@ void InstanceImpl::ThreadLocalPool::onHostsRemoved( // we just close the connection. This will fail any pending requests. it->second->redis_client_->close(); } + host_address_map_.erase(host->address()->asString()); } } @@ -122,6 +138,88 @@ InstanceImpl::ThreadLocalPool::makeRequest(const std::string& key, client->redis_client_->addConnectionCallbacks(*client); } + // Keep host_address_map_ in sync with client_map_. 
+ auto host_cached_by_address = host_address_map_.find(host->address()->asString()); + if (host_cached_by_address == host_address_map_.end()) { + host_address_map_[host->address()->asString()] = host; + } + + return client->redis_client_->makeRequest(request, callbacks); +} + +Common::Redis::Client::PoolRequest* +InstanceImpl::ThreadLocalPool::makeRequestToHost(const std::string& host_address, + const Common::Redis::RespValue& request, + Common::Redis::Client::PoolCallbacks& callbacks) { + if (cluster_ == nullptr) { + ASSERT(client_map_.empty()); + ASSERT(host_set_member_update_cb_handle_ == nullptr); + return nullptr; + } + + auto colon_pos = host_address.rfind(":"); + if ((colon_pos == std::string::npos) || (colon_pos == (host_address.size() - 1))) { + return nullptr; + } + + const std::string ip_address = host_address.substr(0, colon_pos); + const bool ipv6 = (ip_address.find(":") != std::string::npos); + std::string host_address_map_key; + Network::Address::InstanceConstSharedPtr address_ptr; + + if (!ipv6) { + host_address_map_key = host_address; + } else { + const std::string ip_port = host_address.substr(colon_pos + 1); + uint64_t ip_port_number; + if (!StringUtil::atoull(ip_port.c_str(), ip_port_number) || (ip_port_number > 65535)) { + return nullptr; + } + try { + address_ptr = std::make_shared(ip_address, ip_port_number); + } catch (const EnvoyException&) { + return nullptr; + } + host_address_map_key = address_ptr->asString(); + } + + auto it = host_address_map_.find(host_address_map_key); + if (it == host_address_map_.end()) { + // This host is not known to the cluster manager. Create a new host and insert it into the map. + // TODO(msukalski): Add logic to track the number of these "unknown" host connections, + // cap the number of these connections, and implement time-out and cleaning logic, etc. + + if (!ipv6) { + // Only create an IPv4 address instance if we need a new Upstream::HostImpl. 
+ const std::string ip_port = host_address.substr(colon_pos + 1); + uint64_t ip_port_number; + if (!StringUtil::atoull(ip_port.c_str(), ip_port_number) || (ip_port_number > 65535)) { + return nullptr; + } + try { + address_ptr = std::make_shared(ip_address, ip_port_number); + } catch (const EnvoyException&) { + return nullptr; + } + } + Upstream::HostSharedPtr new_host{new Upstream::HostImpl( + cluster_->info(), "", address_ptr, envoy::api::v2::core::Metadata::default_instance(), 1, + envoy::api::v2::core::Locality(), + envoy::api::v2::endpoint::Endpoint::HealthCheckConfig::default_instance(), 0, + envoy::api::v2::core::HealthStatus::UNKNOWN)}; + host_address_map_[host_address_map_key] = new_host; + it = host_address_map_.find(host_address_map_key); + } + + ThreadLocalActiveClientPtr& client = client_map_[it->second]; + if (!client) { + client = std::make_unique(*this); + client->host_ = it->second; + client->redis_client_ = + parent_.client_factory_.create(it->second, dispatcher_, parent_.config_); + client->redis_client_->addConnectionCallbacks(*client); + } + return client->redis_client_->makeRequest(request, callbacks); } diff --git a/source/extensions/filters/network/redis_proxy/conn_pool_impl.h b/source/extensions/filters/network/redis_proxy/conn_pool_impl.h index 1dfb363573ab2..ef39f732d1b9e 100644 --- a/source/extensions/filters/network/redis_proxy/conn_pool_impl.h +++ b/source/extensions/filters/network/redis_proxy/conn_pool_impl.h @@ -14,9 +14,12 @@ #include "common/buffer/buffer_impl.h" #include "common/common/hash.h" +#include "common/network/address_impl.h" #include "common/network/filter_impl.h" #include "common/protobuf/utility.h" +#include "common/singleton/const_singleton.h" #include "common/upstream/load_balancer_impl.h" +#include "common/upstream/upstream_impl.h" #include "extensions/filters/network/common/redis/client_impl.h" #include "extensions/filters/network/common/redis/codec_impl.h" @@ -42,6 +45,9 @@ class InstanceImpl : public Instance { 
Common::Redis::Client::PoolRequest* makeRequest(const std::string& key, const Common::Redis::RespValue& request, Common::Redis::Client::PoolCallbacks& callbacks) override; + Common::Redis::Client::PoolRequest* + makeRequestToHost(const std::string& host_address, const Common::Redis::RespValue& request, + Common::Redis::Client::PoolCallbacks& callbacks) override; private: struct ThreadLocalPool; @@ -68,6 +74,9 @@ class InstanceImpl : public Instance { Common::Redis::Client::PoolRequest* makeRequest(const std::string& key, const Common::Redis::RespValue& request, Common::Redis::Client::PoolCallbacks& callbacks); + Common::Redis::Client::PoolRequest* + makeRequestToHost(const std::string& host_address, const Common::Redis::RespValue& request, + Common::Redis::Client::PoolCallbacks& callbacks); void onClusterAddOrUpdateNonVirtual(Upstream::ThreadLocalCluster& cluster); void onHostsRemoved(const std::vector& hosts_removed); @@ -84,6 +93,7 @@ class InstanceImpl : public Instance { Upstream::ThreadLocalCluster* cluster_{}; std::unordered_map client_map_; Envoy::Common::CallbackHandle* host_set_member_update_cb_handle_{}; + std::unordered_map host_address_map_; }; struct LbContextImpl : public Upstream::LoadBalancerContextBase { diff --git a/source/extensions/filters/network/redis_proxy/proxy_filter.cc b/source/extensions/filters/network/redis_proxy/proxy_filter.cc index d5fc143e9be09..4fa59b5ad320e 100644 --- a/source/extensions/filters/network/redis_proxy/proxy_filter.cc +++ b/source/extensions/filters/network/redis_proxy/proxy_filter.cc @@ -53,7 +53,7 @@ void ProxyFilter::initializeReadFilterCallbacks(Network::ReadFilterCallbacks& ca void ProxyFilter::onRespValue(Common::Redis::RespValuePtr&& value) { pending_requests_.emplace_back(*this); PendingRequest& request = pending_requests_.back(); - CommandSplitter::SplitRequestPtr split = splitter_.makeRequest(*value, request); + CommandSplitter::SplitRequestPtr split = splitter_.makeRequest(std::move(value), request); if 
(split) { // The splitter can immediately respond and destroy the pending request. Only store the handle // if the request is still alive. diff --git a/source/extensions/health_checkers/redis/redis.cc b/source/extensions/health_checkers/redis/redis.cc index 3b396955f6a1e..615c99c92f499 100644 --- a/source/extensions/health_checkers/redis/redis.cc +++ b/source/extensions/health_checkers/redis/redis.cc @@ -99,6 +99,14 @@ void RedisHealthChecker::RedisActiveHealthCheckSession::onFailure() { handleFailure(envoy::data::core::v2alpha::HealthCheckFailureType::NETWORK); } +bool RedisHealthChecker::RedisActiveHealthCheckSession::onRedirection( + const NetworkFilters::Common::Redis::RespValue&) { + // Treat any redirection error response from a Redis server as success. + current_request_ = nullptr; + handleSuccess(); + return true; +} + void RedisHealthChecker::RedisActiveHealthCheckSession::onTimeout() { current_request_->cancel(); current_request_ = nullptr; diff --git a/source/extensions/health_checkers/redis/redis.h b/source/extensions/health_checkers/redis/redis.h index 8b287a5a81c3e..7c93b017b5a90 100644 --- a/source/extensions/health_checkers/redis/redis.h +++ b/source/extensions/health_checkers/redis/redis.h @@ -59,10 +59,14 @@ class RedisHealthChecker : public Upstream::HealthCheckerImplBase { return parent_.timeout_ * 2; } bool enableHashtagging() const override { return false; } + bool enableRedirection() const override { + return true; + } // Redirection errors are treated as check successes. 
// Extensions::NetworkFilters::Common::Redis::Client::PoolCallbacks void onResponse(NetworkFilters::Common::Redis::RespValuePtr&& value) override; void onFailure() override; + bool onRedirection(const NetworkFilters::Common::Redis::RespValue& value) override; // Network::ConnectionCallbacks void onEvent(Network::ConnectionEvent event) override; diff --git a/test/config/utility.cc b/test/config/utility.cc index 207a9e98453c8..971244540ee8f 100644 --- a/test/config/utility.cc +++ b/test/config/utility.cc @@ -212,9 +212,23 @@ ConfigHelper::ConfigHelper(const Network::Address::IpVersion version, Api::Api& for (int i = 0; i < static_resources->clusters_size(); ++i) { auto* cluster = static_resources->mutable_clusters(i); - if (!cluster->hosts().empty() && cluster->mutable_hosts(0)->has_socket_address()) { - auto host_socket_addr = cluster->mutable_hosts(0)->mutable_socket_address(); - host_socket_addr->set_address(Network::Test::getLoopbackAddressString(version)); + if (!cluster->hosts().empty()) { + for (int j = 0; j < cluster->hosts().size(); j++) { + if (cluster->mutable_hosts(j)->has_socket_address()) { + auto host_socket_addr = cluster->mutable_hosts(j)->mutable_socket_address(); + host_socket_addr->set_address(Network::Test::getLoopbackAddressString(version)); + } + } + } + for (int j = 0; j < cluster->load_assignment().endpoints_size(); ++j) { + auto locality_lb = cluster->mutable_load_assignment()->mutable_endpoints(j); + for (int k = 0; k < locality_lb->lb_endpoints_size(); ++k) { + auto lb_endpoint = locality_lb->mutable_lb_endpoints(k); + if (lb_endpoint->endpoint().address().has_socket_address()) { + lb_endpoint->mutable_endpoint()->mutable_address()->mutable_socket_address()->set_address( + Network::Test::getLoopbackAddressString(version)); + } + } } } } diff --git a/test/extensions/filters/network/common/redis/client_impl_test.cc b/test/extensions/filters/network/common/redis/client_impl_test.cc index 6bd29673b350d..d5c0e2ac7fae6 100644 --- 
a/test/extensions/filters/network/common/redis/client_impl_test.cc +++ b/test/extensions/filters/network/common/redis/client_impl_test.cc @@ -295,6 +295,7 @@ class ConfigOutlierDisabled : public Config { bool disableOutlierEvents() const override { return true; } std::chrono::milliseconds opTimeout() const override { return std::chrono::milliseconds(25); } bool enableHashtagging() const override { return false; } + bool enableRedirection() const override { return false; } }; TEST_F(RedisClientImplTest, OutlierDisabled) { @@ -361,6 +362,239 @@ TEST_F(RedisClientImplTest, OpTimeout) { EXPECT_EQ(1UL, host_->stats_.rq_timeout_.value()); } +TEST_F(RedisClientImplTest, AskRedirection) { + InSequence s; + + setup(); + + Common::Redis::RespValue request1; + MockPoolCallbacks callbacks1; + EXPECT_CALL(*encoder_, encode(Ref(request1), _)); + PoolRequest* handle1 = client_->makeRequest(request1, callbacks1); + EXPECT_NE(nullptr, handle1); + + onConnected(); + + Common::Redis::RespValue request2; + MockPoolCallbacks callbacks2; + EXPECT_CALL(*encoder_, encode(Ref(request2), _)); + PoolRequest* handle2 = client_->makeRequest(request2, callbacks2); + EXPECT_NE(nullptr, handle2); + + EXPECT_EQ(2UL, host_->cluster_.stats_.upstream_rq_total_.value()); + EXPECT_EQ(2UL, host_->cluster_.stats_.upstream_rq_active_.value()); + EXPECT_EQ(2UL, host_->stats_.rq_total_.value()); + EXPECT_EQ(2UL, host_->stats_.rq_active_.value()); + + Buffer::OwnedImpl fake_data; + EXPECT_CALL(*decoder_, decode(Ref(fake_data))).WillOnce(Invoke([&](Buffer::Instance&) -> void { + InSequence s; + Common::Redis::RespValuePtr response1(new Common::Redis::RespValue()); + response1->type(Common::Redis::RespType::Error); + // The exact values of the hash slot and IP info are not important. + response1->asString() = "ASK 1111 10.1.2.3:4321"; + // Simulate redirection failure. 
+ EXPECT_CALL(callbacks1, onRedirection(Ref(*response1))).WillOnce(Return(false)); + EXPECT_CALL(callbacks1, onResponse_(Ref(response1))); + EXPECT_CALL(*connect_or_op_timer_, enableTimer(_)); + EXPECT_CALL(host_->outlier_detector_, putResult(Upstream::Outlier::Result::SUCCESS)); + callbacks_->onRespValue(std::move(response1)); + + EXPECT_EQ(1UL, host_->cluster_.stats_.upstream_internal_redirect_failed_total_.value()); + + Common::Redis::RespValuePtr response2(new Common::Redis::RespValue()); + response2->type(Common::Redis::RespType::Error); + // The exact values of the hash slot and IP info are not important. + response2->asString() = "ASK 2222 10.1.2.4:4321"; + EXPECT_CALL(callbacks2, onRedirection(Ref(*response2))).WillOnce(Return(true)); + EXPECT_CALL(*connect_or_op_timer_, disableTimer()); + EXPECT_CALL(host_->outlier_detector_, putResult(Upstream::Outlier::Result::SUCCESS)); + callbacks_->onRespValue(std::move(response2)); + + EXPECT_EQ(1UL, host_->cluster_.stats_.upstream_internal_redirect_succeeded_total_.value()); + })); + upstream_read_filter_->onData(fake_data, false); + + EXPECT_CALL(*upstream_connection_, close(Network::ConnectionCloseType::NoFlush)); + EXPECT_CALL(*connect_or_op_timer_, disableTimer()); + client_->close(); +} + +TEST_F(RedisClientImplTest, MovedRedirection) { + InSequence s; + + setup(); + + Common::Redis::RespValue request1; + MockPoolCallbacks callbacks1; + EXPECT_CALL(*encoder_, encode(Ref(request1), _)); + PoolRequest* handle1 = client_->makeRequest(request1, callbacks1); + EXPECT_NE(nullptr, handle1); + + onConnected(); + + Common::Redis::RespValue request2; + MockPoolCallbacks callbacks2; + EXPECT_CALL(*encoder_, encode(Ref(request2), _)); + PoolRequest* handle2 = client_->makeRequest(request2, callbacks2); + EXPECT_NE(nullptr, handle2); + + EXPECT_EQ(2UL, host_->cluster_.stats_.upstream_rq_total_.value()); + EXPECT_EQ(2UL, host_->cluster_.stats_.upstream_rq_active_.value()); + EXPECT_EQ(2UL, host_->stats_.rq_total_.value()); + 
EXPECT_EQ(2UL, host_->stats_.rq_active_.value()); + + Buffer::OwnedImpl fake_data; + EXPECT_CALL(*decoder_, decode(Ref(fake_data))).WillOnce(Invoke([&](Buffer::Instance&) -> void { + InSequence s; + Common::Redis::RespValuePtr response1(new Common::Redis::RespValue()); + response1->type(Common::Redis::RespType::Error); + // The exact values of the hash slot and IP info are not important. + response1->asString() = "MOVED 1111 10.1.2.3:4321"; + // Simulate redirection failure. + EXPECT_CALL(callbacks1, onRedirection(Ref(*response1))).WillOnce(Return(false)); + EXPECT_CALL(callbacks1, onResponse_(Ref(response1))); + EXPECT_CALL(*connect_or_op_timer_, enableTimer(_)); + EXPECT_CALL(host_->outlier_detector_, putResult(Upstream::Outlier::Result::SUCCESS)); + callbacks_->onRespValue(std::move(response1)); + + EXPECT_EQ(1UL, host_->cluster_.stats_.upstream_internal_redirect_failed_total_.value()); + + Common::Redis::RespValuePtr response2(new Common::Redis::RespValue()); + response2->type(Common::Redis::RespType::Error); + // The exact values of the hash slot and IP info are not important. 
+ response2->asString() = "MOVED 2222 10.1.2.4:4321"; + EXPECT_CALL(callbacks2, onRedirection(Ref(*response2))).WillOnce(Return(true)); + EXPECT_CALL(*connect_or_op_timer_, disableTimer()); + EXPECT_CALL(host_->outlier_detector_, putResult(Upstream::Outlier::Result::SUCCESS)); + callbacks_->onRespValue(std::move(response2)); + + EXPECT_EQ(1UL, host_->cluster_.stats_.upstream_internal_redirect_succeeded_total_.value()); + })); + upstream_read_filter_->onData(fake_data, false); + + EXPECT_CALL(*upstream_connection_, close(Network::ConnectionCloseType::NoFlush)); + EXPECT_CALL(*connect_or_op_timer_, disableTimer()); + client_->close(); +} + +TEST_F(RedisClientImplTest, AskRedirectionNotEnabled) { + InSequence s; + + setup(std::make_unique(createConnPoolSettings(20, true, false))); + + Common::Redis::RespValue request1; + MockPoolCallbacks callbacks1; + EXPECT_CALL(*encoder_, encode(Ref(request1), _)); + PoolRequest* handle1 = client_->makeRequest(request1, callbacks1); + EXPECT_NE(nullptr, handle1); + + onConnected(); + + Common::Redis::RespValue request2; + MockPoolCallbacks callbacks2; + EXPECT_CALL(*encoder_, encode(Ref(request2), _)); + PoolRequest* handle2 = client_->makeRequest(request2, callbacks2); + EXPECT_NE(nullptr, handle2); + + EXPECT_EQ(2UL, host_->cluster_.stats_.upstream_rq_total_.value()); + EXPECT_EQ(2UL, host_->cluster_.stats_.upstream_rq_active_.value()); + EXPECT_EQ(2UL, host_->stats_.rq_total_.value()); + EXPECT_EQ(2UL, host_->stats_.rq_active_.value()); + + Buffer::OwnedImpl fake_data; + EXPECT_CALL(*decoder_, decode(Ref(fake_data))).WillOnce(Invoke([&](Buffer::Instance&) -> void { + InSequence s; + Common::Redis::RespValuePtr response1(new Common::Redis::RespValue()); + response1->type(Common::Redis::RespType::Error); + // The exact values of the hash slot and IP info are not important. + response1->asString() = "ASK 1111 10.1.2.3:4321"; + // Simulate redirection failure. 
+ EXPECT_CALL(callbacks1, onResponse_(Ref(response1))); + EXPECT_CALL(*connect_or_op_timer_, enableTimer(_)); + EXPECT_CALL(host_->outlier_detector_, putResult(Upstream::Outlier::Result::SUCCESS)); + callbacks_->onRespValue(std::move(response1)); + + EXPECT_EQ(0UL, host_->cluster_.stats_.upstream_internal_redirect_failed_total_.value()); + EXPECT_EQ(0UL, host_->cluster_.stats_.upstream_internal_redirect_succeeded_total_.value()); + + Common::Redis::RespValuePtr response2(new Common::Redis::RespValue()); + response2->type(Common::Redis::RespType::Error); + // The exact values of the hash slot and IP info are not important. + response2->asString() = "ASK 2222 10.1.2.4:4321"; + EXPECT_CALL(callbacks2, onResponse_(Ref(response2))); + EXPECT_CALL(*connect_or_op_timer_, disableTimer()); + EXPECT_CALL(host_->outlier_detector_, putResult(Upstream::Outlier::Result::SUCCESS)); + callbacks_->onRespValue(std::move(response2)); + + EXPECT_EQ(0UL, host_->cluster_.stats_.upstream_internal_redirect_failed_total_.value()); + EXPECT_EQ(0UL, host_->cluster_.stats_.upstream_internal_redirect_succeeded_total_.value()); + })); + upstream_read_filter_->onData(fake_data, false); + + EXPECT_CALL(*upstream_connection_, close(Network::ConnectionCloseType::NoFlush)); + EXPECT_CALL(*connect_or_op_timer_, disableTimer()); + client_->close(); +} + +TEST_F(RedisClientImplTest, MovedRedirectionNotEnabled) { + InSequence s; + + setup(std::make_unique(createConnPoolSettings(20, true, false))); + + Common::Redis::RespValue request1; + MockPoolCallbacks callbacks1; + EXPECT_CALL(*encoder_, encode(Ref(request1), _)); + PoolRequest* handle1 = client_->makeRequest(request1, callbacks1); + EXPECT_NE(nullptr, handle1); + + onConnected(); + + Common::Redis::RespValue request2; + MockPoolCallbacks callbacks2; + EXPECT_CALL(*encoder_, encode(Ref(request2), _)); + PoolRequest* handle2 = client_->makeRequest(request2, callbacks2); + EXPECT_NE(nullptr, handle2); + + EXPECT_EQ(2UL, 
host_->cluster_.stats_.upstream_rq_total_.value()); + EXPECT_EQ(2UL, host_->cluster_.stats_.upstream_rq_active_.value()); + EXPECT_EQ(2UL, host_->stats_.rq_total_.value()); + EXPECT_EQ(2UL, host_->stats_.rq_active_.value()); + + Buffer::OwnedImpl fake_data; + EXPECT_CALL(*decoder_, decode(Ref(fake_data))).WillOnce(Invoke([&](Buffer::Instance&) -> void { + InSequence s; + Common::Redis::RespValuePtr response1(new Common::Redis::RespValue()); + response1->type(Common::Redis::RespType::Error); + // The exact values of the hash slot and IP info are not important. + response1->asString() = "MOVED 1111 10.1.2.3:4321"; + EXPECT_CALL(callbacks1, onResponse_(Ref(response1))); + EXPECT_CALL(*connect_or_op_timer_, enableTimer(_)); + EXPECT_CALL(host_->outlier_detector_, putResult(Upstream::Outlier::Result::SUCCESS)); + callbacks_->onRespValue(std::move(response1)); + + EXPECT_EQ(0UL, host_->cluster_.stats_.upstream_internal_redirect_succeeded_total_.value()); + EXPECT_EQ(0UL, host_->cluster_.stats_.upstream_internal_redirect_failed_total_.value()); + + Common::Redis::RespValuePtr response2(new Common::Redis::RespValue()); + response2->type(Common::Redis::RespType::Error); + // The exact values of the hash slot and IP info are not important. 
+ response2->asString() = "MOVED 2222 10.1.2.4:4321"; + EXPECT_CALL(callbacks2, onResponse_(Ref(response2))); + EXPECT_CALL(*connect_or_op_timer_, disableTimer()); + EXPECT_CALL(host_->outlier_detector_, putResult(Upstream::Outlier::Result::SUCCESS)); + callbacks_->onRespValue(std::move(response2)); + + EXPECT_EQ(0UL, host_->cluster_.stats_.upstream_internal_redirect_succeeded_total_.value()); + EXPECT_EQ(0UL, host_->cluster_.stats_.upstream_internal_redirect_failed_total_.value()); + })); + upstream_read_filter_->onData(fake_data, false); + + EXPECT_CALL(*upstream_connection_, close(Network::ConnectionCloseType::NoFlush)); + EXPECT_CALL(*connect_or_op_timer_, disableTimer()); + client_->close(); +} + TEST(RedisClientFactoryImplTest, Basic) { ClientFactoryImpl factory; Upstream::MockHost::MockCreateConnectionData conn_info; diff --git a/test/extensions/filters/network/common/redis/codec_impl_test.cc b/test/extensions/filters/network/common/redis/codec_impl_test.cc index 60f8c72eb8f05..35c7e17fa8bea 100644 --- a/test/extensions/filters/network/common/redis/codec_impl_test.cc +++ b/test/extensions/filters/network/common/redis/codec_impl_test.cc @@ -11,12 +11,111 @@ #include "gtest/gtest.h" +using testing::InSequence; + namespace Envoy { namespace Extensions { namespace NetworkFilters { namespace Common { namespace Redis { +class RedisRespValueTest : public testing::Test { +public: + void makeBulkStringArray(RespValue& value, const std::vector& strings) { + std::vector values(strings.size()); + for (uint64_t i = 0; i < strings.size(); i++) { + values[i].type(RespType::BulkString); + values[i].asString() = strings[i]; + } + + value.type(RespType::Array); + value.asArray().swap(values); + } + + void makeArray(RespValue& value, const std::vector items) { + value.type(RespType::Array); + value.asArray().insert(value.asArray().end(), items.begin(), items.end()); + } +}; + +TEST_F(RedisRespValueTest, EqualityTestingAndCopyingTest) { + InSequence s; + + RespValue value1, 
value2, value3; + + makeBulkStringArray(value1, {"get", "foo", "bar", "now"}); + makeBulkStringArray(value2, {"get", "foo", "bar", "now"}); + makeBulkStringArray(value3, {"get", "foo", "bar", "later"}); + + EXPECT_TRUE(value1 == value2); + EXPECT_FALSE(value1 == value3); + + RespValue value4, value5; + value4.type(RespType::Array); + value4.asArray() = {value1, value2}; + value5.type(RespType::Array); + value5.asArray() = {value1, value3}; + + EXPECT_FALSE(value4 == value5); + EXPECT_TRUE(value4 == value4); + EXPECT_TRUE(value5 == value5); + + RespValue bulkstring_value, simplestring_value, error_value, integer_value, null_value; + bulkstring_value.type(RespType::BulkString); + simplestring_value.type(RespType::SimpleString); + error_value.type(RespType::Error); + integer_value.type(RespType::Integer); + + EXPECT_NE(bulkstring_value, simplestring_value); + EXPECT_NE(bulkstring_value, error_value); + EXPECT_NE(bulkstring_value, integer_value); + EXPECT_NE(bulkstring_value, null_value); + + RespValue value6, value7, value8; + makeArray(value6, + {bulkstring_value, simplestring_value, error_value, integer_value, null_value, value1}); + makeArray(value7, + {bulkstring_value, simplestring_value, error_value, integer_value, null_value, value2}); + makeArray(value8, + {bulkstring_value, simplestring_value, error_value, integer_value, null_value, value3}); + + // This may look weird, but it is a way to actually do self-assignment without generating compiler + // warnings. Self-assignment should succeed without changing the RespValue, and therefore no + // expectations should change. 
+ RespValue* value6_ptr = &value6; + value6 = *value6_ptr; + EXPECT_EQ(value6, value7); + EXPECT_NE(value6, value8); + EXPECT_NE(value7, value8); + EXPECT_EQ(value6.asArray()[5].asArray()[3].asString(), "now"); + EXPECT_EQ(value7.asArray()[5].asArray()[3].asString(), "now"); + EXPECT_EQ(value8.asArray()[5].asArray()[3].asString(), "later"); + + value8 = value1; + EXPECT_EQ(value8.type(), RespType::Array); + EXPECT_EQ(value8.asArray().size(), value1.asArray().size()); + EXPECT_EQ(value8.asArray().size(), 4); + for (unsigned int i = 0; i < value8.asArray().size(); i++) { + EXPECT_EQ(value8.asArray()[i].type(), RespType::BulkString); + EXPECT_EQ(value8.asArray()[i].asString(), value1.asArray()[i].asString()); + } + value7 = value1; + EXPECT_EQ(value7, value8); + value7 = value3; + EXPECT_NE(value7, value8); + + value8 = bulkstring_value; + EXPECT_EQ(value8.type(), RespType::BulkString); + value8 = simplestring_value; + EXPECT_EQ(value8.type(), RespType::SimpleString); + value8 = error_value; + EXPECT_EQ(value8.type(), RespType::Error); + value8 = integer_value; + EXPECT_EQ(value8.type(), RespType::Integer); + value8 = null_value; + EXPECT_EQ(value8.type(), RespType::Null); +} + class RedisEncoderDecoderImplTest : public testing::Test, public DecoderCallbacks { public: RedisEncoderDecoderImplTest() : decoder_(*this) {} diff --git a/test/extensions/filters/network/common/redis/mocks.cc b/test/extensions/filters/network/common/redis/mocks.cc index 9337421294aea..3a2c2110f4157 100644 --- a/test/extensions/filters/network/common/redis/mocks.cc +++ b/test/extensions/filters/network/common/redis/mocks.cc @@ -20,40 +20,6 @@ void PrintTo(const RespValue& value, std::ostream* os) { *os << value.toString() void PrintTo(const RespValuePtr& value, std::ostream* os) { *os << value->toString(); } -bool operator==(const RespValue& lhs, const RespValue& rhs) { - if (lhs.type() != rhs.type()) { - return false; - } - - switch (lhs.type()) { - case RespType::Array: { - if 
(lhs.asArray().size() != rhs.asArray().size()) { - return false; - } - - bool equal = true; - for (uint64_t i = 0; i < lhs.asArray().size(); i++) { - equal &= (lhs.asArray()[i] == rhs.asArray()[i]); - } - - return equal; - } - case RespType::SimpleString: - case RespType::BulkString: - case RespType::Error: { - return lhs.asString() == rhs.asString(); - } - case RespType::Null: { - return true; - } - case RespType::Integer: { - return lhs.asInteger() == rhs.asInteger(); - } - } - - NOT_REACHED_GCOVR_EXCL_LINE; -} - MockEncoder::MockEncoder() { ON_CALL(*this, encode(_, _)) .WillByDefault( diff --git a/test/extensions/filters/network/common/redis/mocks.h b/test/extensions/filters/network/common/redis/mocks.h index 1c6f573954361..37b90626c4e4c 100644 --- a/test/extensions/filters/network/common/redis/mocks.h +++ b/test/extensions/filters/network/common/redis/mocks.h @@ -23,7 +23,6 @@ namespace Redis { void PrintTo(const RespValue& value, std::ostream* os); void PrintTo(const RespValuePtr& value, std::ostream* os); -bool operator==(const RespValue& lhs, const RespValue& rhs); class MockEncoder : public Common::Redis::Encoder { public: @@ -94,6 +93,7 @@ class MockPoolCallbacks : public PoolCallbacks { MOCK_METHOD1(onResponse_, void(Common::Redis::RespValuePtr& value)); MOCK_METHOD0(onFailure, void()); + MOCK_METHOD1(onRedirection, bool(const Common::Redis::RespValue& value)); }; } // namespace Client diff --git a/test/extensions/filters/network/common/redis/test_utils.h b/test/extensions/filters/network/common/redis/test_utils.h index e1c418897e48b..c81cb2647f95b 100644 --- a/test/extensions/filters/network/common/redis/test_utils.h +++ b/test/extensions/filters/network/common/redis/test_utils.h @@ -16,10 +16,12 @@ namespace Redis { namespace Client { inline envoy::config::filter::network::redis_proxy::v2::RedisProxy::ConnPoolSettings -createConnPoolSettings() { +createConnPoolSettings(int64_t millis = 20, bool hashtagging = true, + bool redirection_support = true) { 
envoy::config::filter::network::redis_proxy::v2::RedisProxy::ConnPoolSettings setting{}; - setting.mutable_op_timeout()->CopyFrom(Protobuf::util::TimeUtil::MillisecondsToDuration(20)); - setting.set_enable_hashtagging(true); + setting.mutable_op_timeout()->CopyFrom(Protobuf::util::TimeUtil::MillisecondsToDuration(millis)); + setting.set_enable_hashtagging(hashtagging); + setting.set_enable_redirection(redirection_support); return setting; } diff --git a/test/extensions/filters/network/redis_proxy/command_lookup_speed_test.cc b/test/extensions/filters/network/redis_proxy/command_lookup_speed_test.cc index 2f4d8e30e1b0b..aa1964df7f7ad 100644 --- a/test/extensions/filters/network/redis_proxy/command_lookup_speed_test.cc +++ b/test/extensions/filters/network/redis_proxy/command_lookup_speed_test.cc @@ -9,8 +9,10 @@ #include "common/common/fmt.h" #include "common/stats/isolated_store_impl.h" +#include "extensions/filters/network/common/redis/client_impl.h" #include "extensions/filters/network/common/redis/supported_commands.h" #include "extensions/filters/network/redis_proxy/command_splitter_impl.h" +#include "extensions/filters/network/redis_proxy/conn_pool.h" #include "test/test_common/printers.h" #include "test/test_common/simulated_time_system.h" @@ -36,6 +38,11 @@ class NullInstanceImpl : public ConnPool::Instance { Common::Redis::Client::PoolCallbacks&) override { return nullptr; } + Common::Redis::Client::PoolRequest* + makeRequestToHost(const std::string&, const Common::Redis::RespValue&, + Common::Redis::Client::PoolCallbacks&) override { + return nullptr; + } }; class CommandLookUpSpeedTest { @@ -53,15 +60,16 @@ class CommandLookUpSpeedTest { } void makeRequests() { - Common::Redis::RespValue request; for (const std::string& command : Common::Redis::SupportedCommands::simpleCommands()) { - makeBulkStringArray(request, {command, "hello"}); - splitter_.makeRequest(request, callbacks_); + Common::Redis::RespValuePtr request{new Common::Redis::RespValue()}; + 
makeBulkStringArray(*request, {command, "hello"}); + splitter_.makeRequest(std::move(request), callbacks_); } for (const std::string& command : Common::Redis::SupportedCommands::evalCommands()) { - makeBulkStringArray(request, {command, "hello"}); - splitter_.makeRequest(request, callbacks_); + Common::Redis::RespValuePtr request{new Common::Redis::RespValue()}; + makeBulkStringArray(*request, {command, "hello"}); + splitter_.makeRequest(std::move(request), callbacks_); } } diff --git a/test/extensions/filters/network/redis_proxy/command_splitter_impl_test.cc b/test/extensions/filters/network/redis_proxy/command_splitter_impl_test.cc index 252078432334a..b4f2c8fb70110 100644 --- a/test/extensions/filters/network/redis_proxy/command_splitter_impl_test.cc +++ b/test/extensions/filters/network/redis_proxy/command_splitter_impl_test.cc @@ -24,10 +24,12 @@ using testing::ByRef; using testing::DoAll; using testing::Eq; using testing::InSequence; +using testing::Invoke; using testing::NiceMock; using testing::Property; using testing::Ref; using testing::Return; +using testing::SaveArg; using testing::WithArg; namespace Envoy { @@ -64,8 +66,8 @@ TEST_F(RedisCommandSplitterImplTest, InvalidRequestNotArray) { response.type(Common::Redis::RespType::Error); response.asString() = Response::get().InvalidRequest; EXPECT_CALL(callbacks_, onResponse_(PointeesEq(&response))); - Common::Redis::RespValue request; - EXPECT_EQ(nullptr, splitter_.makeRequest(request, callbacks_)); + Common::Redis::RespValuePtr request{new Common::Redis::RespValue()}; + EXPECT_EQ(nullptr, splitter_.makeRequest(std::move(request), callbacks_)); EXPECT_EQ(1UL, store_.counter("redis.foo.splitter.invalid_request").value()); } @@ -75,9 +77,9 @@ TEST_F(RedisCommandSplitterImplTest, InvalidRequestArrayTooSmall) { response.type(Common::Redis::RespType::Error); response.asString() = Response::get().InvalidRequest; EXPECT_CALL(callbacks_, onResponse_(PointeesEq(&response))); - Common::Redis::RespValue request; - 
makeBulkStringArray(request, {"incr"}); - EXPECT_EQ(nullptr, splitter_.makeRequest(request, callbacks_)); + Common::Redis::RespValuePtr request{new Common::Redis::RespValue()}; + makeBulkStringArray(*request, {"incr"}); + EXPECT_EQ(nullptr, splitter_.makeRequest(std::move(request), callbacks_)); EXPECT_EQ(1UL, store_.counter("redis.foo.splitter.invalid_request").value()); } @@ -87,10 +89,10 @@ TEST_F(RedisCommandSplitterImplTest, InvalidRequestArrayNotStrings) { response.type(Common::Redis::RespType::Error); response.asString() = Response::get().InvalidRequest; EXPECT_CALL(callbacks_, onResponse_(PointeesEq(&response))); - Common::Redis::RespValue request; - makeBulkStringArray(request, {"incr", ""}); - request.asArray()[1].type(Common::Redis::RespType::Null); - EXPECT_EQ(nullptr, splitter_.makeRequest(request, callbacks_)); + Common::Redis::RespValuePtr request{new Common::Redis::RespValue()}; + makeBulkStringArray(*request, {"incr", ""}); + request->asArray()[1].type(Common::Redis::RespType::Null); + EXPECT_EQ(nullptr, splitter_.makeRequest(std::move(request), callbacks_)); EXPECT_EQ(1UL, store_.counter("redis.foo.splitter.invalid_request").value()); } @@ -100,9 +102,9 @@ TEST_F(RedisCommandSplitterImplTest, UnsupportedCommand) { response.type(Common::Redis::RespType::Error); response.asString() = "unsupported command 'newcommand'"; EXPECT_CALL(callbacks_, onResponse_(PointeesEq(&response))); - Common::Redis::RespValue request; - makeBulkStringArray(request, {"newcommand", "hello"}); - EXPECT_EQ(nullptr, splitter_.makeRequest(request, callbacks_)); + Common::Redis::RespValuePtr request{new Common::Redis::RespValue()}; + makeBulkStringArray(*request, {"newcommand", "hello"}); + EXPECT_EQ(nullptr, splitter_.makeRequest(std::move(request), callbacks_)); EXPECT_EQ(1UL, store_.counter("redis.foo.splitter.unsupported_command").value()); } @@ -110,10 +112,10 @@ TEST_F(RedisCommandSplitterImplTest, UnsupportedCommand) { class RedisSingleServerRequestTest : public 
RedisCommandSplitterImplTest, public testing::WithParamInterface { public: - void makeRequest(const std::string& hash_key, const Common::Redis::RespValue& request) { - EXPECT_CALL(*conn_pool_, makeRequest(hash_key, Ref(request), _)) + void makeRequest(const std::string& hash_key, Common::Redis::RespValuePtr&& request) { + EXPECT_CALL(*conn_pool_, makeRequest(hash_key, Ref(*request), _)) .WillOnce(DoAll(WithArg<2>(SaveArgAddress(&pool_callbacks_)), Return(&pool_request_))); - handle_ = splitter_.makeRequest(request, callbacks_); + handle_ = splitter_.makeRequest(std::move(request), callbacks_); } void fail() { @@ -142,9 +144,9 @@ TEST_P(RedisSingleServerRequestTest, Success) { std::string lower_command(GetParam()); table.toLowerCase(lower_command); - Common::Redis::RespValue request; - makeBulkStringArray(request, {GetParam(), "hello"}); - makeRequest("hello", request); + Common::Redis::RespValuePtr request{new Common::Redis::RespValue()}; + makeBulkStringArray(*request, {GetParam(), "hello"}); + makeRequest("hello", std::move(request)); EXPECT_NE(nullptr, handle_); time_system_.setMonotonicTime(std::chrono::milliseconds(10)); @@ -162,9 +164,9 @@ TEST_P(RedisSingleServerRequestTest, Success) { TEST_P(RedisSingleServerRequestTest, SuccessMultipleArgs) { InSequence s; - Common::Redis::RespValue request; - makeBulkStringArray(request, {GetParam(), "hello", "123", "world"}); - makeRequest("hello", request); + Common::Redis::RespValuePtr request{new Common::Redis::RespValue()}; + makeBulkStringArray(*request, {GetParam(), "hello", "123", "world"}); + makeRequest("hello", std::move(request)); EXPECT_NE(nullptr, handle_); ToLowerTable table; @@ -186,9 +188,9 @@ TEST_P(RedisSingleServerRequestTest, SuccessMultipleArgs) { TEST_P(RedisSingleServerRequestTest, Fail) { InSequence s; - Common::Redis::RespValue request; - makeBulkStringArray(request, {GetParam(), "hello"}); - makeRequest("hello", request); + Common::Redis::RespValuePtr request{new Common::Redis::RespValue()}; + 
makeBulkStringArray(*request, {GetParam(), "hello"}); + makeRequest("hello", std::move(request)); EXPECT_NE(nullptr, handle_); ToLowerTable table; @@ -209,9 +211,9 @@ TEST_P(RedisSingleServerRequestTest, Fail) { TEST_P(RedisSingleServerRequestTest, Cancel) { InSequence s; - Common::Redis::RespValue request; - makeBulkStringArray(request, {GetParam(), "hello"}); - makeRequest("hello", request); + Common::Redis::RespValuePtr request{new Common::Redis::RespValue()}; + makeBulkStringArray(*request, {GetParam(), "hello"}); + makeRequest("hello", std::move(request)); EXPECT_NE(nullptr, handle_); EXPECT_CALL(pool_request_, cancel()); @@ -221,14 +223,14 @@ TEST_P(RedisSingleServerRequestTest, Cancel) { TEST_P(RedisSingleServerRequestTest, NoUpstream) { InSequence s; - Common::Redis::RespValue request; - makeBulkStringArray(request, {GetParam(), "hello"}); - EXPECT_CALL(*conn_pool_, makeRequest("hello", Ref(request), _)).WillOnce(Return(nullptr)); + Common::Redis::RespValuePtr request{new Common::Redis::RespValue()}; + makeBulkStringArray(*request, {GetParam(), "hello"}); + EXPECT_CALL(*conn_pool_, makeRequest("hello", Ref(*request), _)).WillOnce(Return(nullptr)); Common::Redis::RespValue response; response.type(Common::Redis::RespType::Error); response.asString() = Response::get().NoUpstreamHost; EXPECT_CALL(callbacks_, onResponse_(PointeesEq(&response))); - handle_ = splitter_.makeRequest(request, callbacks_); + handle_ = splitter_.makeRequest(std::move(request), callbacks_); EXPECT_EQ(nullptr, handle_); }; @@ -241,24 +243,24 @@ INSTANTIATE_TEST_SUITE_P(RedisSimpleRequestCommandHandlerMixedCaseTests, TEST_F(RedisSingleServerRequestTest, PingSuccess) { InSequence s; - Common::Redis::RespValue request; - makeBulkStringArray(request, {"ping"}); + Common::Redis::RespValuePtr request{new Common::Redis::RespValue()}; + makeBulkStringArray(*request, {"ping"}); Common::Redis::RespValue response; response.type(Common::Redis::RespType::SimpleString); response.asString() = "PONG"; 
EXPECT_CALL(callbacks_, onResponse_(PointeesEq(&response))); - handle_ = splitter_.makeRequest(request, callbacks_); + handle_ = splitter_.makeRequest(std::move(request), callbacks_); EXPECT_EQ(nullptr, handle_); }; TEST_F(RedisSingleServerRequestTest, EvalSuccess) { InSequence s; - Common::Redis::RespValue request; - makeBulkStringArray(request, {"eval", "return {ARGV[1]}", "1", "key", "arg"}); - makeRequest("key", request); + Common::Redis::RespValuePtr request{new Common::Redis::RespValue()}; + makeBulkStringArray(*request, {"eval", "return {ARGV[1]}", "1", "key", "arg"}); + makeRequest("key", std::move(request)); EXPECT_NE(nullptr, handle_); ToLowerTable table; @@ -280,9 +282,9 @@ TEST_F(RedisSingleServerRequestTest, EvalSuccess) { TEST_F(RedisSingleServerRequestTest, EvalShaSuccess) { InSequence s; - Common::Redis::RespValue request; - makeBulkStringArray(request, {"EVALSHA", "return {ARGV[1]}", "1", "keykey", "arg"}); - makeRequest("keykey", request); + Common::Redis::RespValuePtr request{new Common::Redis::RespValue()}; + makeBulkStringArray(*request, {"EVALSHA", "return {ARGV[1]}", "1", "keykey", "arg"}); + makeRequest("keykey", std::move(request)); EXPECT_NE(nullptr, handle_); ToLowerTable table; @@ -304,38 +306,161 @@ TEST_F(RedisSingleServerRequestTest, EvalShaSuccess) { TEST_F(RedisSingleServerRequestTest, EvalWrongNumberOfArgs) { InSequence s; - Common::Redis::RespValue request; + Common::Redis::RespValuePtr request1{new Common::Redis::RespValue()}; + Common::Redis::RespValuePtr request2{new Common::Redis::RespValue()}; Common::Redis::RespValue response; response.type(Common::Redis::RespType::Error); response.asString() = "wrong number of arguments for 'eval' command"; EXPECT_CALL(callbacks_, onResponse_(PointeesEq(&response))); - makeBulkStringArray(request, {"eval", "return {ARGV[1]}"}); - EXPECT_EQ(nullptr, splitter_.makeRequest(request, callbacks_)); + makeBulkStringArray(*request1, {"eval", "return {ARGV[1]}"}); + EXPECT_EQ(nullptr, 
splitter_.makeRequest(std::move(request1), callbacks_)); response.asString() = "wrong number of arguments for 'evalsha' command"; EXPECT_CALL(callbacks_, onResponse_(PointeesEq(&response))); - makeBulkStringArray(request, {"evalsha", "return {ARGV[1]}", "1"}); - EXPECT_EQ(nullptr, splitter_.makeRequest(request, callbacks_)); + makeBulkStringArray(*request2, {"evalsha", "return {ARGV[1]}", "1"}); + EXPECT_EQ(nullptr, splitter_.makeRequest(std::move(request2), callbacks_)); }; TEST_F(RedisSingleServerRequestTest, EvalNoUpstream) { InSequence s; - Common::Redis::RespValue request; - makeBulkStringArray(request, {"eval", "return {ARGV[1]}", "1", "key", "arg"}); - EXPECT_CALL(*conn_pool_, makeRequest("key", Ref(request), _)).WillOnce(Return(nullptr)); + Common::Redis::RespValuePtr request{new Common::Redis::RespValue()}; + makeBulkStringArray(*request, {"eval", "return {ARGV[1]}", "1", "key", "arg"}); + EXPECT_CALL(*conn_pool_, makeRequest("key", Ref(*request), _)).WillOnce(Return(nullptr)); Common::Redis::RespValue response; response.type(Common::Redis::RespType::Error); response.asString() = Response::get().NoUpstreamHost; EXPECT_CALL(callbacks_, onResponse_(PointeesEq(&response))); - handle_ = splitter_.makeRequest(request, callbacks_); + handle_ = splitter_.makeRequest(std::move(request), callbacks_); EXPECT_EQ(nullptr, handle_); EXPECT_EQ(1UL, store_.counter("redis.foo.command.eval.total").value()); EXPECT_EQ(1UL, store_.counter("redis.foo.command.eval.error").value()); }; +TEST_F(RedisSingleServerRequestTest, MovedRedirectionSuccess) { + InSequence s; + + Common::Redis::Client::MockPoolRequest pool_request2; + Common::Redis::RespValuePtr request{new Common::Redis::RespValue()}; + makeBulkStringArray(*request, {"get", "foo"}); + makeRequest("foo", std::move(request)); + EXPECT_NE(nullptr, handle_); + + Common::Redis::RespValue moved_response; + moved_response.type(Common::Redis::RespType::Error); + moved_response.asString() = "MOVED 1111 10.1.2.3:4000"; + 
std::string host_address; + Common::Redis::RespValue request_copy; + EXPECT_CALL(*conn_pool_, makeRequestToHost(_, _, Ref(*pool_callbacks_))) + .WillOnce( + DoAll(SaveArg<0>(&host_address), SaveArg<1>(&request_copy), Return(&pool_request2))); + EXPECT_TRUE(pool_callbacks_->onRedirection(moved_response)); + EXPECT_EQ(host_address, "10.1.2.3:4000"); + EXPECT_EQ(request_copy.type(), Common::Redis::RespType::Array); + EXPECT_EQ(request_copy.asArray().size(), 2); + EXPECT_EQ(request_copy.asArray()[0].type(), Common::Redis::RespType::BulkString); + EXPECT_EQ(request_copy.asArray()[0].asString(), "get"); + EXPECT_EQ(request_copy.asArray()[1].type(), Common::Redis::RespType::BulkString); + EXPECT_EQ(request_copy.asArray()[1].asString(), "foo"); + + respond(); +}; + +TEST_F(RedisSingleServerRequestTest, MovedRedirectionFailure) { + InSequence s; + + Common::Redis::RespValuePtr request{new Common::Redis::RespValue()}; + makeBulkStringArray(*request, {"get", "foo"}); + makeRequest("foo", std::move(request)); + EXPECT_NE(nullptr, handle_); + + // Test a truncated MOVED error response that cannot be parsed properly. + Common::Redis::RespValue moved_response; + moved_response.type(Common::Redis::RespType::Error); + moved_response.asString() = "MOVED 1111"; + EXPECT_FALSE(pool_callbacks_->onRedirection(moved_response)); + moved_response.type(Common::Redis::RespType::Integer); + moved_response.asInteger() = 1; + EXPECT_FALSE(pool_callbacks_->onRedirection(moved_response)); + + respond(); +}; + +TEST_F(RedisSingleServerRequestTest, RedirectionFailure) { + InSequence s; + + Common::Redis::RespValuePtr request{new Common::Redis::RespValue()}; + makeBulkStringArray(*request, {"get", "foo"}); + makeRequest("foo", std::move(request)); + EXPECT_NE(nullptr, handle_); + + // Test an error that looks like it might be a MOVED or ASK redirection error except for the first + // non-whitespace substring. 
+ Common::Redis::RespValue moved_response; + moved_response.type(Common::Redis::RespType::Error); + moved_response.asString() = "NOTMOVEDORASK 1111 1.1.1.1:1"; + EXPECT_FALSE(pool_callbacks_->onRedirection(moved_response)); + moved_response.type(Common::Redis::RespType::Integer); + moved_response.asInteger() = 1; + EXPECT_FALSE(pool_callbacks_->onRedirection(moved_response)); + + respond(); +}; + +TEST_F(RedisSingleServerRequestTest, AskRedirectionSuccess) { + InSequence s; + + Common::Redis::Client::MockPoolRequest pool_request2; + Common::Redis::RespValuePtr request{new Common::Redis::RespValue()}; + makeBulkStringArray(*request, {"get", "foo"}); + makeRequest("foo", std::move(request)); + EXPECT_NE(nullptr, handle_); + + Common::Redis::RespValue ask_response; + ask_response.type(Common::Redis::RespType::Error); + ask_response.asString() = "ASK 1111 10.1.2.3:4000"; + EXPECT_CALL(*conn_pool_, makeRequestToHost(_, _, Ref(*pool_callbacks_))) + .WillOnce( + Invoke([&](const std::string& host_address, const Common::Redis::RespValue& request, + Common::Redis::Client::PoolCallbacks&) -> Common::Redis::Client::PoolRequest* { + // Verify that the request has been properly modified in place with an "asking" prefix. 
+ std::vector commands = {"asking", "get", "foo"}; + EXPECT_EQ(host_address, "10.1.2.3:4000"); + EXPECT_TRUE(request.type() == Common::Redis::RespType::Array); + EXPECT_EQ(request.asArray().size(), commands.size()); + for (unsigned int i = 0; i < commands.size(); i++) { + EXPECT_TRUE(request.asArray()[i].type() == Common::Redis::RespType::BulkString); + EXPECT_EQ(request.asArray()[i].asString(), commands[i]); + } + return &pool_request2; + })); + EXPECT_TRUE(pool_callbacks_->onRedirection(ask_response)); + respond(); +}; + +TEST_F(RedisSingleServerRequestTest, AskRedirectionFailure) { + InSequence s; + + Common::Redis::RespValuePtr request{new Common::Redis::RespValue()}; + makeBulkStringArray(*request, {"get", "foo"}); + makeRequest("foo", std::move(request)); + EXPECT_NE(nullptr, handle_); + + Common::Redis::RespValue ask_response; + + // Test a truncated ASK error response that cannot be parsed properly. + ask_response.type(Common::Redis::RespType::Error); + ask_response.asString() = "ASK 1111"; + EXPECT_FALSE(pool_callbacks_->onRedirection(ask_response)); + ask_response.type(Common::Redis::RespType::Integer); + ask_response.asInteger() = 1; + EXPECT_FALSE(pool_callbacks_->onRedirection(ask_response)); + + respond(); +}; + class RedisMGETCommandHandlerTest : public RedisCommandSplitterImplTest { public: void setup(uint32_t num_gets, const std::list& null_handle_indexes) { @@ -344,8 +469,8 @@ class RedisMGETCommandHandlerTest : public RedisCommandSplitterImplTest { request_strings.push_back(std::to_string(i)); } - Common::Redis::RespValue request; - makeBulkStringArray(request, request_strings); + Common::Redis::RespValuePtr request{new Common::Redis::RespValue()}; + makeBulkStringArray(*request, request_strings); std::vector tmp_expected_requests(num_gets); expected_requests_.swap(tmp_expected_requests); @@ -363,7 +488,7 @@ class RedisMGETCommandHandlerTest : public RedisCommandSplitterImplTest { .WillOnce(DoAll(WithArg<2>(SaveArgAddress(&pool_callbacks_[i])), 
Return(request_to_use))); } - handle_ = splitter_.makeRequest(request, callbacks_); + handle_ = splitter_.makeRequest(std::move(request), callbacks_); } std::vector expected_requests_; @@ -536,6 +661,130 @@ TEST_F(RedisMGETCommandHandlerTest, Cancel) { handle_->cancel(); }; +TEST_F(RedisMGETCommandHandlerTest, NormalWithMovedRedirection) { + InSequence s; + + setup(2, {}); + EXPECT_NE(nullptr, handle_); + + // Test with a non-error response. + Common::Redis::RespValue bad_moved_response; + bad_moved_response.type(Common::Redis::RespType::Integer); + bad_moved_response.asInteger() = 1; + EXPECT_FALSE(pool_callbacks_[0]->onRedirection(bad_moved_response)); + + // Test with a valid MOVED response. + Common::Redis::RespValue moved_response; + moved_response.type(Common::Redis::RespType::Error); + moved_response.asString() = "MOVED 1234 192.168.0.1:5000"; // Exact values are not important. + for (unsigned int i = 0; i < 2; i++) { + EXPECT_CALL(*conn_pool_, makeRequestToHost(_, _, Ref(*pool_callbacks_[i]))) + .WillOnce(Invoke( + [&](const std::string& host_address, const Common::Redis::RespValue& request, + Common::Redis::Client::PoolCallbacks&) -> Common::Redis::Client::PoolRequest* { + EXPECT_EQ(host_address, "192.168.0.1:5000"); + EXPECT_TRUE(request.type() == Common::Redis::RespType::Array); + EXPECT_EQ(request.asArray().size(), 2); + EXPECT_TRUE(request.asArray()[0].type() == Common::Redis::RespType::BulkString); + EXPECT_EQ(request.asArray()[0].asString(), "get"); + EXPECT_TRUE(request.asArray()[1].type() == Common::Redis::RespType::BulkString); + EXPECT_EQ(request.asArray()[1].asString(), std::to_string(i)); + EXPECT_NE(&pool_requests_[i], nullptr); + return &pool_requests_[i]; + })); + EXPECT_TRUE(pool_callbacks_[i]->onRedirection(moved_response)); + } + + Common::Redis::RespValue expected_response; + expected_response.type(Common::Redis::RespType::Array); + std::vector elements(2); + elements[0].type(Common::Redis::RespType::BulkString); + elements[0].asString() 
= "response"; + elements[1].type(Common::Redis::RespType::BulkString); + elements[1].asString() = "5"; + expected_response.asArray().swap(elements); + + Common::Redis::RespValuePtr response2(new Common::Redis::RespValue()); + response2->type(Common::Redis::RespType::BulkString); + response2->asString() = "5"; + pool_callbacks_[1]->onResponse(std::move(response2)); + + Common::Redis::RespValuePtr response1(new Common::Redis::RespValue()); + response1->type(Common::Redis::RespType::BulkString); + response1->asString() = "response"; + time_system_.setMonotonicTime(std::chrono::milliseconds(10)); + EXPECT_CALL(store_, deliverHistogramToSinks( + Property(&Stats::Metric::name, "redis.foo.command.mget.latency"), 10)); + EXPECT_CALL(callbacks_, onResponse_(PointeesEq(&expected_response))); + pool_callbacks_[0]->onResponse(std::move(response1)); + + EXPECT_EQ(1UL, store_.counter("redis.foo.command.mget.total").value()); + EXPECT_EQ(1UL, store_.counter("redis.foo.command.mget.success").value()); +}; + +TEST_F(RedisMGETCommandHandlerTest, NormalWithAskRedirection) { + InSequence s; + + setup(2, {}); + EXPECT_NE(nullptr, handle_); + + // Test with an non-error response. + Common::Redis::RespValue bad_ask_response; + bad_ask_response.type(Common::Redis::RespType::Integer); + bad_ask_response.asInteger() = 1; + EXPECT_FALSE(pool_callbacks_[0]->onRedirection(bad_ask_response)); + + // Test with a valid ASK response. + Common::Redis::RespValue ask_response; + ask_response.type(Common::Redis::RespType::Error); + ask_response.asString() = "ASK 1234 192.168.0.1:5000"; // Exact values are not important. 
+ for (unsigned int i = 0; i < 2; i++) { + EXPECT_CALL(*conn_pool_, makeRequestToHost(_, _, Ref(*pool_callbacks_[i]))) + .WillOnce(Invoke( + [&](const std::string& host_address, const Common::Redis::RespValue& request, + Common::Redis::Client::PoolCallbacks&) -> Common::Redis::Client::PoolRequest* { + EXPECT_EQ(host_address, "192.168.0.1:5000"); + EXPECT_TRUE(request.type() == Common::Redis::RespType::Array); + EXPECT_EQ(request.asArray().size(), 3); + EXPECT_TRUE(request.asArray()[0].type() == Common::Redis::RespType::BulkString); + EXPECT_EQ(request.asArray()[0].asString(), "asking"); + EXPECT_TRUE(request.asArray()[1].type() == Common::Redis::RespType::BulkString); + EXPECT_EQ(request.asArray()[1].asString(), "get"); + EXPECT_TRUE(request.asArray()[2].type() == Common::Redis::RespType::BulkString); + EXPECT_EQ(request.asArray()[2].asString(), std::to_string(i)); + EXPECT_NE(&pool_requests_[i], nullptr); + return &pool_requests_[i]; + })); + EXPECT_TRUE(pool_callbacks_[i]->onRedirection(ask_response)); + } + + Common::Redis::RespValue expected_response; + expected_response.type(Common::Redis::RespType::Array); + std::vector elements(2); + elements[0].type(Common::Redis::RespType::BulkString); + elements[0].asString() = "response"; + elements[1].type(Common::Redis::RespType::BulkString); + elements[1].asString() = "5"; + expected_response.asArray().swap(elements); + + Common::Redis::RespValuePtr response2(new Common::Redis::RespValue()); + response2->type(Common::Redis::RespType::BulkString); + response2->asString() = "5"; + pool_callbacks_[1]->onResponse(std::move(response2)); + + Common::Redis::RespValuePtr response1(new Common::Redis::RespValue()); + response1->type(Common::Redis::RespType::BulkString); + response1->asString() = "response"; + time_system_.setMonotonicTime(std::chrono::milliseconds(10)); + EXPECT_CALL(store_, deliverHistogramToSinks( + Property(&Stats::Metric::name, "redis.foo.command.mget.latency"), 10)); + EXPECT_CALL(callbacks_, 
onResponse_(PointeesEq(&expected_response))); + pool_callbacks_[0]->onResponse(std::move(response1)); + + EXPECT_EQ(1UL, store_.counter("redis.foo.command.mget.total").value()); + EXPECT_EQ(1UL, store_.counter("redis.foo.command.mget.success").value()); +}; + class RedisMSETCommandHandlerTest : public RedisCommandSplitterImplTest { public: void setup(uint32_t num_sets, const std::list& null_handle_indexes) { @@ -547,8 +796,8 @@ class RedisMSETCommandHandlerTest : public RedisCommandSplitterImplTest { request_strings.push_back(std::to_string(i)); } - Common::Redis::RespValue request; - makeBulkStringArray(request, request_strings); + Common::Redis::RespValuePtr request{new Common::Redis::RespValue()}; + makeBulkStringArray(*request, request_strings); std::vector tmp_expected_requests(num_sets); expected_requests_.swap(tmp_expected_requests); @@ -566,7 +815,7 @@ class RedisMSETCommandHandlerTest : public RedisCommandSplitterImplTest { .WillOnce(DoAll(WithArg<2>(SaveArgAddress(&pool_callbacks_[i])), Return(request_to_use))); } - handle_ = splitter_.makeRequest(request, callbacks_); + handle_ = splitter_.makeRequest(std::move(request), callbacks_); } std::vector expected_requests_; @@ -654,13 +903,133 @@ TEST_F(RedisMSETCommandHandlerTest, WrongNumberOfArgs) { response.type(Common::Redis::RespType::Error); response.asString() = "wrong number of arguments for 'mset' command"; EXPECT_CALL(callbacks_, onResponse_(PointeesEq(&response))); - Common::Redis::RespValue request; - makeBulkStringArray(request, {"mset", "foo", "bar", "fizz"}); - EXPECT_EQ(nullptr, splitter_.makeRequest(request, callbacks_)); + Common::Redis::RespValuePtr request{new Common::Redis::RespValue()}; + makeBulkStringArray(*request, {"mset", "foo", "bar", "fizz"}); + EXPECT_EQ(nullptr, splitter_.makeRequest(std::move(request), callbacks_)); EXPECT_EQ(1UL, store_.counter("redis.foo.command.mset.total").value()); EXPECT_EQ(1UL, store_.counter("redis.foo.command.mset.error").value()); }; 
+TEST_F(RedisMSETCommandHandlerTest, NormalWithMovedRedirection) { + InSequence s; + + setup(2, {}); + EXPECT_NE(nullptr, handle_); + + // Test with a non-error response. + Common::Redis::RespValue bad_moved_response; + bad_moved_response.type(Common::Redis::RespType::Integer); + bad_moved_response.asInteger() = 1; + EXPECT_FALSE(pool_callbacks_[0]->onRedirection(bad_moved_response)); + + // Test with a valid MOVED response. + Common::Redis::RespValue moved_response; + moved_response.type(Common::Redis::RespType::Error); + moved_response.asString() = "MOVED 1234 192.168.0.1:5000"; // Exact values are not important. + for (unsigned int i = 0; i < 2; i++) { + EXPECT_CALL(*conn_pool_, makeRequestToHost(_, _, Ref(*pool_callbacks_[i]))) + .WillOnce(Invoke( + [&](const std::string& host_address, const Common::Redis::RespValue& request, + Common::Redis::Client::PoolCallbacks&) -> Common::Redis::Client::PoolRequest* { + EXPECT_EQ(host_address, "192.168.0.1:5000"); + EXPECT_TRUE(request.type() == Common::Redis::RespType::Array); + EXPECT_EQ(request.asArray().size(), 3); + EXPECT_TRUE(request.asArray()[0].type() == Common::Redis::RespType::BulkString); + EXPECT_EQ(request.asArray()[0].asString(), "set"); + EXPECT_TRUE(request.asArray()[1].type() == Common::Redis::RespType::BulkString); + EXPECT_EQ(request.asArray()[1].asString(), std::to_string(i)); + EXPECT_TRUE(request.asArray()[2].type() == Common::Redis::RespType::BulkString); + EXPECT_EQ(request.asArray()[2].asString(), std::to_string(i)); + EXPECT_NE(&pool_requests_[i], nullptr); + return &pool_requests_[i]; + })); + EXPECT_TRUE(pool_callbacks_[i]->onRedirection(moved_response)); + } + + Common::Redis::RespValue expected_response; + expected_response.type(Common::Redis::RespType::SimpleString); + expected_response.asString() = Response::get().OK; + + Common::Redis::RespValuePtr response2(new Common::Redis::RespValue()); + response2->type(Common::Redis::RespType::SimpleString); + response2->asString() = 
Response::get().OK; + pool_callbacks_[1]->onResponse(std::move(response2)); + + Common::Redis::RespValuePtr response1(new Common::Redis::RespValue()); + response1->type(Common::Redis::RespType::SimpleString); + response1->asString() = Response::get().OK; + + time_system_.setMonotonicTime(std::chrono::milliseconds(10)); + EXPECT_CALL(store_, deliverHistogramToSinks( + Property(&Stats::Metric::name, "redis.foo.command.mset.latency"), 10)); + EXPECT_CALL(callbacks_, onResponse_(PointeesEq(&expected_response))); + pool_callbacks_[0]->onResponse(std::move(response1)); + + EXPECT_EQ(1UL, store_.counter("redis.foo.command.mset.total").value()); + EXPECT_EQ(1UL, store_.counter("redis.foo.command.mset.success").value()); +}; + +TEST_F(RedisMSETCommandHandlerTest, NormalWithAskRedirection) { + InSequence s; + + setup(2, {}); + EXPECT_NE(nullptr, handle_); + + // Test with a non-error response. + Common::Redis::RespValue bad_ask_response; + bad_ask_response.type(Common::Redis::RespType::Integer); + bad_ask_response.asInteger() = 1; + EXPECT_FALSE(pool_callbacks_[0]->onRedirection(bad_ask_response)); + + // Test with a valid ASK response. + Common::Redis::RespValue ask_response; + ask_response.type(Common::Redis::RespType::Error); + ask_response.asString() = "ASK 1234 192.168.0.1:5000"; // Exact values are not important. 
+ for (unsigned int i = 0; i < 2; i++) { + EXPECT_CALL(*conn_pool_, makeRequestToHost(_, _, Ref(*pool_callbacks_[i]))) + .WillOnce(Invoke( + [&](const std::string& host_address, const Common::Redis::RespValue& request, + Common::Redis::Client::PoolCallbacks&) -> Common::Redis::Client::PoolRequest* { + EXPECT_EQ(host_address, "192.168.0.1:5000"); + EXPECT_TRUE(request.type() == Common::Redis::RespType::Array); + EXPECT_EQ(request.asArray().size(), 4); + EXPECT_TRUE(request.asArray()[0].type() == Common::Redis::RespType::BulkString); + EXPECT_EQ(request.asArray()[0].asString(), "asking"); + EXPECT_TRUE(request.asArray()[1].type() == Common::Redis::RespType::BulkString); + EXPECT_EQ(request.asArray()[1].asString(), "set"); + EXPECT_TRUE(request.asArray()[2].type() == Common::Redis::RespType::BulkString); + EXPECT_EQ(request.asArray()[2].asString(), std::to_string(i)); + EXPECT_TRUE(request.asArray()[3].type() == Common::Redis::RespType::BulkString); + EXPECT_EQ(request.asArray()[3].asString(), std::to_string(i)); + EXPECT_NE(&pool_requests_[i], nullptr); + return &pool_requests_[i]; + })); + EXPECT_TRUE(pool_callbacks_[i]->onRedirection(ask_response)); + } + + Common::Redis::RespValue expected_response; + expected_response.type(Common::Redis::RespType::SimpleString); + expected_response.asString() = Response::get().OK; + + Common::Redis::RespValuePtr response2(new Common::Redis::RespValue()); + response2->type(Common::Redis::RespType::SimpleString); + response2->asString() = Response::get().OK; + pool_callbacks_[1]->onResponse(std::move(response2)); + + Common::Redis::RespValuePtr response1(new Common::Redis::RespValue()); + response1->type(Common::Redis::RespType::SimpleString); + response1->asString() = Response::get().OK; + + time_system_.setMonotonicTime(std::chrono::milliseconds(10)); + EXPECT_CALL(store_, deliverHistogramToSinks( + Property(&Stats::Metric::name, "redis.foo.command.mset.latency"), 10)); + EXPECT_CALL(callbacks_, 
onResponse_(PointeesEq(&expected_response))); + pool_callbacks_[0]->onResponse(std::move(response1)); + + EXPECT_EQ(1UL, store_.counter("redis.foo.command.mset.total").value()); + EXPECT_EQ(1UL, store_.counter("redis.foo.command.mset.success").value()); +}; + class RedisSplitKeysSumResultHandlerTest : public RedisCommandSplitterImplTest, public testing::WithParamInterface { public: @@ -670,8 +1039,8 @@ class RedisSplitKeysSumResultHandlerTest : public RedisCommandSplitterImplTest, request_strings.push_back(std::to_string(i)); } - Common::Redis::RespValue request; - makeBulkStringArray(request, request_strings); + Common::Redis::RespValuePtr request(new Common::Redis::RespValue()); + makeBulkStringArray(*request, request_strings); std::vector tmp_expected_requests(num_commands); expected_requests_.swap(tmp_expected_requests); @@ -689,7 +1058,7 @@ class RedisSplitKeysSumResultHandlerTest : public RedisCommandSplitterImplTest, .WillOnce(DoAll(WithArg<2>(SaveArgAddress(&pool_callbacks_[i])), Return(request_to_use))); } - handle_ = splitter_.makeRequest(request, callbacks_); + handle_ = splitter_.makeRequest(std::move(request), callbacks_); } std::vector expected_requests_; @@ -766,16 +1135,134 @@ TEST_P(RedisSplitKeysSumResultHandlerTest, NoUpstreamHostForAll) { EXPECT_EQ(1UL, store_.counter("redis.foo.command." + GetParam() + ".error").value()); }; +TEST_P(RedisSplitKeysSumResultHandlerTest, NormalWithMovedRedirection) { + InSequence s; + + setup(2, {}); + EXPECT_NE(nullptr, handle_); + + // Test with a non-error response. + Common::Redis::RespValue bad_moved_response; + bad_moved_response.type(Common::Redis::RespType::Integer); + bad_moved_response.asInteger() = 1; + EXPECT_FALSE(pool_callbacks_[0]->onRedirection(bad_moved_response)); + + // Test with a valid MOVED response. + Common::Redis::RespValue moved_response; + moved_response.type(Common::Redis::RespType::Error); + moved_response.asString() = "MOVED 1234 192.168.0.1:5000"; // Exact values are not important. 
+ for (unsigned int i = 0; i < 2; i++) { + EXPECT_CALL(*conn_pool_, makeRequestToHost(_, _, Ref(*pool_callbacks_[i]))) + .WillOnce(Invoke( + [&](const std::string& host_address, const Common::Redis::RespValue& request, + Common::Redis::Client::PoolCallbacks&) -> Common::Redis::Client::PoolRequest* { + EXPECT_EQ(host_address, "192.168.0.1:5000"); + EXPECT_TRUE(request.type() == Common::Redis::RespType::Array); + EXPECT_EQ(request.asArray().size(), 2); + EXPECT_TRUE(request.asArray()[0].type() == Common::Redis::RespType::BulkString); + EXPECT_EQ(request.asArray()[0].asString(), GetParam()); + EXPECT_TRUE(request.asArray()[1].type() == Common::Redis::RespType::BulkString); + EXPECT_EQ(request.asArray()[1].asString(), std::to_string(i)); + EXPECT_NE(&pool_requests_[i], nullptr); + return &pool_requests_[i]; + })); + EXPECT_TRUE(pool_callbacks_[i]->onRedirection(moved_response)); + } + + Common::Redis::RespValue expected_response; + expected_response.type(Common::Redis::RespType::Integer); + expected_response.asInteger() = 2; + + Common::Redis::RespValuePtr response2(new Common::Redis::RespValue()); + response2->type(Common::Redis::RespType::Integer); + response2->asInteger() = 1; + pool_callbacks_[1]->onResponse(std::move(response2)); + + Common::Redis::RespValuePtr response1(new Common::Redis::RespValue()); + response1->type(Common::Redis::RespType::Integer); + response1->asInteger() = 1; + time_system_.setMonotonicTime(std::chrono::milliseconds(10)); + EXPECT_CALL( + store_, + deliverHistogramToSinks( + Property(&Stats::Metric::name, "redis.foo.command." + GetParam() + ".latency"), 10)); + EXPECT_CALL(callbacks_, onResponse_(PointeesEq(&expected_response))); + pool_callbacks_[0]->onResponse(std::move(response1)); + + EXPECT_EQ(1UL, store_.counter("redis.foo.command." + GetParam() + ".total").value()); + EXPECT_EQ(1UL, store_.counter("redis.foo.command." 
+ GetParam() + ".success").value()); +}; + +TEST_P(RedisSplitKeysSumResultHandlerTest, NormalWithAskRedirection) { + InSequence s; + + setup(2, {}); + EXPECT_NE(nullptr, handle_); + + // Test with a non-error response. + Common::Redis::RespValue bad_ask_response; + bad_ask_response.type(Common::Redis::RespType::Integer); + bad_ask_response.asInteger() = 1; + EXPECT_FALSE(pool_callbacks_[0]->onRedirection(bad_ask_response)); + + // Test with a valid ASK response. + Common::Redis::RespValue ask_response; + ask_response.type(Common::Redis::RespType::Error); + ask_response.asString() = "ASK 1234 192.168.0.1:5000"; // Exact values are not important. + for (unsigned int i = 0; i < 2; i++) { + EXPECT_CALL(*conn_pool_, makeRequestToHost(_, _, Ref(*pool_callbacks_[i]))) + .WillOnce(Invoke( + [&](const std::string& host_address, const Common::Redis::RespValue& request, + Common::Redis::Client::PoolCallbacks&) -> Common::Redis::Client::PoolRequest* { + EXPECT_EQ(host_address, "192.168.0.1:5000"); + EXPECT_TRUE(request.type() == Common::Redis::RespType::Array); + EXPECT_EQ(request.asArray().size(), 3); + EXPECT_TRUE(request.asArray()[0].type() == Common::Redis::RespType::BulkString); + EXPECT_EQ(request.asArray()[0].asString(), "asking"); + EXPECT_TRUE(request.asArray()[1].type() == Common::Redis::RespType::BulkString); + EXPECT_EQ(request.asArray()[1].asString(), GetParam()); + EXPECT_TRUE(request.asArray()[2].type() == Common::Redis::RespType::BulkString); + EXPECT_EQ(request.asArray()[2].asString(), std::to_string(i)); + EXPECT_NE(&pool_requests_[i], nullptr); + return &pool_requests_[i]; + })); + EXPECT_TRUE(pool_callbacks_[i]->onRedirection(ask_response)); + } + + Common::Redis::RespValue expected_response; + expected_response.type(Common::Redis::RespType::Integer); + expected_response.asInteger() = 2; + + Common::Redis::RespValuePtr response2(new Common::Redis::RespValue()); + response2->type(Common::Redis::RespType::Integer); + response2->asInteger() = 1; + 
pool_callbacks_[1]->onResponse(std::move(response2)); + + Common::Redis::RespValuePtr response1(new Common::Redis::RespValue()); + response1->type(Common::Redis::RespType::Integer); + response1->asInteger() = 1; + time_system_.setMonotonicTime(std::chrono::milliseconds(10)); + EXPECT_CALL( + store_, + deliverHistogramToSinks( + Property(&Stats::Metric::name, "redis.foo.command." + GetParam() + ".latency"), 10)); + EXPECT_CALL(callbacks_, onResponse_(PointeesEq(&expected_response))); + pool_callbacks_[0]->onResponse(std::move(response1)); + + EXPECT_EQ(1UL, store_.counter("redis.foo.command." + GetParam() + ".total").value()); + EXPECT_EQ(1UL, store_.counter("redis.foo.command." + GetParam() + ".success").value()); +}; + INSTANTIATE_TEST_SUITE_P( RedisSplitKeysSumResultHandlerTest, RedisSplitKeysSumResultHandlerTest, testing::ValuesIn(Common::Redis::SupportedCommands::hashMultipleSumResultCommands())); class RedisSingleServerRequestWithLatencyMicrosTest : public RedisSingleServerRequestTest { public: - void makeRequest(const std::string& hash_key, const Common::Redis::RespValue& request) { - EXPECT_CALL(*conn_pool_, makeRequest(hash_key, Ref(request), _)) + void makeRequest(const std::string& hash_key, Common::Redis::RespValuePtr&& request) { + EXPECT_CALL(*conn_pool_, makeRequest(hash_key, Ref(*request), _)) .WillOnce(DoAll(WithArg<2>(SaveArgAddress(&pool_callbacks_)), Return(&pool_request_))); - handle_ = splitter_.makeRequest(request, callbacks_); + handle_ = splitter_.makeRequest(std::move(request), callbacks_); } ConnPool::MockInstance* conn_pool_{new ConnPool::MockInstance()}; @@ -790,9 +1277,9 @@ TEST_P(RedisSingleServerRequestWithLatencyMicrosTest, Success) { std::string lower_command(GetParam()); table.toLowerCase(lower_command); - Common::Redis::RespValue request; - makeBulkStringArray(request, {GetParam(), "hello"}); - makeRequest("hello", request); + Common::Redis::RespValuePtr request{new Common::Redis::RespValue()}; + makeBulkStringArray(*request, 
{GetParam(), "hello"}); + makeRequest("hello", std::move(request)); EXPECT_NE(nullptr, handle_); time_system_.setMonotonicTime(std::chrono::milliseconds(10)); diff --git a/test/extensions/filters/network/redis_proxy/conn_pool_impl_test.cc b/test/extensions/filters/network/redis_proxy/conn_pool_impl_test.cc index bd267cd1670d2..01d93c9a541f6 100644 --- a/test/extensions/filters/network/redis_proxy/conn_pool_impl_test.cc +++ b/test/extensions/filters/network/redis_proxy/conn_pool_impl_test.cc @@ -36,15 +36,17 @@ namespace ConnPool { class RedisConnPoolImplTest : public testing::Test, public Common::Redis::Client::ClientFactory { public: - void setup(bool cluster_exists = true) { + void setup(bool cluster_exists = true, bool hashtagging = true) { EXPECT_CALL(cm_, addThreadLocalClusterUpdateCallbacks_(_)) .WillOnce(DoAll(SaveArgAddress(&update_callbacks_), ReturnNew())); if (!cluster_exists) { EXPECT_CALL(cm_, get("fake_cluster")).WillOnce(Return(nullptr)); } - conn_pool_ = std::make_unique(cluster_name_, cm_, *this, tls_, - Common::Redis::Client::createConnPoolSettings()); + conn_pool_ = std::make_unique( + cluster_name_, cm_, *this, tls_, + Common::Redis::Client::createConnPoolSettings(20, hashtagging, true)); + test_address_ = Network::Utility::resolveUrl("tcp://127.0.0.1:3000"); } void makeSimpleRequest(bool create_client) { @@ -56,6 +58,8 @@ class RedisConnPoolImplTest : public testing::Test, public Common::Redis::Client Common::Redis::RespValue value; Common::Redis::Client::MockPoolCallbacks callbacks; Common::Redis::Client::MockPoolRequest active_request; + EXPECT_CALL(*cm_.thread_local_cluster_.lb_.host_, address()) + .WillRepeatedly(Return(test_address_)); EXPECT_CALL(*client_, makeRequest(Ref(value), Ref(callbacks))) .WillOnce(Return(&active_request)); Common::Redis::Client::PoolRequest* request = @@ -77,6 +81,7 @@ class RedisConnPoolImplTest : public testing::Test, public Common::Redis::Client InstancePtr conn_pool_; Upstream::ClusterUpdateCallbacks* 
update_callbacks_{}; Common::Redis::Client::MockClient* client_{}; + Network::Address::InstanceConstSharedPtr test_address_; }; TEST_F(RedisConnPoolImplTest, Basic) { @@ -97,6 +102,8 @@ TEST_F(RedisConnPoolImplTest, Basic) { return cm_.thread_local_cluster_.lb_.host_; })); EXPECT_CALL(*this, create_(_)).WillOnce(Return(client)); + EXPECT_CALL(*cm_.thread_local_cluster_.lb_.host_, address()) + .WillRepeatedly(Return(test_address_)); EXPECT_CALL(*client, makeRequest(Ref(value), Ref(callbacks))).WillOnce(Return(&active_request)); Common::Redis::Client::PoolRequest* request = conn_pool_->makeRequest("hash_key", value, callbacks); @@ -137,6 +144,40 @@ TEST_F(RedisConnPoolImplTest, Hashtagging) { tls_.shutdownThread(); }; +TEST_F(RedisConnPoolImplTest, HashtaggingNotEnabled) { + InSequence s; + + setup(true, false); // Test with hashtagging not enabled. + + Common::Redis::RespValue value; + Common::Redis::Client::MockPoolCallbacks callbacks; + + auto expectHashKey = [](const std::string& s) { + return [s](Upstream::LoadBalancerContext* context) -> Upstream::HostConstSharedPtr { + EXPECT_EQ(context->computeHashKey().value(), MurmurHash::murmurHash2_64(s)); + return nullptr; + }; + }; + + EXPECT_CALL(cm_.thread_local_cluster_.lb_, chooseHost(_)) + .WillOnce(Invoke(expectHashKey("{foo}.bar"))); + conn_pool_->makeRequest("{foo}.bar", value, callbacks); + + EXPECT_CALL(cm_.thread_local_cluster_.lb_, chooseHost(_)) + .WillOnce(Invoke(expectHashKey("foo{}{bar}"))); + conn_pool_->makeRequest("foo{}{bar}", value, callbacks); + + EXPECT_CALL(cm_.thread_local_cluster_.lb_, chooseHost(_)) + .WillOnce(Invoke(expectHashKey("foo{{bar}}zap"))); + conn_pool_->makeRequest("foo{{bar}}zap", value, callbacks); + + EXPECT_CALL(cm_.thread_local_cluster_.lb_, chooseHost(_)) + .WillOnce(Invoke(expectHashKey("foo{bar}{zap}"))); + conn_pool_->makeRequest("foo{bar}{zap}", value, callbacks); + + tls_.shutdownThread(); +}; + // Conn pool created when no cluster exists at creation time. 
Dynamic cluster creation and removal // work correctly. TEST_F(RedisConnPoolImplTest, NoClusterAtConstruction) { @@ -191,8 +232,8 @@ TEST_F(RedisConnPoolImplTest, HostRemove) { Common::Redis::Client::MockPoolCallbacks callbacks; Common::Redis::RespValue value; - std::shared_ptr host1(new Upstream::MockHost()); - std::shared_ptr host2(new Upstream::MockHost()); + std::shared_ptr host1(new Upstream::MockHost()); + std::shared_ptr host2(new Upstream::MockHost()); Common::Redis::Client::MockClient* client1 = new NiceMock(); Common::Redis::Client::MockClient* client2 = new NiceMock(); @@ -200,6 +241,7 @@ TEST_F(RedisConnPoolImplTest, HostRemove) { EXPECT_CALL(*this, create_(Eq(host1))).WillOnce(Return(client1)); Common::Redis::Client::MockPoolRequest active_request1; + EXPECT_CALL(*host1, address()).WillRepeatedly(Return(test_address_)); EXPECT_CALL(*client1, makeRequest(Ref(value), Ref(callbacks))).WillOnce(Return(&active_request1)); Common::Redis::Client::PoolRequest* request1 = conn_pool_->makeRequest("hash_key", value, callbacks); @@ -209,15 +251,22 @@ TEST_F(RedisConnPoolImplTest, HostRemove) { EXPECT_CALL(*this, create_(Eq(host2))).WillOnce(Return(client2)); Common::Redis::Client::MockPoolRequest active_request2; + EXPECT_CALL(*host2, address()).WillRepeatedly(Return(test_address_)); EXPECT_CALL(*client2, makeRequest(Ref(value), Ref(callbacks))).WillOnce(Return(&active_request2)); Common::Redis::Client::PoolRequest* request2 = conn_pool_->makeRequest("bar", value, callbacks); EXPECT_EQ(&active_request2, request2); EXPECT_CALL(*client2, close()); + EXPECT_CALL(*host2, address()).WillRepeatedly(Return(test_address_)); cm_.thread_local_cluster_.cluster_.prioritySet().getMockHostSet(0)->runCallbacks({}, {host2}); EXPECT_CALL(*client1, close()); tls_.shutdownThread(); + + ASSERT_TRUE(testing::Mock::VerifyAndClearExpectations(host1.get())); + ASSERT_TRUE(testing::Mock::VerifyAndClearExpectations(host2.get())); + testing::Mock::AllowLeak(host1.get()); + 
testing::Mock::AllowLeak(host2.get()); } TEST_F(RedisConnPoolImplTest, DeleteFollowedByClusterUpdateCallback) { @@ -255,6 +304,8 @@ TEST_F(RedisConnPoolImplTest, RemoteClose) { EXPECT_CALL(cm_.thread_local_cluster_.lb_, chooseHost(_)); EXPECT_CALL(*this, create_(_)).WillOnce(Return(client)); + EXPECT_CALL(*cm_.thread_local_cluster_.lb_.host_, address()) + .WillRepeatedly(Return(test_address_)); EXPECT_CALL(*client, makeRequest(Ref(value), Ref(callbacks))).WillOnce(Return(&active_request)); conn_pool_->makeRequest("hash_key", value, callbacks); @@ -266,6 +317,66 @@ TEST_F(RedisConnPoolImplTest, RemoteClose) { tls_.shutdownThread(); } +TEST_F(RedisConnPoolImplTest, makeRequestToHost) { + InSequence s; + + setup(false); + + Common::Redis::RespValue value; + Common::Redis::Client::MockPoolRequest active_request1; + Common::Redis::Client::MockPoolRequest active_request2; + Common::Redis::Client::MockPoolCallbacks callbacks1; + Common::Redis::Client::MockPoolCallbacks callbacks2; + Common::Redis::Client::MockClient* client1 = new NiceMock(); + Common::Redis::Client::MockClient* client2 = new NiceMock(); + Upstream::HostConstSharedPtr host1; + Upstream::HostConstSharedPtr host2; + + // There is no cluster yet, so makeRequestToHost() should fail. + EXPECT_EQ(nullptr, conn_pool_->makeRequestToHost("10.0.0.1:3000", value, callbacks1)); + // Add the cluster now. 
+ update_callbacks_->onClusterAddOrUpdate(cm_.thread_local_cluster_); + + EXPECT_CALL(*this, create_(_)).WillOnce(DoAll(SaveArg<0>(&host1), Return(client1))); + EXPECT_CALL(*client1, makeRequest(Ref(value), Ref(callbacks1))) + .WillOnce(Return(&active_request1)); + Common::Redis::Client::PoolRequest* request1 = + conn_pool_->makeRequestToHost("10.0.0.1:3000", value, callbacks1); + EXPECT_EQ(&active_request1, request1); + EXPECT_EQ(host1->address()->asString(), "10.0.0.1:3000"); + + // IPv6 address returned from Redis server will not have square brackets + // around it, while Envoy represents Address::Ipv6Instance addresses with square brackets around + // the address. + EXPECT_CALL(*this, create_(_)).WillOnce(DoAll(SaveArg<0>(&host2), Return(client2))); + EXPECT_CALL(*client2, makeRequest(Ref(value), Ref(callbacks2))) + .WillOnce(Return(&active_request2)); + Common::Redis::Client::PoolRequest* request2 = + conn_pool_->makeRequestToHost("2001:470:813B:0:0:0:0:1:3333", value, callbacks2); + EXPECT_EQ(&active_request2, request2); + EXPECT_EQ(host2->address()->asString(), "[2001:470:813b::1]:3333"); + + // Test with a badly specified host address (no colon, no address, no port). + EXPECT_EQ(conn_pool_->makeRequestToHost("bad", value, callbacks1), nullptr); + // Test with a badly specified IPv4 address. + EXPECT_EQ(conn_pool_->makeRequestToHost("10.0.bad:3000", value, callbacks1), nullptr); + // Test with a badly specified TCP port. + EXPECT_EQ(conn_pool_->makeRequestToHost("10.0.0.1:bad", value, callbacks1), nullptr); + // Test with a TCP port outside of the acceptable range for a 32-bit integer. + EXPECT_EQ(conn_pool_->makeRequestToHost("10.0.0.1:4294967297", value, callbacks1), + nullptr); // 2^32 + 1 + // Test with a TCP port outside of the acceptable range for a TCP port (0 .. 65535). + EXPECT_EQ(conn_pool_->makeRequestToHost("10.0.0.1:65536", value, callbacks1), nullptr); + // Test with a badly specified IPv6-like address. 
+ EXPECT_EQ(conn_pool_->makeRequestToHost("bad:ipv6:3000", value, callbacks1), nullptr); + // Test with a valid IPv6 address and a badly specified TCP port (out of range). + EXPECT_EQ(conn_pool_->makeRequestToHost("2001:470:813b:::70000", value, callbacks1), nullptr); + + EXPECT_CALL(*client2, close()); + EXPECT_CALL(*client1, close()); + tls_.shutdownThread(); +} + } // namespace ConnPool } // namespace RedisProxy } // namespace NetworkFilters diff --git a/test/extensions/filters/network/redis_proxy/mocks.h b/test/extensions/filters/network/redis_proxy/mocks.h index 19c724ac74478..ecd104af4cd18 100644 --- a/test/extensions/filters/network/redis_proxy/mocks.h +++ b/test/extensions/filters/network/redis_proxy/mocks.h @@ -29,6 +29,10 @@ class MockInstance : public Instance { Common::Redis::Client::PoolRequest*( const std::string& hash_key, const Common::Redis::RespValue& request, Common::Redis::Client::PoolCallbacks& callbacks)); + MOCK_METHOD3(makeRequestToHost, + Common::Redis::Client::PoolRequest*( + const std::string& host_address, const Common::Redis::RespValue& request, + Common::Redis::Client::PoolCallbacks& callbacks)); }; } // namespace ConnPool @@ -58,9 +62,9 @@ class MockInstance : public Instance { MockInstance(); ~MockInstance(); - SplitRequestPtr makeRequest(const Common::Redis::RespValue& request, + SplitRequestPtr makeRequest(Common::Redis::RespValuePtr&& request, SplitCallbacks& callbacks) override { - return SplitRequestPtr{makeRequest_(request, callbacks)}; + return SplitRequestPtr{makeRequest_(*request, callbacks)}; } MOCK_METHOD2(makeRequest_, diff --git a/test/extensions/filters/network/redis_proxy/redis_proxy_integration_test.cc b/test/extensions/filters/network/redis_proxy/redis_proxy_integration_test.cc index 53b62dbb2da43..ab727c0f846c6 100644 --- a/test/extensions/filters/network/redis_proxy/redis_proxy_integration_test.cc +++ b/test/extensions/filters/network/redis_proxy/redis_proxy_integration_test.cc @@ -7,12 +7,18 @@ #include 
"gtest/gtest.h" +using testing::Return; + namespace RedisCmdSplitter = Envoy::Extensions::NetworkFilters::RedisProxy::CommandSplitter; namespace Envoy { namespace { -const std::string REDIS_PROXY_CONFIG = R"EOF( +// This is a basic redis_proxy configuration with 2 endpoints/hosts +// in the cluster. The load balancing policy must be set +// to random for proper test operation. + +const std::string CONFIG = R"EOF( admin: access_log_path: /dev/null address: @@ -21,11 +27,23 @@ const std::string REDIS_PROXY_CONFIG = R"EOF( port_value: 0 static_resources: clusters: - name: cluster_0 - hosts: - socket_address: - address: 127.0.0.1 - port_value: 0 + - name: cluster_0 + type: STATIC + lb_policy: RANDOM + load_assignment: + cluster_name: cluster_0 + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 0 + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 0 listeners: name: listener_0 address: @@ -42,6 +60,13 @@ const std::string REDIS_PROXY_CONFIG = R"EOF( op_timeout: 5s )EOF"; +// This is a configuration with moved/ask redirection support enabled. +const std::string CONFIG_WITH_REDIRECTION = CONFIG + R"EOF( + enable_redirection: true +)EOF"; + +// This function encodes commands as an array of bulkstrings as transmitted by Redis clients to +// Redis servers, according to the Redis protocol. 
std::string makeBulkStringArray(std::vector&& command_strings) { std::stringstream result; @@ -57,23 +82,159 @@ std::string makeBulkStringArray(std::vector&& command_strings) { class RedisProxyIntegrationTest : public testing::TestWithParam, public BaseIntegrationTest { public: - RedisProxyIntegrationTest() : BaseIntegrationTest(GetParam(), REDIS_PROXY_CONFIG) {} + RedisProxyIntegrationTest(const std::string& config = CONFIG, int num_upstreams = 2) + : BaseIntegrationTest(GetParam(), config), num_upstreams_(num_upstreams), + version_(GetParam()) {} ~RedisProxyIntegrationTest() override { test_server_.reset(); fake_upstreams_.clear(); } + // This method encodes a fake upstream's IP address and TCP port in the + // same format as one would expect from a Redis server in + // an ask/moved redirection error. + + std::string redisAddressAndPort(FakeUpstreamPtr& upstream) { + std::stringstream result; + if (version_ == Network::Address::IpVersion::v4) { + result << "127.0.0.1" + << ":"; + } else { + result << "::1" + << ":"; + } + result << upstream->localAddress()->ip()->port(); + return result.str(); + } + void initialize() override; + + /** + * Simple bi-directional test between a fake Redis client and Redis server. + * @param request supplies Redis client data to transmit to the Redis server. + * @param response supplies Redis server data to transmit to the client. + */ + void simpleRequestAndResponse(const std::string& request, const std::string& response); + /** + * Simple bi-directional test between a fake Redis client and proxy server. + * @param request supplies Redis client data to transmit to the proxy. + * @param proxy_response supplies proxy data in response to the client's request. 
+ */ + void simpleProxyResponse(const std::string& request, const std::string& proxy_response); + +protected: + Runtime::MockRandomGenerator* mock_rng_{}; + const int num_upstreams_; + const Network::Address::IpVersion version_; +}; + +class RedisProxyWithRedirectionIntegrationTest : public RedisProxyIntegrationTest { +public: + RedisProxyWithRedirectionIntegrationTest() + : RedisProxyIntegrationTest(CONFIG_WITH_REDIRECTION, 2) {} + + /** + * Simple bi-directional test with a fake Redis client and 2 fake Redis servers. + * @param target_server a handle to the second server that will respond to the request. + * @param request supplies client data to transmit to the first upstream server. + * @param redirection_response supplies the moved or ask redirection error from the first server. + * @param received_request suplies data received by the second server from the proxy. + * @param response supplies data sent by the second server back to the fake Redis client. + */ + void simpleRedirection(FakeUpstreamPtr& target_server, const std::string& request, + const std::string& redirection_response, + const std::string& received_request, const std::string& response); }; INSTANTIATE_TEST_SUITE_P(IpVersions, RedisProxyIntegrationTest, testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), TestUtility::ipTestParamsToString); +INSTANTIATE_TEST_SUITE_P(IpVersions, RedisProxyWithRedirectionIntegrationTest, + testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), + TestUtility::ipTestParamsToString); + void RedisProxyIntegrationTest::initialize() { + setUpstreamCount(num_upstreams_); + setDeterministic(); config_helper_.renameListener("redis_proxy"); BaseIntegrationTest::initialize(); + + mock_rng_ = dynamic_cast(&test_server_->server().random()); + // Abort now if we cannot downcast the server's random number generator pointer. + ASSERT_TRUE(mock_rng_ != nullptr); + // Ensure that fake_upstreams_[0] is the load balancer's host of choice by default. 
+ ON_CALL(*mock_rng_, random()).WillByDefault(Return(0)); +} + +void RedisProxyIntegrationTest::simpleRequestAndResponse(const std::string& request, + const std::string& response) { + std::string proxy_to_server; + IntegrationTcpClientPtr redis_client = makeTcpConnection(lookupPort("redis_proxy")); + redis_client->write(request); + + FakeRawConnectionPtr fake_upstream_connection; + EXPECT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection)); + EXPECT_TRUE(fake_upstream_connection->waitForData(request.size(), &proxy_to_server)); + // The original request should be the same as the data received by the server. + EXPECT_EQ(request, proxy_to_server); + + EXPECT_TRUE(fake_upstream_connection->write(response)); + redis_client->waitForData(response); + // The original response should be received by the fake Redis client. + EXPECT_EQ(response, redis_client->data()); + + redis_client->close(); + EXPECT_TRUE(fake_upstream_connection->close()); +} + +void RedisProxyIntegrationTest::simpleProxyResponse(const std::string& request, + const std::string& proxy_response) { + IntegrationTcpClientPtr redis_client = makeTcpConnection(lookupPort("redis_proxy")); + redis_client->write(request); + redis_client->waitForData(proxy_response); + // After sending the request to the proxy, the fake redis client should receive proxy_response. 
+ EXPECT_EQ(proxy_response, redis_client->data()); + redis_client->close(); +} + +void RedisProxyWithRedirectionIntegrationTest::simpleRedirection( + FakeUpstreamPtr& target_server, const std::string& request, + const std::string& redirection_response, const std::string& received_request, + const std::string& response) { + std::string proxy_to_server; + IntegrationTcpClientPtr redis_client = makeTcpConnection(lookupPort("redis_proxy")); + redis_client->write(request); + + FakeRawConnectionPtr fake_upstream_connection_1, fake_upstream_connection_2; + + // Data from the client should always be routed to fake_upstreams_[0] by the load balancer. + EXPECT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection_1)); + EXPECT_TRUE(fake_upstream_connection_1->waitForData(request.size(), &proxy_to_server)); + // The data in request should be received by the first server, fake_upstreams_[0]. + EXPECT_EQ(request, proxy_to_server); + proxy_to_server.clear(); + + // Send the redirection_response from the first fake Redis server back to the proxy. + EXPECT_TRUE(fake_upstream_connection_1->write(redirection_response)); + // The proxy should initiate a new connection to the fake redis server, target_server, in + // response. + EXPECT_TRUE(target_server->waitForRawConnection(fake_upstream_connection_2)); + // The server, target_server, should receive received_request which may or may not be the same as + // the original request. + EXPECT_TRUE(fake_upstream_connection_2->waitForData(received_request.size(), &proxy_to_server)); + EXPECT_EQ(received_request, proxy_to_server); + + // Send response from the second fake Redis server, target_server, to the client. + EXPECT_TRUE(fake_upstream_connection_2->write(response)); + redis_client->waitForData(response); + // The client should receive response unchanged. 
+ EXPECT_EQ(response, redis_client->data()); + + redis_client->close(); + EXPECT_TRUE(fake_upstream_connection_1->close()); + EXPECT_TRUE(fake_upstream_connection_2->close()); } // This test sends a simple "get foo" command from a fake @@ -84,53 +245,130 @@ void RedisProxyIntegrationTest::initialize() { TEST_P(RedisProxyIntegrationTest, SimpleRequestAndResponse) { initialize(); - IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("redis_proxy")); + simpleRequestAndResponse(makeBulkStringArray({"get", "foo"}), "$3\r\nbar\r\n"); +} - std::string client_to_proxy = makeBulkStringArray({"get", "foo"}); - std::string proxy_to_server; +// This test sends an invalid Redis command from a fake +// downstream client to the envoy proxy. Envoy will respond +// with an invalid request error. - EXPECT_TRUE(client_to_proxy.size() > 0); - EXPECT_TRUE(client_to_proxy.find("get") != std::string::npos); - EXPECT_TRUE(client_to_proxy.find("foo") != std::string::npos); - tcp_client->write(client_to_proxy); +TEST_P(RedisProxyIntegrationTest, InvalidRequest) { + std::stringstream error_response; + error_response << "-" << RedisCmdSplitter::Response::get().InvalidRequest << "\r\n"; + initialize(); + simpleProxyResponse(makeBulkStringArray({"foo"}), error_response.str()); +} - FakeRawConnectionPtr fake_upstream_connection; - EXPECT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection)); - EXPECT_TRUE(fake_upstream_connection->waitForData(client_to_proxy.size(), &proxy_to_server)); - EXPECT_EQ(client_to_proxy, proxy_to_server); +// This test sends a simple Redis command to a fake upstream +// Redis server. The server replies with a MOVED or ASK redirection +// error, and that error is passed unchanged to the fake downstream +// since redirection support has not been enabled (by default). 
- std::string server_to_proxy = "$3\r\nbar\r\n"; // bulkstring reply of "bar" +TEST_P(RedisProxyIntegrationTest, RedirectWhenNotEnabled) { + std::string request = makeBulkStringArray({"get", "foo"}); + initialize(); + if (version_ == Network::Address::IpVersion::v4) { + simpleRequestAndResponse(request, "-MOVED 1111 127.0.0.1:34123\r\n"); + simpleRequestAndResponse(request, "-ASK 1111 127.0.0.1:34123\r\n"); + } else { + simpleRequestAndResponse(request, "-MOVED 1111 ::1:34123\r\n"); + simpleRequestAndResponse(request, "-ASK 1111 ::1:34123\r\n"); + } +} - EXPECT_TRUE(fake_upstream_connection->write(server_to_proxy)); - tcp_client->waitForData(server_to_proxy); - EXPECT_EQ(server_to_proxy, tcp_client->data()); +// This test sends a simple Redis command to a sequence of fake upstream +// Redis servers. The first server replies with a MOVED or ASK redirection +// error that specifies the second upstream server in the static configuration +// as its target. The target server responds to a possibly transformed +// request, and its response is received unchanged by the fake Redis client. - tcp_client->close(); - EXPECT_TRUE(fake_upstream_connection->close()); +TEST_P(RedisProxyWithRedirectionIntegrationTest, RedirectToKnownServer) { + std::string request = makeBulkStringArray({"get", "foo"}); + initialize(); + std::stringstream redirection_error; + redirection_error << "-MOVED 1111 " << redisAddressAndPort(fake_upstreams_[1]) << "\r\n"; + simpleRedirection(fake_upstreams_[1], request, redirection_error.str(), request, "$3\r\nbar\r\n"); + + redirection_error.str(""); + redirection_error << "-ASK 1111 " << redisAddressAndPort(fake_upstreams_[1]) << "\r\n"; + simpleRedirection(fake_upstreams_[1], request, redirection_error.str(), + makeBulkStringArray({"asking", "get", "foo"}), "$3\r\nbar\r\n"); } -// This test sends an invalid Redis command from a fake -// downstream client to the envoy proxy. Envoy will respond -// with an invalid request error. 
+// This test sends a simple Redis commands to a sequence of fake upstream +// Redis servers. The first server replies with a MOVED or ASK redirection +// error that specifies an unknown upstream server not in its static configuration +// as its target. The target server responds to a possibly transformed +// request, and its response is received unchanged by the fake Redis client. -TEST_P(RedisProxyIntegrationTest, InvalidRequest) { +TEST_P(RedisProxyWithRedirectionIntegrationTest, RedirectToUnknownServer) { + std::string request = makeBulkStringArray({"get", "foo"}); initialize(); - IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("redis_proxy")); - std::string client_to_proxy = makeBulkStringArray({"foo"}); + auto endpoint = + Network::Utility::parseInternetAddress(Network::Test::getAnyAddressString(version_), 0); + FakeUpstreamPtr target_server{ + new FakeUpstream(endpoint, upstreamProtocol(), timeSystem(), enable_half_close_)}; - EXPECT_TRUE(client_to_proxy.size() > 0); - EXPECT_TRUE(client_to_proxy.find("foo") != std::string::npos); - tcp_client->write(client_to_proxy); + std::stringstream redirection_error; + redirection_error << "-MOVED 1111 " << redisAddressAndPort(target_server) << "\r\n"; + simpleRedirection(target_server, request, redirection_error.str(), request, "$3\r\nbar\r\n"); - std::stringstream error_response; - error_response << "-" << RedisCmdSplitter::Response::get().InvalidRequest << "\r\n"; - std::string proxy_to_client = error_response.str(); + redirection_error.str(""); + redirection_error << "-ASK 1111 " << redisAddressAndPort(target_server) << "\r\n"; + simpleRedirection(target_server, request, redirection_error.str(), + makeBulkStringArray({"asking", "get", "foo"}), "$3\r\nbar\r\n"); +} - tcp_client->waitForData(proxy_to_client); - EXPECT_EQ(proxy_to_client, tcp_client->data()); +// This test verifies that various forms of bad MOVED/ASK redirection errors +// from a fake Redis server are not acted upon, and are passed 
unchanged +// to the fake Redis client. + +TEST_P(RedisProxyWithRedirectionIntegrationTest, BadRedirectStrings) { + initialize(); + std::string request = makeBulkStringArray({"get", "foo"}); - tcp_client->close(); + // Test with truncated moved errors. + simpleRequestAndResponse(request, "-MOVED 1111\r\n"); + simpleRequestAndResponse(request, "-MOVED\r\n"); + // Test with truncated ask errors. + simpleRequestAndResponse(request, "-ASK 1111\r\n"); + simpleRequestAndResponse(request, "-ASK\r\n"); + // Test with a badly specified IP address and TCP port field. + simpleRequestAndResponse(request, "-MOVED 2222 badfield\r\n"); + simpleRequestAndResponse(request, "-ASK 2222 badfield\r\n"); + // Test with a bad IP address specification. + if (version_ == Network::Address::IpVersion::v4) { + simpleRequestAndResponse(request, "-MOVED 2222 127.0:3333\r\n"); + simpleRequestAndResponse(request, "-ASK 2222 127.0:3333\r\n"); + } else { + simpleRequestAndResponse(request, "-MOVED 2222 ::11111:3333\r\n"); + simpleRequestAndResponse(request, "-ASK 2222 ::11111:3333\r\n"); + } + // Test with a bad IP address specification (not numeric). + if (version_ == Network::Address::IpVersion::v4) { + simpleRequestAndResponse(request, "-MOVED 2222 badaddress:3333\r\n"); + simpleRequestAndResponse(request, "-ASK 2222 badaddress:3333\r\n"); + } else { + simpleRequestAndResponse(request, "-MOVED 2222 badaddress:3333\r\n"); + simpleRequestAndResponse(request, "-ASK 2222 badaddress:3333\r\n"); + } + // Test with a bad TCP port specification (out of range). + if (version_ == Network::Address::IpVersion::v4) { + simpleRequestAndResponse(request, "-MOVED 2222 127.0.0.1:100000\r\n"); + simpleRequestAndResponse(request, "-ASK 2222 127.0.0.1:100000\r\n"); + } else { + simpleRequestAndResponse(request, "-MOVED 2222 ::1:1000000\r\n"); + simpleRequestAndResponse(request, "-ASK 2222 ::1:1000000\r\n"); + } + // Test with a bad TCP port specification (not numeric). 
+ if (version_ == Network::Address::IpVersion::v4) { + simpleRequestAndResponse(request, "-MOVED 2222 127.0.0.1:badport\r\n"); + simpleRequestAndResponse(request, "-ASK 2222 127.0.0.1:badport\r\n"); + } else { + simpleRequestAndResponse(request, "-MOVED 2222 ::1:badport\r\n"); + simpleRequestAndResponse(request, "-ASK 2222 ::1:badport\r\n"); + } } } // namespace diff --git a/test/extensions/health_checkers/redis/redis_test.cc b/test/extensions/health_checkers/redis/redis_test.cc index 68b9c99ff9e6c..9b572665021ec 100644 --- a/test/extensions/health_checkers/redis/redis_test.cc +++ b/test/extensions/health_checkers/redis/redis_test.cc @@ -393,6 +393,47 @@ TEST_F(RedisHealthCheckerTest, Exists) { EXPECT_EQ(2UL, cluster_->info_->stats_store_.counter("health_check.failure").value()); } +TEST_F(RedisHealthCheckerTest, ExistsRedirected) { + InSequence s; + setupExistsHealthcheck(); + + cluster_->prioritySet().getMockHostSet(0)->hosts_ = { + Upstream::makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + + expectSessionCreate(); + expectClientCreate(); + expectExistsRequestCreate(); + health_checker_->start(); + + client_->runHighWatermarkCallbacks(); + client_->runLowWatermarkCallbacks(); + + // Success with moved redirection + EXPECT_CALL(*timeout_timer_, disableTimer()); + EXPECT_CALL(*interval_timer_, enableTimer(_)); + NetworkFilters::Common::Redis::RespValue moved_response; + moved_response.type(NetworkFilters::Common::Redis::RespType::Error); + moved_response.asString() = "MOVED 1111 127.0.0.1:81"; // exact values not important + pool_callbacks_->onRedirection(moved_response); + + expectExistsRequestCreate(); + interval_timer_->callback_(); + + // Success with ask redirection + EXPECT_CALL(*timeout_timer_, disableTimer()); + EXPECT_CALL(*interval_timer_, enableTimer(_)); + NetworkFilters::Common::Redis::RespValue ask_response; + ask_response.type(NetworkFilters::Common::Redis::RespType::Error); + ask_response.asString() = "ASK 1111 127.0.0.1:81"; // exact values 
not important + pool_callbacks_->onRedirection(ask_response); + + EXPECT_CALL(*client_, close()); + + EXPECT_EQ(2UL, cluster_->info_->stats_store_.counter("health_check.attempt").value()); + EXPECT_EQ(2UL, cluster_->info_->stats_store_.counter("health_check.success").value()); + EXPECT_EQ(0UL, cluster_->info_->stats_store_.counter("health_check.failure").value()); +} + // Tests that redis client will behave appropriately when reuse_connection is false. TEST_F(RedisHealthCheckerTest, NoConnectionReuse) { InSequence s; From 03cf28658399a7e411e49f9718782c919578d099 Mon Sep 17 00:00:00 2001 From: larrywest Date: Mon, 8 Apr 2019 12:24:56 -0700 Subject: [PATCH 076/165] extensions: make jwt_authn filter token-parsing more flexible (#6355) (#6384) Description: modifies jwt_authn filter's ExtractorImpl extract method to use the from_headers's value_prefix tag more precisely, allowing syntax like "tag=,other=xxx" rather than simply taking the remainder of the string as the JWT candidate. See Issue #6355 for full description. Should be backwards-compatible with existing uses of jwt_authn. Risk Level: Medium (scope: affects JWT authentication) Testing: Unit testing was added to test/.../jwt_authn/extractor_test.cc. Since Extractor itself does not validate the JWT, only the parsing is tested. Docs Changes: see "Further header options" section added to api/envoy/config/filter/http/jwt_authn/v2alpha/README.md Release Notes: Adds enhancement per Issue #6355, so that deployments can use a wider variety of HTTP header syntaxes to pass JWTs and have them authenticated by the jwt_authn filter. Backwards-compatible with existing usage. 
JWT authentication with the jwt_authn HTTP filter now permits header syntax like the following: Authorization: Bespoke jwt=eyJhbGciOiJS...ZFnFIw,extra=7,realm=123 Signed-off-by: Larry West --- .../filter/http/jwt_authn/v2alpha/README.md | 35 +++++++++++ docs/root/intro/version_history.rst | 1 + .../filters/http/jwt_authn/extractor.cc | 41 +++++++++++- test/extensions/filters/http/jwt_authn/BUILD | 1 + .../filters/http/jwt_authn/extractor_test.cc | 62 +++++++++++++++++++ 5 files changed, 137 insertions(+), 3 deletions(-) diff --git a/api/envoy/config/filter/http/jwt_authn/v2alpha/README.md b/api/envoy/config/filter/http/jwt_authn/v2alpha/README.md index 9d083389a5aea..c390a4d5ce506 100644 --- a/api/envoy/config/filter/http/jwt_authn/v2alpha/README.md +++ b/api/envoy/config/filter/http/jwt_authn/v2alpha/README.md @@ -29,3 +29,38 @@ If a custom location is desired, `from_headers` or `from_params` can be used to ## HTTP header to pass successfully verified JWT If a JWT is valid, its payload will be passed to the backend in a new HTTP header specified in `forward_payload_header` field. Its value is base64 encoded JWT payload in JSON. + + +## Further header options + +In addition to the `name` field, which specifies the HTTP header name, +the `from_headers` section can specify an optional `value_prefix` value, as in: + +```yaml + from_headers: + - name: bespoke + value_prefix: jwt_value +``` + +The above will cause the jwt_authn filter to look for the JWT in the `bespoke` header, following the tag `jwt_value`. + +Any non-JWT characters (i.e., anything _other than_ alphanumerics, `_`, `-`, and `.`) will be skipped, +and all following, contiguous, JWT-legal chars will be taken as the JWT. 
+ +This means all of the following will return a JWT of `eyJFbnZveSI6ICJyb2NrcyJ9.e30.c2lnbmVk`: + +```text +bespoke: jwt_value=eyJFbnZveSI6ICJyb2NrcyJ9.e30.c2lnbmVk + +bespoke: {"jwt_value": "eyJFbnZveSI6ICJyb2NrcyJ9.e30.c2lnbmVk"} + +bespoke: beta:true,jwt_value:"eyJFbnZveSI6ICJyb2NrcyJ9.e30.c2lnbmVk",trace=1234 +``` + +The header `name` may be `Authorization`. + +The `value_prefix` must match exactly, i.e., case-sensitively. +If the `value_prefix` is not found, the header is skipped: not considered as a source for a JWT token. + +If there are no JWT-legal characters after the `value_prefix`, the entire string after it +is taken to be the JWT token. This is unlikely to succeed; the error will be reported by the JWT parser. \ No newline at end of file diff --git a/docs/root/intro/version_history.rst b/docs/root/intro/version_history.rst index 04048c2037ade..5551b95823419 100644 --- a/docs/root/intro/version_history.rst +++ b/docs/root/intro/version_history.rst @@ -53,6 +53,7 @@ Version history * http: added :ref:`max request headers size `. The default behaviour is unchanged. * http: added modifyDecodingBuffer/modifyEncodingBuffer to allow modifying the buffered request/response data. * http: added encodeComplete/decodeComplete. These are invoked at the end of the stream, after all data has been encoded/decoded respectively. Default implementation is a no-op. +* jwt_authn: make filter's parsing of JWT more flexible, allowing syntax like ``jwt=eyJhbGciOiJS...ZFnFIw,extra=7,realm=123`` * outlier_detection: added support for :ref:`outlier detection event protobuf-based logging `. * mysql: added a MySQL proxy filter that is capable of parsing SQL queries over MySQL wire protocol. Refer to :ref:`MySQL proxy` for more details. * performance: new buffer implementation (disabled by default; to test it, add "--use-libevent-buffers 0" to the command-line arguments when starting Envoy).
diff --git a/source/extensions/filters/http/jwt_authn/extractor.cc b/source/extensions/filters/http/jwt_authn/extractor.cc index 979275981d735..9e0bd9ea64043 100644 --- a/source/extensions/filters/http/jwt_authn/extractor.cc +++ b/source/extensions/filters/http/jwt_authn/extractor.cc @@ -102,6 +102,10 @@ class ExtractorImpl : public Extractor { // ctor helper for a jwt provider config void addProvider(const JwtProvider& provider); + // @return what should be the 3-part base64url-encoded substring; see RFC-7519 + absl::string_view extractJWT(absl::string_view value_str, + absl::string_view::size_type after) const; + // HeaderMap value type to store prefix and issuers that specified this // header. struct HeaderLocationSpec { @@ -181,11 +185,12 @@ std::vector ExtractorImpl::extract(const Http::HeaderMap& h if (entry) { auto value_str = entry->value().getStringView(); if (!location_spec->value_prefix_.empty()) { - if (!absl::StartsWith(value_str, location_spec->value_prefix_)) { - // prefix doesn't match, skip it. + const auto pos = value_str.find(location_spec->value_prefix_); + if (pos == absl::string_view::npos) { + // value_prefix not found anywhere in value_str, so skip continue; } - value_str = value_str.substr(location_spec->value_prefix_.size()); + value_str = extractJWT(value_str, pos + location_spec->value_prefix_.length()); } tokens.push_back(std::make_unique( std::string(value_str), location_spec->specified_issuers_, location_spec->header_)); @@ -211,6 +216,36 @@ std::vector ExtractorImpl::extract(const Http::HeaderMap& h return tokens; } +// as specified in RFC-4648 § 5, plus dot (period, 0x2e), of which two are required in the JWT +constexpr absl::string_view ConstantBase64UrlEncodingCharsPlusDot = + "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_."; + +// Returns a token, not a URL: skips non-Base64Url-legal (or dot) characters, collects following +// Base64Url+dot string until first non-Base64Url char. 
+// +// The input parameters: +// "value_str" - the header value string, perhaps "Bearer string....", and +// "after" - the offset into that string after which to begin looking for JWT-legal characters +// +// For backwards compatibility, if it finds no suitable string, it returns value_str as-is. +// +// It is forgiving w.r.t. dots/periods, as the exact syntax will be verified after extraction. +// +// See RFC-7519 § 2, RFC-7515 § 2, and RFC-4648 "Base-N Encodings" § 5. +absl::string_view ExtractorImpl::extractJWT(absl::string_view value_str, + absl::string_view::size_type after) const { + const auto starting = value_str.find_first_of(ConstantBase64UrlEncodingCharsPlusDot, after); + if (starting == value_str.npos) { + return value_str; + } + // There should be two dots (periods; 0x2e) inside the string, but we don't verify that here + auto ending = value_str.find_first_not_of(ConstantBase64UrlEncodingCharsPlusDot, starting); + if (ending == value_str.npos) { // Base64Url-encoded string occupies the rest of the line + return value_str.substr(starting); + } + return value_str.substr(starting, ending - starting); +} + void ExtractorImpl::sanitizePayloadHeaders(Http::HeaderMap& headers) const { for (const auto& header : forward_payload_headers_) { headers.remove(header); diff --git a/test/extensions/filters/http/jwt_authn/BUILD b/test/extensions/filters/http/jwt_authn/BUILD index b225c4510ed08..c09a8a9a97749 100644 --- a/test/extensions/filters/http/jwt_authn/BUILD +++ b/test/extensions/filters/http/jwt_authn/BUILD @@ -34,6 +34,7 @@ envoy_extension_cc_test( extension_name = "envoy.filters.http.jwt_authn", deps = [ "//source/extensions/filters/http/jwt_authn:extractor_lib", + "//test/extensions/filters/http/jwt_authn:test_common_lib", "//test/test_common:utility_lib", ], ) diff --git a/test/extensions/filters/http/jwt_authn/extractor_test.cc b/test/extensions/filters/http/jwt_authn/extractor_test.cc index 58b902fbed5e4..05d9c340d3bd3 100644 --- 
a/test/extensions/filters/http/jwt_authn/extractor_test.cc +++ b/test/extensions/filters/http/jwt_authn/extractor_test.cc @@ -2,6 +2,7 @@ #include "extensions/filters/http/jwt_authn/extractor.h" +#include "test/extensions/filters/http/jwt_authn/test_common.h" #include "test/test_common/utility.h" using ::envoy::config::filter::http::jwt_authn::v2alpha::JwtAuthentication; @@ -46,6 +47,16 @@ const char ExampleConfig[] = R"( from_headers: - name: prefix-header value_prefix: AAABBB + provider7: + issuer: issuer7 + from_headers: + - name: prefix-header + value_prefix: CCCDDD + provider8: + issuer: issuer8 + from_headers: + - name: prefix-header + value_prefix: '"CCCDDD"' )"; class ExtractorTest : public testing::Test { @@ -102,6 +113,19 @@ TEST_F(ExtractorTest, TestDefaultHeaderLocation) { EXPECT_FALSE(headers.Authorization()); } +// Test extracting JWT as Bearer token from the default header location: "Authorization" - +// using an actual (correctly-formatted) JWT: +TEST_F(ExtractorTest, TestDefaultHeaderLocationWithValidJWT) { + auto headers = + TestHeaderMapImpl{{absl::StrCat("Authorization"), absl::StrCat("Bearer ", GoodToken)}}; + auto tokens = extractor_->extract(headers); + EXPECT_EQ(tokens.size(), 1); + + // Only the issue1 is using default header location. + EXPECT_EQ(tokens[0]->token(), GoodToken); + EXPECT_TRUE(tokens[0]->isIssuerSpecified("issuer1")); +} + // Test extracting token from the default query parameter: "access_token" TEST_F(ExtractorTest, TestDefaultParamLocation) { auto headers = TestHeaderMapImpl{{":path", "/path?access_token=jwt_token"}}; @@ -172,6 +196,44 @@ TEST_F(ExtractorTest, TestPrefixHeaderMatch) { EXPECT_FALSE(headers.get(Http::LowerCaseString("prefix-header"))); } +// Test extracting token from the custom header: "prefix-header" +// The value is found after the "CCCDDD", then between the '=' and the ','. 
+TEST_F(ExtractorTest, TestPrefixHeaderFlexibleMatch1) { + auto headers = TestHeaderMapImpl{{"prefix-header", "preamble CCCDDD=jwt_token,extra=more"}}; + auto tokens = extractor_->extract(headers); + EXPECT_EQ(tokens.size(), 1); + + // Match issuer 7 with map key as: prefix-header + 'CCCDDD' + EXPECT_TRUE(tokens[0]->isIssuerSpecified("issuer7")); + EXPECT_EQ(tokens[0]->token(), "jwt_token"); +} + +TEST_F(ExtractorTest, TestPrefixHeaderFlexibleMatch2) { + auto headers = + TestHeaderMapImpl{{"prefix-header", "CCCDDD=\"and0X3Rva2Vu\",comment=\"fish tag\""}}; + auto tokens = extractor_->extract(headers); + EXPECT_EQ(tokens.size(), 1); + + // Match issuer 7 with map key as: prefix-header + 'CCCDDD' + EXPECT_TRUE(tokens[0]->isIssuerSpecified("issuer7")); + EXPECT_EQ(tokens[0]->token(), "and0X3Rva2Vu"); +} + +TEST_F(ExtractorTest, TestPrefixHeaderFlexibleMatch3) { + auto headers = TestHeaderMapImpl{ + {"prefix-header", "creds={\"authLevel\": \"20\", \"CCCDDD\": \"and0X3Rva2Vu\"}"}}; + auto tokens = extractor_->extract(headers); + EXPECT_EQ(tokens.size(), 2); + + // Match issuer 8 with map key as: prefix-header + '"CCCDDD"' + EXPECT_TRUE(tokens[0]->isIssuerSpecified("issuer8")); + EXPECT_EQ(tokens[0]->token(), "and0X3Rva2Vu"); + + // Match issuer 7 with map key as: prefix-header + 'CCCDDD' + EXPECT_TRUE(tokens[1]->isIssuerSpecified("issuer7")); + EXPECT_EQ(tokens[1]->token(), "and0X3Rva2Vu"); +} + // Test extracting token from the custom query parameter: "token_param" TEST_F(ExtractorTest, TestCustomParamToken) { auto headers = TestHeaderMapImpl{{":path", "/path?token_param=jwt_token"}}; From fa69fad0de6b63a254ce6a9f8164b31163a1ada0 Mon Sep 17 00:00:00 2001 From: Lizan Zhou Date: Mon, 8 Apr 2019 12:40:46 -0700 Subject: [PATCH 077/165] ci: remove ci workspace (#6229) Description: We no longer have prebuilt dependencies, so ci/WORKSPACE is no longer needed. Simplifies how CI runs.
Risk Level: Med for depending projects Testing: CI Docs Changes: Release Notes: Signed-off-by: Lizan Zhou --- .bazelignore | 2 + WORKSPACE | 8 +-- bazel/api_repositories.bzl | 35 ++++++++++++ bazel/repositories.bzl | 57 ++++--------------- ci/WORKSPACE | 29 ---------- ci/WORKSPACE.filter.example | 6 +- ci/build_setup.sh | 16 +----- ci/do_ci.sh | 39 +++---------- ci/run_clang_tidy.sh | 14 +++-- .../grpc_client_integration_test_harness.h | 2 +- 10 files changed, 75 insertions(+), 133 deletions(-) create mode 100644 .bazelignore create mode 100644 bazel/api_repositories.bzl delete mode 100644 ci/WORKSPACE diff --git a/.bazelignore b/.bazelignore new file mode 100644 index 0000000000000..04680184abec6 --- /dev/null +++ b/.bazelignore @@ -0,0 +1,2 @@ +api +examples/grpc-bridge/script diff --git a/WORKSPACE b/WORKSPACE index ec06147ab36f5..5609189bd56df 100644 --- a/WORKSPACE +++ b/WORKSPACE @@ -1,5 +1,9 @@ workspace(name = "envoy") +load("//bazel:api_repositories.bzl", "envoy_api_dependencies") + +envoy_api_dependencies() + load("//bazel:repositories.bzl", "GO_VERSION", "envoy_dependencies") load("//bazel:cc_configure.bzl", "cc_configure") @@ -11,10 +15,6 @@ rules_foreign_cc_dependencies() cc_configure() -load("@envoy_api//bazel:repositories.bzl", "api_dependencies") - -api_dependencies() - load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_dependencies") go_rules_dependencies() diff --git a/bazel/api_repositories.bzl b/bazel/api_repositories.bzl new file mode 100644 index 0000000000000..016fb16c8a2ee --- /dev/null +++ b/bazel/api_repositories.bzl @@ -0,0 +1,35 @@ +def _default_envoy_api_impl(ctx): + ctx.file("WORKSPACE", "") + ctx.file("BUILD.bazel", "") + api_dirs = [ + "bazel", + "docs", + "envoy", + "examples", + "test", + "tools", + ] + for d in api_dirs: + ctx.symlink(ctx.path(ctx.attr.api).dirname.get_child(d), d) + +_default_envoy_api = repository_rule( + implementation = _default_envoy_api_impl, + attrs = { + "api": attr.label(default = 
"@envoy//api:BUILD"), + }, +) + +def envoy_api_dependencies(): + # Treat the data plane API as an external repo, this simplifies exporting the API to + # https://github.com/envoyproxy/data-plane-api. + if "envoy_api" not in native.existing_rules().keys(): + _default_envoy_api(name = "envoy_api") + + native.bind( + name = "api_httpbody_protos", + actual = "@googleapis//:api_httpbody_protos", + ) + native.bind( + name = "http_api_protos", + actual = "@googleapis//:http_api_protos", + ) diff --git a/bazel/repositories.bzl b/bazel/repositories.bzl index f2cfc4cfe6c1a..a6b762243efeb 100644 --- a/bazel/repositories.bzl +++ b/bazel/repositories.bzl @@ -1,6 +1,6 @@ load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") load(":genrule_repository.bzl", "genrule_repository") -load("//api/bazel:envoy_http_archive.bzl", "envoy_http_archive") +load("@envoy_api//bazel:envoy_http_archive.bzl", "envoy_http_archive") load(":repository_locations.bzl", "REPOSITORY_LOCATIONS") load( "@bazel_tools//tools/cpp:windows_cc_configure.bzl", @@ -8,6 +8,7 @@ load( "setup_vc_env_vars", ) load("@bazel_tools//tools/cpp:lib_cc_configure.bzl", "get_env_var") +load("@envoy_api//bazel:repositories.bzl", "api_dependencies") # dict of {build recipe name: longform extension name,} PPC_SKIP_TARGETS = {"luajit": "envoy.filters.http.lua"} @@ -38,27 +39,6 @@ _default_envoy_build_config = repository_rule( }, ) -def _default_envoy_api_impl(ctx): - ctx.file("WORKSPACE", "") - ctx.file("BUILD.bazel", "") - api_dirs = [ - "bazel", - "docs", - "envoy", - "examples", - "test", - "tools", - ] - for d in api_dirs: - ctx.symlink(ctx.path(ctx.attr.api).dirname.get_child(d), d) - -_default_envoy_api = repository_rule( - implementation = _default_envoy_api_impl, - attrs = { - "api": attr.label(default = "@envoy//api:BUILD"), - }, -) - # Python dependencies. If these become non-trivial, we might be better off using a virtualenv to # wrap them, but for now we can treat them as first-class Bazel. 
def _python_deps(): @@ -94,6 +74,14 @@ def _python_deps(): name = "com_github_twitter_common_finagle_thrift", build_file = "@envoy//bazel/external:twitter_common_finagle_thrift.BUILD", ) + _repository_impl( + name = "six_archive", + build_file = "@com_google_protobuf//:six.BUILD", + ) + native.bind( + name = "six", + actual = "@six_archive//:six", + ) # Bazel native C++ dependencies. For the dependencies that doesn't provide autoconf/automake builds. def _cc_deps(): @@ -127,29 +115,6 @@ def _go_deps(skip_targets): _repository_impl("io_bazel_rules_go") _repository_impl("bazel_gazelle") -def _envoy_api_deps(): - # Treat the data plane API as an external repo, this simplifies exporting the API to - # https://github.com/envoyproxy/data-plane-api. - if "envoy_api" not in native.existing_rules().keys(): - _default_envoy_api(name = "envoy_api") - - native.bind( - name = "api_httpbody_protos", - actual = "@googleapis//:api_httpbody_protos", - ) - native.bind( - name = "http_api_protos", - actual = "@googleapis//:http_api_protos", - ) - _repository_impl( - name = "six_archive", - build_file = "@com_google_protobuf//:six.BUILD", - ) - native.bind( - name = "six", - actual = "@six_archive//:six", - ) - def envoy_dependencies(skip_targets = []): # Treat Envoy's overall build config as an external repo, so projects that # build Envoy as a subcomponent can easily override the config. @@ -207,7 +172,7 @@ def envoy_dependencies(skip_targets = []): _python_deps() _cc_deps() _go_deps(skip_targets) - _envoy_api_deps() + api_dependencies() def _boringssl(): _repository_impl("boringssl") diff --git a/ci/WORKSPACE b/ci/WORKSPACE deleted file mode 100644 index f33b9aa583168..0000000000000 --- a/ci/WORKSPACE +++ /dev/null @@ -1,29 +0,0 @@ -workspace(name = "ci") - -load("//bazel:repositories.bzl", "GO_VERSION", "envoy_dependencies") -load("//bazel:cc_configure.bzl", "cc_configure") - -# We shouldn't need this, but it's a workaround for https://github.com/bazelbuild/bazel/issues/3580. 
-local_repository( - name = "envoy", - path = "/source", -) - -envoy_dependencies() - -# TODO(htuch): Roll this into envoy_dependencies() -load("@rules_foreign_cc//:workspace_definitions.bzl", "rules_foreign_cc_dependencies") - -rules_foreign_cc_dependencies() - -cc_configure() - -load("@envoy_api//bazel:repositories.bzl", "api_dependencies") - -api_dependencies() - -load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_dependencies") - -go_rules_dependencies() - -go_register_toolchains(go_version = GO_VERSION) diff --git a/ci/WORKSPACE.filter.example b/ci/WORKSPACE.filter.example index 6262671453103..4eb98345a13f7 100644 --- a/ci/WORKSPACE.filter.example +++ b/ci/WORKSPACE.filter.example @@ -5,6 +5,9 @@ local_repository( path = "/source", ) +load("@envoy//bazel:api_repositories.bzl", "envoy_api_dependencies") +envoy_api_dependencies() + load("@envoy//bazel:repositories.bzl", "envoy_dependencies", "GO_VERSION") load("@envoy//bazel:cc_configure.bzl", "cc_configure") @@ -16,9 +19,6 @@ rules_foreign_cc_dependencies() cc_configure() -load("@envoy_api//bazel:repositories.bzl", "api_dependencies") -api_dependencies() - load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_dependencies") go_rules_dependencies() go_register_toolchains(go_version = GO_VERSION) diff --git a/ci/build_setup.sh b/ci/build_setup.sh index 6e969f272c652..fa685a7eb498b 100755 --- a/ci/build_setup.sh +++ b/ci/build_setup.sh @@ -68,7 +68,6 @@ else fi # Not sandboxing, since non-privileged Docker can't do nested namespaces. 
-BAZEL_OPTIONS="--package_path %workspace%:${ENVOY_SRCDIR}" export BAZEL_QUERY_OPTIONS="${BAZEL_OPTIONS}" export BAZEL_BUILD_OPTIONS="--strategy=Genrule=standalone --spawn_strategy=standalone \ --verbose_failures ${BAZEL_OPTIONS} --action_env=HOME --action_env=PYTHONUSERBASE \ @@ -92,7 +91,7 @@ if [ "$1" != "-nofetch" ]; then then git clone https://github.com/envoyproxy/envoy-filter-example.git "${ENVOY_FILTER_EXAMPLE_SRCDIR}" fi - + # This is the hash on https://github.com/envoyproxy/envoy-filter-example.git we pin to. (cd "${ENVOY_FILTER_EXAMPLE_SRCDIR}" && git fetch origin && git checkout -f 6c0625cb4cc9a21df97cef2a1d065463f2ae81ae) cp -f "${ENVOY_SRCDIR}"/ci/WORKSPACE.filter.example "${ENVOY_FILTER_EXAMPLE_SRCDIR}"/WORKSPACE @@ -101,7 +100,6 @@ fi # Also setup some space for building Envoy standalone. export ENVOY_BUILD_DIR="${BUILD_DIR}"/envoy mkdir -p "${ENVOY_BUILD_DIR}" -cp -f "${ENVOY_SRCDIR}"/ci/WORKSPACE "${ENVOY_BUILD_DIR}" # This is where we copy build deliverables to. export ENVOY_DELIVERY_DIR="${ENVOY_BUILD_DIR}"/source/exe @@ -119,29 +117,17 @@ mkdir -p "${ENVOY_FAILED_TEST_LOGS}" export ENVOY_BUILD_PROFILE="${ENVOY_BUILD_DIR}"/generated/build-profile mkdir -p "${ENVOY_BUILD_PROFILE}" -# This is where we build for bazel.release* and bazel.dev. -export ENVOY_CI_DIR="${ENVOY_SRCDIR}"/ci - function cleanup() { # Remove build artifacts. This doesn't mess with incremental builds as these # are just symlinks. rm -rf "${ENVOY_SRCDIR}"/bazel-* - rm -rf "${ENVOY_CI_DIR}"/bazel-* - rm -rf "${ENVOY_CI_DIR}"/bazel - rm -rf "${ENVOY_CI_DIR}"/tools - rm -f "${ENVOY_CI_DIR}"/.bazelrc } cleanup trap cleanup EXIT -# Hack due to https://github.com/envoyproxy/envoy/issues/838 and the need to have -# .bazelrc available for build linkstamping. 
mkdir -p "${ENVOY_FILTER_EXAMPLE_SRCDIR}"/bazel -mkdir -p "${ENVOY_CI_DIR}"/bazel ln -sf "${ENVOY_SRCDIR}"/bazel/get_workspace_status "${ENVOY_FILTER_EXAMPLE_SRCDIR}"/bazel/ -ln -sf "${ENVOY_SRCDIR}"/bazel/get_workspace_status "${ENVOY_CI_DIR}"/bazel/ cp -f "${ENVOY_SRCDIR}"/.bazelrc "${ENVOY_FILTER_EXAMPLE_SRCDIR}"/ -cp -f "${ENVOY_SRCDIR}"/.bazelrc "${ENVOY_CI_DIR}"/ export BUILDIFIER_BIN="/usr/local/bin/buildifier" diff --git a/ci/do_ci.sh b/ci/do_ci.sh index dff1ee8c1ceae..aa2105a0184aa 100755 --- a/ci/do_ci.sh +++ b/ci/do_ci.sh @@ -13,6 +13,7 @@ fi . "$(dirname "$0")"/setup_gcs_cache.sh . "$(dirname "$0")"/build_setup.sh $build_setup_args +cd "${ENVOY_SRCDIR}" echo "building using ${NUM_CPUS} CPUs" @@ -27,11 +28,12 @@ function bazel_with_collection() { if [ "${BAZEL_STATUS}" != "0" ] then declare -r FAILED_TEST_LOGS="$(grep " /build.*test.log" "${BAZEL_OUTPUT}" | sed -e 's/ \/build.*\/testlogs\/\(.*\)/\1/')" - cd bazel-testlogs + pushd bazel-testlogs for f in ${FAILED_TEST_LOGS} do cp --parents -f $f "${ENVOY_FAILED_TEST_LOGS}" done + popd exit "${BAZEL_STATUS}" fi collect_build_profile $1 @@ -39,13 +41,12 @@ function bazel_with_collection() { function bazel_release_binary_build() { echo "Building..." - pushd "${ENVOY_CI_DIR}" bazel build ${BAZEL_BUILD_OPTIONS} -c opt //source/exe:envoy-static collect_build_profile release_build # Copy the envoy-static binary somewhere that we can access outside of the # container. cp -f \ - "${ENVOY_CI_DIR}"/bazel-bin/source/exe/envoy-static \ + "${ENVOY_SRCDIR}"/bazel-bin/source/exe/envoy-static \ "${ENVOY_DELIVERY_DIR}"/envoy # TODO(mattklein123): Replace this with caching and a different job which creates images. 
@@ -54,20 +55,16 @@ function bazel_release_binary_build() { cp -f "${ENVOY_DELIVERY_DIR}"/envoy "${ENVOY_SRCDIR}"/build_release mkdir -p "${ENVOY_SRCDIR}"/build_release_stripped strip "${ENVOY_DELIVERY_DIR}"/envoy -o "${ENVOY_SRCDIR}"/build_release_stripped/envoy - # TODO(wu-bin): Remove once https://github.com/envoyproxy/envoy/pull/6229 is merged. - bazel clean - popd } function bazel_debug_binary_build() { echo "Building..." - cd "${ENVOY_CI_DIR}" bazel build ${BAZEL_BUILD_OPTIONS} -c dbg //source/exe:envoy-static collect_build_profile debug_build # Copy the envoy-static binary somewhere that we can access outside of the # container. cp -f \ - "${ENVOY_CI_DIR}"/bazel-bin/source/exe/envoy-static \ + "${ENVOY_SRCDIR}"/bazel-bin/source/exe/envoy-static \ "${ENVOY_DELIVERY_DIR}"/envoy-debug } @@ -122,12 +119,12 @@ elif [[ "$1" == "bazel.asan" ]]; then setup_clang_toolchain echo "bazel ASAN/UBSAN debug build with tests" echo "Building and testing envoy tests..." - cd "${ENVOY_SRCDIR}" bazel_with_collection test ${BAZEL_TEST_OPTIONS} -c dbg --config=clang-asan //test/... echo "Building and testing envoy-filter-example tests..." - cd "${ENVOY_FILTER_EXAMPLE_SRCDIR}" + pushd "${ENVOY_FILTER_EXAMPLE_SRCDIR}" bazel_with_collection test ${BAZEL_TEST_OPTIONS} -c dbg --config=clang-asan \ //:echo2_integration_test //:envoy_binary_test + popd # Also validate that integration test traffic tapping (useful when debugging etc.) # works. This requires that we set TAP_PATH. We do this under bazel.asan to # ensure a debug build in CI. 
@@ -135,7 +132,6 @@ elif [[ "$1" == "bazel.asan" ]]; then TAP_TMP=/tmp/tap/ rm -rf "${TAP_TMP}" mkdir -p "${TAP_TMP}" - cd "${ENVOY_SRCDIR}" bazel_with_collection test ${BAZEL_TEST_OPTIONS} -c dbg --config=clang-asan \ //test/extensions/transport_sockets/tls/integration:ssl_integration_test \ --test_env=TAP_PATH="${TAP_TMP}/tap" @@ -148,7 +144,6 @@ elif [[ "$1" == "bazel.tsan" ]]; then setup_clang_toolchain echo "bazel TSAN debug build with tests" echo "Building and testing envoy tests..." - cd "${ENVOY_SRCDIR}" bazel_with_collection test ${BAZEL_TEST_OPTIONS} -c dbg --config=clang-tsan //test/... echo "Building and testing envoy-filter-example tests..." cd "${ENVOY_FILTER_EXAMPLE_SRCDIR}" @@ -159,13 +154,12 @@ elif [[ "$1" == "bazel.dev" ]]; then setup_clang_toolchain # This doesn't go into CI but is available for developer convenience. echo "bazel fastbuild build with tests..." - cd "${ENVOY_CI_DIR}" echo "Building..." bazel build ${BAZEL_BUILD_OPTIONS} -c fastbuild //source/exe:envoy-static # Copy the envoy-static binary somewhere that we can access outside of the # container for developers. cp -f \ - "${ENVOY_CI_DIR}"/bazel-bin/source/exe/envoy-static \ + "${ENVOY_SRCDIR}"/bazel-bin/source/exe/envoy-static \ "${ENVOY_DELIVERY_DIR}"/envoy-fastbuild echo "Building and testing..." bazel test ${BAZEL_TEST_OPTIONS} -c fastbuild //test/... @@ -187,7 +181,6 @@ elif [[ "$1" == "bazel.compile_time_options" ]]; then # This doesn't go into CI but is available for developer convenience. echo "bazel with different compiletime options build with tests..." # Building all the dependencies from scratch to link them against libc++. - cd "${ENVOY_SRCDIR}" echo "Building..." bazel build ${BAZEL_BUILD_OPTIONS} ${COMPILE_TIME_OPTIONS} -c dbg //source/exe:envoy-static echo "Building and testing..." @@ -213,13 +206,11 @@ elif [[ "$1" == "bazel.ipv6_tests" ]]; then setup_clang_toolchain echo "Testing..." 
- cd "${ENVOY_CI_DIR}" bazel_with_collection test ${BAZEL_TEST_OPTIONS} --test_env=ENVOY_IP_TEST_VERSIONS=v6only -c fastbuild \ //test/integration/... //test/common/network/... exit 0 elif [[ "$1" == "bazel.api" ]]; then setup_clang_toolchain - cd "${ENVOY_CI_DIR}" echo "Building API..." bazel build ${BAZEL_BUILD_OPTIONS} -c fastbuild @envoy_api//envoy/... echo "Testing API..." @@ -232,7 +223,6 @@ elif [[ "$1" == "bazel.coverage" ]]; then # gcovr is a pain to run with `bazel run`, so package it up into a # relocatable and hermetic-ish .par file. - cd "${ENVOY_SRCDIR}" bazel build @com_github_gcovr_gcovr//:gcovr.par export GCOVR="/tmp/gcovr.par" cp -f "${ENVOY_SRCDIR}/bazel-bin/external/com_github_gcovr_gcovr/gcovr.par" ${GCOVR} @@ -250,10 +240,7 @@ elif [[ "$1" == "bazel.coverage" ]]; then exit 0 elif [[ "$1" == "bazel.clang_tidy" ]]; then setup_clang_toolchain - # TODO(wu-bin): Remove once https://github.com/envoyproxy/envoy/pull/6229 is merged. - export BAZEL_BUILD_OPTIONS="${BAZEL_BUILD_OPTIONS} --linkopt='-Wl,--allow-multiple-definition'" - cd "${ENVOY_CI_DIR}" - ./run_clang_tidy.sh + ci/run_clang_tidy.sh exit 0 elif [[ "$1" == "bazel.coverity" ]]; then # Coverity Scan version 2017.07 fails to analyze the entirely of the Envoy @@ -263,7 +250,6 @@ elif [[ "$1" == "bazel.coverity" ]]; then setup_gcc_toolchain echo "bazel Coverity Scan build" echo "Building..." - cd "${ENVOY_CI_DIR}" /build/cov-analysis/bin/cov-build --dir "${ENVOY_BUILD_DIR}"/cov-int bazel build --action_env=LD_PRELOAD ${BAZEL_BUILD_OPTIONS} \ -c opt //source/exe:envoy-static # tar up the coverity results @@ -275,11 +261,9 @@ elif [[ "$1" == "bazel.coverity" ]]; then exit 0 elif [[ "$1" == "fix_format" ]]; then echo "fix_format..." - cd "${ENVOY_SRCDIR}" ./tools/check_format.py fix exit 0 elif [[ "$1" == "check_format" ]]; then - cd "${ENVOY_SRCDIR}" echo "check_format_test..." ./tools/check_format_test_helper.py --log=WARN echo "check_format..." 
@@ -287,27 +271,22 @@ elif [[ "$1" == "check_format" ]]; then ./tools/format_python_tools.sh check exit 0 elif [[ "$1" == "check_repositories" ]]; then - cd "${ENVOY_SRCDIR}" echo "check_repositories..." ./tools/check_repositories.sh exit 0 elif [[ "$1" == "check_spelling" ]]; then - cd "${ENVOY_SRCDIR}" echo "check_spelling..." ./tools/check_spelling.sh check exit 0 elif [[ "$1" == "fix_spelling" ]];then - cd "${ENVOY_SRCDIR}" echo "fix_spell..." ./tools/check_spelling.sh fix exit 0 elif [[ "$1" == "check_spelling_pedantic" ]]; then - cd "${ENVOY_SRCDIR}" echo "check_spelling_pedantic..." ./tools/check_spelling_pedantic.py check exit 0 elif [[ "$1" == "fix_spelling_pedantic" ]]; then - cd "${ENVOY_SRCDIR}" echo "fix_spelling_pedantic..." ./tools/check_spelling_pedantic.py fix exit 0 diff --git a/ci/run_clang_tidy.sh b/ci/run_clang_tidy.sh index 29d4381b51828..27c8212b87ef0 100755 --- a/ci/run_clang_tidy.sh +++ b/ci/run_clang_tidy.sh @@ -3,6 +3,15 @@ set -e echo "Generating compilation database..." + +cp -f .bazelrc .bazelrc.bak + +function cleanup() { + cp -f .bazelrc.bak .bazelrc + rm -f .bazelrc.bak +} +trap cleanup EXIT + # The compilation database generate script doesn't support passing build options via CLI. # Writing them into bazelrc echo "build ${BAZEL_BUILD_OPTIONS}" >> .bazelrc @@ -11,11 +20,6 @@ echo "build ${BAZEL_BUILD_OPTIONS}" >> .bazelrc # by clang-tidy "${ENVOY_SRCDIR}/tools/gen_compilation_database.py" --run_bazel_build --include_headers -# It had to be in ENVOY_CI_DIR to run bazel to generate compile database, but clang-tidy-diff -# diff against current directory, moving them to ENVOY_SRCDIR. -mv ./compile_commands.json "${ENVOY_SRCDIR}/compile_commands.json" -cd "${ENVOY_SRCDIR}" - # Do not run incremental clang-tidy on check_format testdata files. 
function exclude_testdata() { grep -v tools/testdata/check_format/ diff --git a/test/common/grpc/grpc_client_integration_test_harness.h b/test/common/grpc/grpc_client_integration_test_harness.h index f241151682fe5..b5045be4171e6 100644 --- a/test/common/grpc/grpc_client_integration_test_harness.h +++ b/test/common/grpc/grpc_client_integration_test_harness.h @@ -271,7 +271,7 @@ class GrpcClientIntegrationTest : public GrpcClientIntegrationParamTest { *dispatcher_, fake_upstream_->localAddress(), nullptr, std::move(async_client_transport_socket_), nullptr); ON_CALL(*mock_cluster_info_, connectTimeout()) - .WillByDefault(Return(std::chrono::milliseconds(1000))); + .WillByDefault(Return(std::chrono::milliseconds(10000))); EXPECT_CALL(*mock_cluster_info_, name()).WillRepeatedly(ReturnRef(fake_cluster_name_)); EXPECT_CALL(cm_, get(_)).WillRepeatedly(Return(&thread_local_cluster_)); EXPECT_CALL(thread_local_cluster_, info()).WillRepeatedly(Return(cluster_info_ptr_)); From 9e22bab3765978f9d4dbd8090f717304524ccfc5 Mon Sep 17 00:00:00 2001 From: Elisha Ziskind Date: Tue, 9 Apr 2019 09:03:01 -0400 Subject: [PATCH 078/165] test: fix use-of-uninitialized-value bug in unit test (#6515) Description: fixes a use of uninitialized value bug in this unit test Risk Level: low Testing: unit tests Signed-off-by: Elisha Ziskind --- test/extensions/filters/network/common/redis/codec_impl_test.cc | 1 + 1 file changed, 1 insertion(+) diff --git a/test/extensions/filters/network/common/redis/codec_impl_test.cc b/test/extensions/filters/network/common/redis/codec_impl_test.cc index 35c7e17fa8bea..b69b05f2bccad 100644 --- a/test/extensions/filters/network/common/redis/codec_impl_test.cc +++ b/test/extensions/filters/network/common/redis/codec_impl_test.cc @@ -65,6 +65,7 @@ TEST_F(RedisRespValueTest, EqualityTestingAndCopyingTest) { simplestring_value.type(RespType::SimpleString); error_value.type(RespType::Error); integer_value.type(RespType::Integer); + integer_value.asInteger() = 123; 
EXPECT_NE(bulkstring_value, simplestring_value); EXPECT_NE(bulkstring_value, error_value); From fe14b11b813efe7b4f2768ce20f035d128cc93ae Mon Sep 17 00:00:00 2001 From: Ismo Puustinen Date: Tue, 9 Apr 2019 16:53:29 +0300 Subject: [PATCH 079/165] tests: fix a compilation error. (#6521) Catch the CodecProtocolException by reference to fix the following error when building tests: test/common/http/http1/codec_impl_test.cc: In member function 'virtual void Envoy::Http::Http1::Http1ServerConnectionImplTest_HeaderMutateEmbeddedCRLF_Test::TestBody()': test/common/http/http1/codec_impl_test.cc:347:16: error: catching polymorphic type 'class Envoy::Http::CodecProtocolException' by value [-Werror=catch-value=] } catch (CodecProtocolException) { ^~~~~~~~~~~~~~~~~~~~~~ cc1plus: all warnings being treated as errors Signed-off-by: Ismo Puustinen --- test/common/http/http1/codec_impl_test.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/common/http/http1/codec_impl_test.cc b/test/common/http/http1/codec_impl_test.cc index 79bea04d1c0fd..82e5c188788b9 100644 --- a/test/common/http/http1/codec_impl_test.cc +++ b/test/common/http/http1/codec_impl_test.cc @@ -344,7 +344,7 @@ TEST_F(Http1ServerConnectionImplTest, HeaderMutateEmbeddedCRLF) { absl::StrCat(example_input.substr(0, n), std::string(1, c), example_input.substr(n))); try { codec_->dispatch(buffer); - } catch (CodecProtocolException) { + } catch (CodecProtocolException&) { } } } From fd7c172af181275693297efbe148fd8bb414ef48 Mon Sep 17 00:00:00 2001 From: Daniel Mangum <31777345+HashedDan@users.noreply.github.com> Date: Tue, 9 Apr 2019 10:39:03 -0500 Subject: [PATCH 080/165] docs: deprecated.md to sphinx docs (#6454) Moved DEPRECATED.md to sphinx docs. 
Risk Level: Low - only documentation Testing: Compiles with sphinx docs without warnings or errors Docs Changes: deprecated.rst created in intro section of sphinx docs and added to toctree Release Notes: N/A Fixes: #6386 Signed-off-by: HashedDan --- CONTRIBUTING.md | 5 +- GOVERNANCE.md | 4 +- PULL_REQUESTS.md | 2 +- api/envoy/api/v2/cds.proto | 2 +- docs/root/configuration/runtime.rst | 2 +- .../root/intro/deprecated.rst | 76 ++++++++++--------- docs/root/intro/intro.rst | 1 + docs/root/intro/version_history.rst | 4 +- source/common/protobuf/utility.cc | 2 +- tools/deprecate_version/deprecate_version.py | 6 +- 10 files changed, 56 insertions(+), 48 deletions(-) rename DEPRECATED.md => docs/root/intro/deprecated.rst (62%) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index fab701755c24d..fa45143de8bd7 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -51,7 +51,7 @@ maximize the chances of your PR being merged. deprecation window. Within this window, a warning of deprecation should be carefully logged (some features might need rate limiting for logging this). We make no guarantees about code or deployments that rely on undocumented behavior. -* All deprecations/breaking changes will be clearly listed in [DEPRECATED.md](DEPRECATED.md). +* All deprecations/breaking changes will be clearly listed in the [deprecated log](docs/root/intro/deprecated.rst). * High risk deprecations//breaking changes may be announced to the [envoy-announce](https://groups.google.com/forum/#!forum/envoy-announce) email list but by default it is expected the multi-phase warn-by-default/fail-by-default is sufficient to warn users to move @@ -132,7 +132,8 @@ maximize the chances of your PR being merged. changes for 7 days. Obviously PRs that are closed due to lack of activity can be reopened later. Closing stale PRs helps us to keep on top of all of the work currently in flight. * If a commit deprecates a feature, the commit message must mention what has been deprecated. 
- Additionally, [DEPRECATED.md](DEPRECATED.md) must be updated as part of the commit. + Additionally, the [deprecated log](docs/root/intro/deprecated.rst) must be updated with relevant + RST links for fields and messages as part of the commit. * Please consider joining the [envoy-dev](https://groups.google.com/forum/#!forum/envoy-dev) mailing list. * If your PR involves any changes to diff --git a/GOVERNANCE.md b/GOVERNANCE.md index 1c182eb00f75d..70417bc89c234 100644 --- a/GOVERNANCE.md +++ b/GOVERNANCE.md @@ -84,7 +84,7 @@ or you can subscribe to the iCal feed [here](https://app.opsgenie.com/webcal/get corrections. * Switch the [VERSION](VERSION) from a "dev" variant to a final variant. E.g., "1.6.0-dev" to "1.6.0". Also remove the "Pending" tag from the top of the [release notes](docs/root/intro/version_history.rst) - and [DEPRECATED.md](DEPRECATED.md). Get a review and merge. + and [deprecated log](docs/root/intro/deprecated.rst). Get a review and merge. * **Wait for tests to pass on [master](https://circleci.com/gh/envoyproxy/envoy/tree/master).** * Create a [tagged release](https://github.com/envoyproxy/envoy/releases). The release should @@ -99,7 +99,7 @@ or you can subscribe to the iCal feed [here](https://app.opsgenie.com/webcal/get Envoy account post). * Do a new PR to update [VERSION](VERSION) to the next development release. E.g., "1.7.0-dev". At the same time, also add a new empty "pending" section to the [release - notes](docs/root/intro/version_history.rst) and to [DEPRECATED.md](DEPRECATED.md) for the + notes](docs/root/intro/version_history.rst) and to [deprecated log](docs/root/intro/deprecated.rst) for the following version. E.g., "1.7.0 (pending)". * Run the deprecate_versions.py script (e.g. `sh tools/deprecate_version/deprecate_version.sh 1.8.0 1.10.0`) to file tracking issues for code which can be removed. 
diff --git a/PULL_REQUESTS.md b/PULL_REQUESTS.md index ad9cdafb99466..0bc71ab31e6bb 100644 --- a/PULL_REQUESTS.md +++ b/PULL_REQUESTS.md @@ -74,7 +74,7 @@ you may instead just tag the PR with the issue: ### Deprecated If this PR deprecates existing Envoy APIs or code, it should include -an update to the [deprecated file](DEPRECATED.md) and a one line note in the PR +an update to the [deprecated file](docs/root/intro/deprecated.rst) and a one line note in the PR description. If you mark existing APIs or code as deprecated, when the next release is cut, the diff --git a/api/envoy/api/v2/cds.proto b/api/envoy/api/v2/cds.proto index e13f8dc771860..6fb858efd6420 100644 --- a/api/envoy/api/v2/cds.proto +++ b/api/envoy/api/v2/cds.proto @@ -194,7 +194,7 @@ message Cluster { // :ref:`STRICT_DNS` // or :ref:`LOGICAL_DNS` clusters. // This field supersedes :ref:`hosts` field. - // [#comment:TODO(dio): Deprecate the hosts field and add it to DEPRECATED.md + // [#comment:TODO(dio): Deprecate the hosts field and add it to :ref:`deprecated log` // once load_assignment is implemented.] // // .. attention:: diff --git a/docs/root/configuration/runtime.rst b/docs/root/configuration/runtime.rst index 7d3ced54614c0..9934b005ba192 100644 --- a/docs/root/configuration/runtime.rst +++ b/docs/root/configuration/runtime.rst @@ -89,7 +89,7 @@ feature deprecation in Envoy is in 3 phases: warn-by-default, fail-by-default, a In the first phase, Envoy logs a warning to the warning log that the feature is deprecated and increments the :ref:`deprecated_feature_use ` runtime stat. -Users are encouraged to go to :repo:`DEPRECATED.md ` to see how to +Users are encouraged to go to :ref:`deprecated ` to see how to migrate to the new code path and make sure it is suitable for their use case. 
In the second phase the message and filename will be added to diff --git a/DEPRECATED.md b/docs/root/intro/deprecated.rst similarity index 62% rename from DEPRECATED.md rename to docs/root/intro/deprecated.rst index d7320aa7d87ea..423a16d492e5c 100644 --- a/DEPRECATED.md +++ b/docs/root/intro/deprecated.rst @@ -1,66 +1,72 @@ -# DEPRECATED +.. _deprecated: + +Deprecated +---------- As of release 1.3.0, Envoy will follow a -[Breaking Change Policy](https://github.com/envoyproxy/envoy/blob/master//CONTRIBUTING.md#breaking-change-policy). +`Breaking Change Policy `_. The following features have been DEPRECATED and will be removed in the specified release cycle. A logged warning is expected for each deprecated item that is in deprecation window. +Deprecated items below are listed in chronological order. -## Version 1.11.0 (Pending) +Version 1.11.0 (Pending) +======================== -## Version 1.10.0 (Apr 5, 2019) -* Use of `use_alpha` in [Ext-Authz Authorization Service](https://github.com/envoyproxy/envoy/blob/master/api/envoy/service/auth/v2/external_auth.proto) is deprecated. It should be used for a short time, and only when transitioning from alpha to V2 release version. +Version 1.10.0 (Apr 5, 2019) +============================ +* Use of `use_alpha` in :ref:`Ext-Authz Authorization Service ` is deprecated. It should be used for a short time, and only when transitioning from alpha to V2 release version. * Use of `enabled` in `CorsPolicy`, found in - [route.proto](https://github.com/envoyproxy/envoy/blob/master/api/envoy/api/v2/route/route.proto). + :ref:`route.proto `. Set the `filter_enabled` field instead. * Use of the `type` field in the `FaultDelay` message (found in - [fault.proto](https://github.com/envoyproxy/envoy/blob/master/api/envoy/config/filter/fault/v2/fault.proto)) + :ref:`fault.proto `) has been deprecated. It was never used and setting it has no effect. It will be removed in the following release. 
-## Version 1.9.0 (Dec 20, 2018) - -* Order of execution of the network write filter chain has been reversed. Prior to this release cycle it was incorrect, see [#4599](https://github.com/envoyproxy/envoy/issues/4599). In the 1.9.0 release cycle we introduced `bugfix_reverse_write_filter_order` in [lds.proto](https://github.com/envoyproxy/envoy/blob/master/api/envoy/api/v2/lds.proto) to temporarily support both old and new behaviors. Note this boolean field is deprecated. -* Order of execution of the HTTP encoder filter chain has been reversed. Prior to this release cycle it was incorrect, see [#4599](https://github.com/envoyproxy/envoy/issues/4599). In the 1.9.0 release cycle we introduced `bugfix_reverse_encode_order` in [http_connection_manager.proto](https://github.com/envoyproxy/envoy/blob/master/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto) to temporarily support both old and new behaviors. Note this boolean field is deprecated. +Version 1.9.0 (Dec 20, 2018) +============================ +* Order of execution of the network write filter chain has been reversed. Prior to this release cycle it was incorrect, see `#4599 `_. In the 1.9.0 release cycle we introduced `bugfix_reverse_write_filter_order` in `lds.proto `_ to temporarily support both old and new behaviors. Note this boolean field is deprecated. +* Order of execution of the HTTP encoder filter chain has been reversed. Prior to this release cycle it was incorrect, see `#4599 `_. In the 1.9.0 release cycle we introduced `bugfix_reverse_encode_order` in `http_connection_manager.proto `_ to temporarily support both old and new behaviors. Note this boolean field is deprecated. * Use of the v1 REST_LEGACY ApiConfigSource is deprecated. * Use of std::hash in the ring hash load balancer is deprecated. 
-* Use of `rate_limit_service` configuration in the [bootstrap configuration](https://github.com/envoyproxy/envoy/blob/master/api/envoy/config/bootstrap/v2/bootstrap.proto) is deprecated. +* Use of `rate_limit_service` configuration in the `bootstrap configuration `_ is deprecated. * Use of `runtime_key` in `RequestMirrorPolicy`, found in - [route.proto](https://github.com/envoyproxy/envoy/blob/master/api/envoy/api/v2/route/route.proto) + `route.proto `_ is deprecated. Set the `runtime_fraction` field instead. -* Use of buffer filter `max_request_time` is deprecated in favor of the request timeout found in [HttpConnectionManager](https://github.com/envoyproxy/envoy/blob/master/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto) - -## Version 1.8.0 (Oct 4, 2018) +* Use of buffer filter `max_request_time` is deprecated in favor of the request timeout found in `HttpConnectionManager `_ +Version 1.8.0 (Oct 4, 2018) +============================== * Use of the v1 API (including `*.deprecated_v1` fields in the v2 API) is deprecated. - See envoy-announce [email](https://groups.google.com/forum/#!topic/envoy-announce/oPnYMZw8H4U). + See envoy-announce `email `_. * Use of the legacy - [ratelimit.proto](https://github.com/envoyproxy/envoy/blob/b0a518d064c8255e0e20557a8f909b6ff457558f/source/common/ratelimit/ratelimit.proto) + `ratelimit.proto `_ is deprecated, in favor of the proto defined in - [date-plane-api](https://github.com/envoyproxy/envoy/blob/master/api/envoy/service/ratelimit/v2/rls.proto) + `data-plane-api `_ Prior to 1.8.0, Envoy can use either proto to send client requests to a ratelimit server with the use of the - `use_data_plane_proto` boolean flag in the [ratelimit configuration](https://github.com/envoyproxy/envoy/blob/master/api/envoy/config/ratelimit/v2/rls.proto). + `use_data_plane_proto` boolean flag in the `ratelimit configuration `_. However, when using the deprecated client a warning is logged.
* Use of the --v2-config-only flag. * Use of both `use_websocket` and `websocket_config` in - [route.proto](https://github.com/envoyproxy/envoy/blob/master/api/envoy/api/v2/route/route.proto) + `route.proto `_ is deprecated. Please use the new `upgrade_configs` in the - [HttpConnectionManager](https://github.com/envoyproxy/envoy/blob/master/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto) + `HttpConnectionManager `_ instead. -* Use of the integer `percent` field in [FaultDelay](https://github.com/envoyproxy/envoy/blob/master/api/envoy/config/filter/fault/v2/fault.proto) - and in [FaultAbort](https://github.com/envoyproxy/envoy/blob/master/api/envoy/config/filter/http/fault/v2/fault.proto) is deprecated in favor +* Use of the integer `percent` field in `FaultDelay `_ + and in `FaultAbort `_ is deprecated in favor of the new `FractionalPercent` based `percentage` field. * Setting hosts via `hosts` field in `Cluster` is deprecated. Use `load_assignment` instead. * Use of `response_headers_to_*` and `request_headers_to_add` are deprecated at the `RouteAction` level. Please use the configuration options at the `Route` level. * Use of `runtime` in `RouteMatch`, found in - [route.proto](https://github.com/envoyproxy/envoy/blob/master/api/envoy/api/v2/route/route.proto). + `route.proto `_. Set the `runtime_fraction` field instead. -* Use of the string `user` field in `Authenticated` in [rbac.proto](https://github.com/envoyproxy/envoy/blob/master/api/envoy/config/rbac/v2alpha/rbac.proto) +* Use of the string `user` field in `Authenticated` in `rbac.proto `_ is deprecated in favor of the new `StringMatcher` based `principal_name` field. -## Version 1.7.0 (Jun 21, 2018) - +Version 1.7.0 (Jun 21, 2018) +=============================== * Admin mutations should be sent as POSTs rather than GETs. HTTP GETs will result in an error status code and will not have their intended effect. 
Prior to 1.7, GETs can be used for admin mutations, but a warning is logged. @@ -76,8 +82,8 @@ A logged warning is expected for each deprecated item that is in deprecation win field where one can specify HeaderMatch objects to match on. * The `sni_domains` field in the filter chain match was deprecated/renamed to `server_names`. -## Version 1.6.0 (March 20, 2018) - +Version 1.6.0 (March 20, 2018) +================================= * DOWNSTREAM_ADDRESS log formatter is deprecated. Use DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT instead. * CLIENT_IP header formatter is deprecated. Use DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT instead. @@ -86,8 +92,8 @@ A logged warning is expected for each deprecated item that is in deprecation win * `value` and `regex` fields in the `HeaderMatcher` message is deprecated. Use the `exact_match` or `regex_match` oneof instead. -## Version 1.5.0 (Dec 4, 2017) - +Version 1.5.0 (Dec 4, 2017) +============================== * The outlier detection `ejections_total` stats counter has been deprecated and not replaced. Monitor the individual `ejections_detected_*` counters for the detectors of interest, or `ejections_enforced_total` for the total number of ejections that actually occurred. @@ -96,8 +102,8 @@ A logged warning is expected for each deprecated item that is in deprecation win * The outlier detection `ejections_success_rate` stats counter has been deprecated in favour of `ejections_detected_success_rate` and `ejections_enforced_success_rate`. -## Version 1.4.0 (Aug 24, 2017) - +Version 1.4.0 (Aug 24, 2017) +============================ * Config option `statsd_local_udp_port` has been deprecated and has been replaced with `statsd_udp_ip_address`. * `HttpFilterConfigFactory` filter API has been deprecated in favor of `NamedHttpFilterConfigFactory`. 
@@ -105,7 +111,7 @@ A logged warning is expected for each deprecated item that is in deprecation win * The following log macros have been deprecated: `log_trace`, `log_debug`, `conn_log`, `conn_log_info`, `conn_log_debug`, `conn_log_trace`, `stream_log`, `stream_log_info`, `stream_log_debug`, `stream_log_trace`. For replacements, please see - [logger.h](https://github.com/envoyproxy/envoy/blob/master/source/common/common/logger.h). + `logger.h `_. * The connectionId() and ssl() callbacks of StreamFilterCallbacks have been deprecated and replaced with a more general connection() callback, which, when not returning a nullptr, can be used to get the connection id and SSL connection from the returned Connection object pointer. diff --git a/docs/root/intro/intro.rst b/docs/root/intro/intro.rst index 014f89650d955..9133726de1783 100644 --- a/docs/root/intro/intro.rst +++ b/docs/root/intro/intro.rst @@ -12,3 +12,4 @@ Introduction comparison getting_help version_history + deprecated diff --git a/docs/root/intro/version_history.rst b/docs/root/intro/version_history.rst index 5551b95823419..c0498d313c85e 100644 --- a/docs/root/intro/version_history.rst +++ b/docs/root/intro/version_history.rst @@ -592,7 +592,7 @@ Version history * mongo filter: added :ref:`fault injection `. * mongo filter: added :ref:`"drain close" ` support. * outlier detection: added :ref:`HTTP gateway failure type `. - See `DEPRECATED.md `_ + See :ref:`deprecated log ` for outlier detection stats deprecations in this release. * redis: the :ref:`redis proxy filter ` is now considered production ready. @@ -669,7 +669,7 @@ Version history * UDP `statsd_ip_address` option added. * Per-cluster DNS resolvers added. * :ref:`Fault filter ` enhancements and fixes. -* Several features are :repo:`deprecated as of the 1.4.0 release `. They +* Several features are :ref:`deprecated as of the 1.4.0 release `. They will be removed at the beginning of the 1.5.0 release cycle. 
We explicitly call out that the `HttpFilterConfigFactory` filter API has been deprecated in favor of `NamedHttpFilterConfigFactory`. diff --git a/source/common/protobuf/utility.cc b/source/common/protobuf/utility.cc index e9d505bb97a01..9c33b50a38e2a 100644 --- a/source/common/protobuf/utility.cc +++ b/source/common/protobuf/utility.cc @@ -166,7 +166,7 @@ void MessageUtil::checkForDeprecation(const Protobuf::Message& message, Runtime: if (field->options().deprecated()) { std::string err = fmt::format( "Using deprecated option '{}' from file {}. This configuration will be removed from " - "Envoy soon. Please see https://github.com/envoyproxy/envoy/blob/master/DEPRECATED.md " + "Envoy soon. Please see https://www.envoyproxy.io/docs/envoy/latest/intro/deprecated " "for details.", field->full_name(), filename); if (warn_only) { diff --git a/tools/deprecate_version/deprecate_version.py b/tools/deprecate_version/deprecate_version.py index 35a8767db6f7e..b73c152d6d56a 100644 --- a/tools/deprecate_version/deprecate_version.py +++ b/tools/deprecate_version/deprecate_version.py @@ -49,7 +49,7 @@ class DeprecateVersionError(Exception): # Figure out map from version to set of commits. def GetHistory(): - """Obtain mapping from release version to DEPRECATED.md PRs. + """Obtain mapping from release version to docs/root/intro/deprecated.rst PRs. Returns: A dictionary mapping from release version to a set of git commit objects. 
@@ -57,7 +57,7 @@ def GetHistory(): repo = Repo(os.getcwd()) version = None history = defaultdict(set) - for commit, lines in repo.blame('HEAD', 'DEPRECATED.md'): + for commit, lines in repo.blame('HEAD', 'docs/root/intro/deprecated.rst'): for line in lines: sr = re.match('## Version (.*) \(.*\)', line) if sr: @@ -145,4 +145,4 @@ def CreateIssues(deprecate_for_version, deprecate_by_version, access_token, comm if deprecate_for_version not in history: print('Unknown version: %s (valid versions: %s)' % (deprecate_for_version, history.keys())) CreateIssues(deprecate_for_version, deprecate_by_version, access_token, - history[deprecate_for_version]) + history[deprecate_for_version]) \ No newline at end of file From fc2e04f9e3ca5619b7753ac90558415237e2a454 Mon Sep 17 00:00:00 2001 From: Fred Douglas <43351173+fredlas@users.noreply.github.com> Date: Tue, 9 Apr 2019 17:34:35 -0400 Subject: [PATCH 081/165] config: gRPC xDS impls now own, rather than subclass, GrpcStream (#6430) GrpcStream was created to share code between the vanilla and delta gRPC xDS implementations. It was a purely mechanical "grab everything they have in common" and always felt a little weird. With this refactor, there is now a logical story: GrpcStream manages a rate-limited gRPC stream, and DeltaSubscriptionImpl and GrpcMuxImpl own a (rather than "are a") GrpcStream to do their communication. Because they, not GrpcStream, are what understand the xDS protocol, they have taken over the request queuing logic from GrpcStream. Part of #5270. 
Risk Level: low Testing: added grpc_stream_test.cc Signed-off-by: Fred Douglas --- include/envoy/config/BUILD | 37 ++-- include/envoy/config/xds_grpc_context.h | 42 +++++ source/common/config/BUILD | 1 + .../common/config/delta_subscription_impl.h | 164 +++++++++++------- source/common/config/grpc_mux_impl.cc | 74 +++++--- source/common/config/grpc_mux_impl.h | 36 ++-- source/common/config/grpc_stream.h | 72 +++----- test/common/config/BUILD | 12 ++ .../config/delta_subscription_impl_test.cc | 26 ++- .../config/delta_subscription_test_harness.h | 2 +- test/common/config/grpc_mux_impl_test.cc | 26 +-- test/common/config/grpc_stream_test.cc | 133 ++++++++++++++ .../config/grpc_subscription_impl_test.cc | 5 +- .../config/grpc_subscription_test_harness.h | 8 +- test/mocks/config/BUILD | 1 + test/mocks/config/mocks.cc | 3 + test/mocks/config/mocks.h | 19 +- 17 files changed, 467 insertions(+), 194 deletions(-) create mode 100644 include/envoy/config/xds_grpc_context.h create mode 100644 test/common/config/grpc_stream_test.cc diff --git a/include/envoy/config/BUILD b/include/envoy/config/BUILD index 4696d029075fc..8c16805bf754f 100644 --- a/include/envoy/config/BUILD +++ b/include/envoy/config/BUILD @@ -8,6 +8,26 @@ load( envoy_package() +envoy_cc_library( + name = "config_provider_interface", + hdrs = ["config_provider.h"], + external_deps = ["abseil_optional"], + deps = [ + "//include/envoy/common:time_interface", + "//source/common/protobuf", + ], +) + +envoy_cc_library( + name = "config_provider_manager_interface", + hdrs = ["config_provider_manager.h"], + deps = [ + ":config_provider_interface", + "//include/envoy/server:filter_config_interface", + "//source/common/protobuf", + ], +) + envoy_cc_library( name = "grpc_mux_interface", hdrs = ["grpc_mux.h"], @@ -36,21 +56,10 @@ envoy_cc_library( ) envoy_cc_library( - name = "config_provider_interface", - hdrs = ["config_provider.h"], - external_deps = ["abseil_optional"], + name = "xds_grpc_context_interface", + hdrs = 
["xds_grpc_context.h"], deps = [ - "//include/envoy/common:time_interface", - "//source/common/protobuf", - ], -) - -envoy_cc_library( - name = "config_provider_manager_interface", - hdrs = ["config_provider_manager.h"], - deps = [ - ":config_provider_interface", - "//include/envoy/server:filter_config_interface", + ":subscription_interface", "//source/common/protobuf", ], ) diff --git a/include/envoy/config/xds_grpc_context.h b/include/envoy/config/xds_grpc_context.h new file mode 100644 index 0000000000000..aba3a824a67ce --- /dev/null +++ b/include/envoy/config/xds_grpc_context.h @@ -0,0 +1,42 @@ +#pragma once + +#include "envoy/common/pure.h" +#include "envoy/config/subscription.h" + +#include "common/protobuf/protobuf.h" + +namespace Envoy { +namespace Config { + +/** + * A grouping of callbacks that an XdsGrpcContext should provide to its GrpcStream. + */ +template class GrpcStreamCallbacks { +public: + virtual ~GrpcStreamCallbacks() {} + + /** + * For the GrpcStream to prompt the context to take appropriate action in response to the + * gRPC stream having been successfully established. + */ + virtual void onStreamEstablished() PURE; + + /** + * For the GrpcStream to prompt the context to take appropriate action in response to + * failure to establish the gRPC stream. + */ + virtual void onEstablishmentFailure() PURE; + + /** + * For the GrpcStream to pass received protos to the context. + */ + virtual void onDiscoveryResponse(std::unique_ptr&& message) PURE; + + /** + * For the GrpcStream to call when its rate limiting logic allows more requests to be sent. 
+ */ + virtual void onWriteable() PURE; +}; + +} // namespace Config +} // namespace Envoy diff --git a/source/common/config/BUILD b/source/common/config/BUILD index 0d41c3e672e7a..61211cda8f2c2 100644 --- a/source/common/config/BUILD +++ b/source/common/config/BUILD @@ -120,6 +120,7 @@ envoy_cc_library( ":utility_lib", "//include/envoy/config:grpc_mux_interface", "//include/envoy/config:subscription_interface", + "//include/envoy/config:xds_grpc_context_interface", "//include/envoy/grpc:async_client_interface", "//include/envoy/upstream:cluster_manager_interface", "//source/common/common:backoff_lib", diff --git a/source/common/config/delta_subscription_impl.h b/source/common/config/delta_subscription_impl.h index 5cd583f357a31..1e5f0e7cc0adb 100644 --- a/source/common/config/delta_subscription_impl.h +++ b/source/common/config/delta_subscription_impl.h @@ -5,6 +5,7 @@ #include "envoy/api/v2/discovery.pb.h" #include "envoy/common/token_bucket.h" #include "envoy/config/subscription.h" +#include "envoy/config/xds_grpc_context.h" #include "common/common/assert.h" #include "common/common/backoff_strategy.h" @@ -26,12 +27,12 @@ struct ResourceNameDiff { /** * Manages the logic of a (non-aggregated) delta xDS subscription. - * TODO(fredlas) add aggregation support. + * TODO(fredlas) add aggregation support. The plan is for that to happen in XdsGrpcContext, + * which this class will then "have a" rather than "be a". 
*/ -class DeltaSubscriptionImpl - : public Subscription, - public GrpcStream { +class DeltaSubscriptionImpl : public Subscription, + public GrpcStreamCallbacks, + public Logger::Loggable { public: DeltaSubscriptionImpl(const LocalInfo::LocalInfo& local_info, Grpc::AsyncClientPtr async_client, Event::Dispatcher& dispatcher, @@ -39,9 +40,8 @@ class DeltaSubscriptionImpl absl::string_view type_url, Runtime::RandomGenerator& random, Stats::Scope& scope, const RateLimitSettings& rate_limit_settings, SubscriptionStats stats, std::chrono::milliseconds init_fetch_timeout) - : GrpcStream(std::move(async_client), service_method, random, dispatcher, - scope, rate_limit_settings), + : grpc_stream_(this, std::move(async_client), service_method, random, dispatcher, scope, + rate_limit_settings), type_url_(type_url), local_info_(local_info), stats_(stats), dispatcher_(dispatcher), init_fetch_timeout_(init_fetch_timeout) { request_.set_type_url(type_url_); @@ -66,8 +66,8 @@ class DeltaSubscriptionImpl queueDiscoveryRequest(diff); } - void sendDiscoveryRequest(const ResourceNameDiff& diff) override { - if (!grpcStreamAvailable()) { + void sendDiscoveryRequest(const ResourceNameDiff& diff) { + if (!grpc_stream_.grpcStreamAvailable()) { ENVOY_LOG(debug, "No stream available to sendDiscoveryRequest for {}", type_url_); return; // Drop this request; the reconnect will enqueue a new one. 
} @@ -85,7 +85,7 @@ class DeltaSubscriptionImpl Protobuf::RepeatedFieldBackInserter(request_.mutable_resource_names_unsubscribe())); ENVOY_LOG(trace, "Sending DiscoveryRequest for {}: {}", type_url_, request_.DebugString()); - sendMessage(request_); + grpc_stream_.sendMessage(request_); request_.clear_error_detail(); request_.clear_initial_resource_versions(); } @@ -113,57 +113,8 @@ class DeltaSubscriptionImpl envoy::api::v2::DeltaDiscoveryRequest internalRequestStateForTest() const { return request_; } - // Config::SubscriptionCallbacks - void onConfigUpdate(const Protobuf::RepeatedPtrField& added_resources, - const Protobuf::RepeatedPtrField& removed_resources, - const std::string& version_info) { - callbacks_->onConfigUpdate(added_resources, removed_resources, version_info); - for (const auto& resource : added_resources) { - setResourceVersion(resource.name(), resource.version()); - } - // If a resource is gone, there is no longer a meaningful version for it that makes sense to - // provide to the server upon stream reconnect: either it will continue to not exist, in which - // case saying nothing is fine, or the server will bring back something new, which we should - // receive regardless (which is the logic that not specifying a version will get you). - // - // So, leave the version map entry present but blank. It will be left out of - // initial_resource_versions messages, but will remind us to explicitly tell the server "I'm - // cancelling my subscription" when we lose interest. 
- for (const auto& resource_name : removed_resources) { - if (resource_names_.find(resource_name) != resource_names_.end()) { - setResourceWaitingForServer(resource_name); - } - } - stats_.update_success_.inc(); - stats_.update_attempt_.inc(); - stats_.version_.set(HashUtil::xxHash64(version_info)); - ENVOY_LOG(debug, "Delta config for {} accepted with {} resources added, {} removed", type_url_, - added_resources.size(), removed_resources.size()); - } - - void handleResponse(std::unique_ptr&& message) override { - ENVOY_LOG(debug, "Received gRPC message for {} at version {}", type_url_, - message->system_version_info()); - disableInitFetchTimeoutTimer(); - - request_.set_response_nonce(message->nonce()); - - try { - onConfigUpdate(message->resources(), message->removed_resources(), - message->system_version_info()); - } catch (const EnvoyException& e) { - stats_.update_rejected_.inc(); - ENVOY_LOG(warn, "delta config for {} rejected: {}", type_url_, e.what()); - stats_.update_attempt_.inc(); - callbacks_->onConfigUpdateFailed(&e); - ::google::rpc::Status* error_detail = request_.mutable_error_detail(); - error_detail->set_code(Grpc::Status::GrpcStatus::Internal); - error_detail->set_message(e.what()); - } - queueDiscoveryRequest(ResourceNameDiff()); // no change to subscribed resources - } - - void handleStreamEstablished() override { + // Config::GrpcStreamCallbacks + void onStreamEstablished() override { // initial_resource_versions "must be populated for first request in a stream", so guarantee // that the initial version'd request we're about to enqueue is what gets sent. 
clearRequestQueue(); @@ -182,7 +133,7 @@ class DeltaSubscriptionImpl queueDiscoveryRequest(ResourceNameDiff()); // no change to subscribed resources } - void handleEstablishmentFailure() override { + void onEstablishmentFailure() override { disableInitFetchTimeoutTimer(); stats_.update_failure_.inc(); ENVOY_LOG(debug, "delta update for {} failed", type_url_); @@ -190,7 +141,32 @@ class DeltaSubscriptionImpl callbacks_->onConfigUpdateFailed(nullptr); } - // Config::DeltaSubscription + void + onDiscoveryResponse(std::unique_ptr&& message) override { + ENVOY_LOG(debug, "Received gRPC message for {} at version {}", type_url_, + message->system_version_info()); + disableInitFetchTimeoutTimer(); + + request_.set_response_nonce(message->nonce()); + + try { + handleConfigUpdate(message->resources(), message->removed_resources(), + message->system_version_info()); + } catch (const EnvoyException& e) { + stats_.update_rejected_.inc(); + ENVOY_LOG(warn, "delta config for {} rejected: {}", type_url_, e.what()); + stats_.update_attempt_.inc(); + callbacks_->onConfigUpdateFailed(&e); + ::google::rpc::Status* error_detail = request_.mutable_error_detail(); + error_detail->set_code(Grpc::Status::GrpcStatus::Internal); + error_detail->set_message(e.what()); + } + queueDiscoveryRequest(ResourceNameDiff()); // no change to subscribed resources + } + + void onWriteable() override { drainRequests(); } + + // Config::Subscription void start(const std::vector& resources, SubscriptionCallbacks& callbacks) override { callbacks_ = &callbacks; @@ -202,7 +178,7 @@ class DeltaSubscriptionImpl init_fetch_timeout_timer_->enableTimer(init_fetch_timeout_); } - establishNewStream(); + grpc_stream_.establishNewStream(); subscribe(resources); // The attempt stat here is maintained for the purposes of having consistency between ADS and // individual DeltaSubscriptions. 
Since ADS is push based and muxed, the notion of an @@ -216,6 +192,34 @@ class DeltaSubscriptionImpl } private: + void + handleConfigUpdate(const Protobuf::RepeatedPtrField& added_resources, + const Protobuf::RepeatedPtrField& removed_resources, + const std::string& version_info) { + callbacks_->onConfigUpdate(added_resources, removed_resources, version_info); + for (const auto& resource : added_resources) { + setResourceVersion(resource.name(), resource.version()); + } + // If a resource is gone, there is no longer a meaningful version for it that makes sense to + // provide to the server upon stream reconnect: either it will continue to not exist, in which + // case saying nothing is fine, or the server will bring back something new, which we should + // receive regardless (which is the logic that not specifying a version will get you). + // + // So, leave the version map entry present but blank. It will be left out of + // initial_resource_versions messages, but will remind us to explicitly tell the server "I'm + // cancelling my subscription" when we lose interest. + for (const auto& resource_name : removed_resources) { + if (resource_names_.find(resource_name) != resource_names_.end()) { + setResourceWaitingForServer(resource_name); + } + } + stats_.update_success_.inc(); + stats_.update_attempt_.inc(); + stats_.version_.set(HashUtil::xxHash64(version_info)); + ENVOY_LOG(debug, "Delta config for {} accepted with {} resources added, {} removed", type_url_, + added_resources.size(), removed_resources.size()); + } + void disableInitFetchTimeoutTimer() { if (init_fetch_timeout_timer_) { init_fetch_timeout_timer_->disableTimer(); @@ -223,6 +227,16 @@ class DeltaSubscriptionImpl } } + void drainRequests() { + ENVOY_LOG(trace, "draining discovery requests {}", request_queue_.size()); + while (!request_queue_.empty() && grpc_stream_.checkRateLimitAllowsDrain()) { + // Process the request, if rate limiting is not enabled at all or if it is under rate limit. 
+ sendDiscoveryRequest(request_queue_.front()); + request_queue_.pop(); + } + grpc_stream_.maybeUpdateQueueSizeStat(request_queue_.size()); + } + class ResourceVersion { public: explicit ResourceVersion(absl::string_view version) : version_(version) {} @@ -258,6 +272,26 @@ class DeltaSubscriptionImpl resource_names_.erase(resource_name); } + void queueDiscoveryRequest(const ResourceNameDiff& queue_item) { + request_queue_.push(queue_item); + drainRequests(); + } + + void clearRequestQueue() { + grpc_stream_.maybeUpdateQueueSizeStat(0); + // TODO(fredlas) when we have C++17: request_queue_ = {}; + while (!request_queue_.empty()) { + request_queue_.pop(); + } + } + + // A queue to store requests while rate limited. Note that when requests cannot be sent due to the + // gRPC stream being down, this queue does not store them; rather, they are simply dropped. + std::queue request_queue_; + + GrpcStream + grpc_stream_; + // A map from resource name to per-resource version. The keys of this map are exactly the resource // names we are currently interested in. 
Those in the waitingForServer state currently don't have // any version for that resource: we need to inform the server if we lose interest in them, but we diff --git a/source/common/config/grpc_mux_impl.cc b/source/common/config/grpc_mux_impl.cc index 7519a990e2661..cad96623bea55 100644 --- a/source/common/config/grpc_mux_impl.cc +++ b/source/common/config/grpc_mux_impl.cc @@ -13,8 +13,9 @@ GrpcMuxImpl::GrpcMuxImpl(const LocalInfo::LocalInfo& local_info, Grpc::AsyncClie const Protobuf::MethodDescriptor& service_method, Runtime::RandomGenerator& random, Stats::Scope& scope, const RateLimitSettings& rate_limit_settings) - : GrpcStream( - std::move(async_client), service_method, random, dispatcher, scope, rate_limit_settings), + : grpc_stream_(this, std::move(async_client), service_method, random, dispatcher, scope, + rate_limit_settings), + local_info_(local_info) { Config::Utility::checkLocalInfo("ads", local_info); } @@ -27,10 +28,10 @@ GrpcMuxImpl::~GrpcMuxImpl() { } } -void GrpcMuxImpl::start() { establishNewStream(); } +void GrpcMuxImpl::start() { grpc_stream_.establishNewStream(); } void GrpcMuxImpl::sendDiscoveryRequest(const std::string& type_url) { - if (!grpcStreamAvailable()) { + if (!grpc_stream_.grpcStreamAvailable()) { ENVOY_LOG(debug, "No stream available to sendDiscoveryRequest for {}", type_url); return; // Drop this request; the reconnect will enqueue a new one. } @@ -57,7 +58,7 @@ void GrpcMuxImpl::sendDiscoveryRequest(const std::string& type_url) { } ENVOY_LOG(trace, "Sending DiscoveryRequest for {}: {}", type_url, request.DebugString()); - sendMessage(request); + grpc_stream_.sendMessage(request); // clear error_detail after the request is sent if it exists. 
if (api_state_[type_url].request_.has_error_detail()) { @@ -113,29 +114,30 @@ void GrpcMuxImpl::resume(const std::string& type_url) { } } -void GrpcMuxImpl::handleResponse(std::unique_ptr&& message) { +void GrpcMuxImpl::onDiscoveryResponse( + std::unique_ptr&& message) { const std::string& type_url = message->type_url(); ENVOY_LOG(debug, "Received gRPC message for {} at version {}", type_url, message->version_info()); if (api_state_.count(type_url) == 0) { ENVOY_LOG(warn, "Ignoring the message for type URL {} as it has no current subscribers.", type_url); - // TODO(yuval-k): This should never happen. consider dropping the stream as this is a protocol - // violation + // TODO(yuval-k): This should never happen. consider dropping the stream as this is a + // protocol violation return; } if (api_state_[type_url].watches_.empty()) { // update the nonce as we are processing this response. api_state_[type_url].request_.set_response_nonce(message->nonce()); if (message->resources().empty()) { - // No watches and no resources. This can happen when envoy unregisters from a resource - // that's removed from the server as well. For example, a deleted cluster triggers un-watching - // the ClusterLoadAssignment watch, and at the same time the xDS server sends an empty list of - // ClusterLoadAssignment resources. we'll accept this update. no need to send a discovery - // request, as we don't watch for anything. + // No watches and no resources. This can happen when envoy unregisters from a + // resource that's removed from the server as well. For example, a deleted cluster + // triggers un-watching the ClusterLoadAssignment watch, and at the same time the + // xDS server sends an empty list of ClusterLoadAssignment resources. we'll accept + // this update. no need to send a discovery request, as we don't watch for anything. api_state_[type_url].request_.set_version_info(message->version_info()); } else { - // No watches and we have resources - this should not happen. 
send a NACK (by not updating - // the version). + // No watches and we have resources - this should not happen. send a NACK (by not + // updating the version). ENVOY_LOG(warn, "Ignoring unwatched type URL {}", type_url); queueDiscoveryRequest(type_url); } @@ -157,9 +159,9 @@ void GrpcMuxImpl::handleResponse(std::unique_ptrresources_.empty()) { watch->callbacks_.onConfigUpdate(message->resources(), message->version_info()); continue; @@ -171,14 +173,14 @@ void GrpcMuxImpl::handleResponse(std::unique_ptrMergeFrom(it->second); } } - // onConfigUpdate should be called only on watches(clusters/routes) that have updates in the - // message for EDS/RDS. + // onConfigUpdate should be called only on watches(clusters/routes) that have + // updates in the message for EDS/RDS. if (found_resources.size() > 0) { watch->callbacks_.onConfigUpdate(found_resources, message->version_info()); } } - // TODO(mattklein123): In the future if we start tracking per-resource versions, we would do - // that tracking here. + // TODO(mattklein123): In the future if we start tracking per-resource versions, we + // would do that tracking here. 
api_state_[type_url].request_.set_version_info(message->version_info()); } catch (const EnvoyException& e) { for (auto watch : api_state_[type_url].watches_) { @@ -192,13 +194,15 @@ void GrpcMuxImpl::handleResponse(std::unique_ptrcallbacks_.onConfigUpdateFailed(nullptr); @@ -206,5 +210,27 @@ void GrpcMuxImpl::handleEstablishmentFailure() { } } +void GrpcMuxImpl::queueDiscoveryRequest(const std::string& queue_item) { + request_queue_.push(queue_item); + drainRequests(); +} + +void GrpcMuxImpl::clearRequestQueue() { + grpc_stream_.maybeUpdateQueueSizeStat(0); + // TODO(fredlas) when we have C++17: request_queue_ = {}; + while (!request_queue_.empty()) { + request_queue_.pop(); + } +} + +void GrpcMuxImpl::drainRequests() { + while (!request_queue_.empty() && grpc_stream_.checkRateLimitAllowsDrain()) { + // Process the request, if rate limiting is not enabled at all or if it is under rate limit. + sendDiscoveryRequest(request_queue_.front()); + request_queue_.pop(); + } + grpc_stream_.maybeUpdateQueueSizeStat(request_queue_.size()); +} + } // namespace Config } // namespace Envoy diff --git a/source/common/config/grpc_mux_impl.h b/source/common/config/grpc_mux_impl.h index 5a3672e5fe132..49a961895229d 100644 --- a/source/common/config/grpc_mux_impl.h +++ b/source/common/config/grpc_mux_impl.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include "envoy/common/time.h" @@ -19,11 +20,9 @@ namespace Config { /** * ADS API implementation that fetches via gRPC. 
*/ -class GrpcMuxImpl - : public GrpcMux, - public GrpcStream // this string is a type URL -{ +class GrpcMuxImpl : public GrpcMux, + public GrpcStreamCallbacks, + public Logger::Loggable { public: GrpcMuxImpl(const LocalInfo::LocalInfo& local_info, Grpc::AsyncClientPtr async_client, Event::Dispatcher& dispatcher, const Protobuf::MethodDescriptor& service_method, @@ -37,12 +36,18 @@ class GrpcMuxImpl void pause(const std::string& type_url) override; void resume(const std::string& type_url) override; - void sendDiscoveryRequest(const std::string& type_url) override; + void sendDiscoveryRequest(const std::string& type_url); - // GrpcStream - void handleResponse(std::unique_ptr&& message) override; - void handleStreamEstablished() override; - void handleEstablishmentFailure() override; + // Config::GrpcStreamCallbacks + void onStreamEstablished() override; + void onEstablishmentFailure() override; + void onDiscoveryResponse(std::unique_ptr&& message) override; + void onWriteable() override; + + GrpcStream& + grpcStreamForTest() { + return grpc_stream_; + } private: void setRetryTimer(); @@ -85,10 +90,21 @@ class GrpcMuxImpl bool subscribed_{}; }; + // Request queue management logic. + void queueDiscoveryRequest(const std::string& queue_item); + void clearRequestQueue(); + void drainRequests(); + + GrpcStream grpc_stream_; const LocalInfo::LocalInfo& local_info_; std::unordered_map api_state_; // Envoy's dependency ordering. std::list subscriptions_; + + // A queue to store requests while rate limited. Note that when requests cannot be sent due to the + // gRPC stream being down, this queue does not store them; rather, they are simply dropped. + // This string is a type URL. 
+ std::queue request_queue_; }; class NullGrpcMuxImpl : public GrpcMux { diff --git a/source/common/config/grpc_stream.h b/source/common/config/grpc_stream.h index 0ef95427caf6f..b80f3f3e9afae 100644 --- a/source/common/config/grpc_stream.h +++ b/source/common/config/grpc_stream.h @@ -1,8 +1,8 @@ #pragma once #include -#include +#include "envoy/config/xds_grpc_context.h" #include "envoy/grpc/async_client.h" #include "common/common/backoff_strategy.h" @@ -15,59 +15,44 @@ namespace Config { // Oversees communication for gRPC xDS implementations (parent to both regular xDS and delta // xDS variants). Reestablishes the gRPC channel when necessary, and provides rate limiting of // requests. -template +template class GrpcStream : public Grpc::TypedAsyncStreamCallbacks, public Logger::Loggable { public: - GrpcStream(Grpc::AsyncClientPtr async_client, const Protobuf::MethodDescriptor& service_method, - Runtime::RandomGenerator& random, Event::Dispatcher& dispatcher, Stats::Scope& scope, + GrpcStream(GrpcStreamCallbacks* callbacks, Grpc::AsyncClientPtr async_client, + const Protobuf::MethodDescriptor& service_method, Runtime::RandomGenerator& random, + Event::Dispatcher& dispatcher, Stats::Scope& scope, const RateLimitSettings& rate_limit_settings) - : async_client_(std::move(async_client)), service_method_(service_method), - control_plane_stats_(generateControlPlaneStats(scope)), random_(random), - time_source_(dispatcher.timeSource()), + : callbacks_(callbacks), async_client_(std::move(async_client)), + service_method_(service_method), control_plane_stats_(generateControlPlaneStats(scope)), + random_(random), time_source_(dispatcher.timeSource()), rate_limiting_enabled_(rate_limit_settings.enabled_) { retry_timer_ = dispatcher.createTimer([this]() -> void { establishNewStream(); }); if (rate_limiting_enabled_) { // Default Bucket contains 100 tokens maximum and refills at 10 tokens/sec. 
limit_request_ = std::make_unique( rate_limit_settings.max_tokens_, time_source_, rate_limit_settings.fill_rate_); - drain_request_timer_ = dispatcher.createTimer([this]() { drainRequests(); }); + drain_request_timer_ = dispatcher.createTimer([this]() { callbacks_->onWriteable(); }); } backoff_strategy_ = std::make_unique(RETRY_INITIAL_DELAY_MS, RETRY_MAX_DELAY_MS, random_); } - virtual void handleResponse(std::unique_ptr&& message) PURE; - virtual void handleStreamEstablished() PURE; - virtual void handleEstablishmentFailure() PURE; - - // Returns whether the request was actually sent (and so can leave the queue). - virtual void sendDiscoveryRequest(const RequestQueueItem& queue_item) PURE; - - void queueDiscoveryRequest(const RequestQueueItem& queue_item) { - request_queue_.push(queue_item); - drainRequests(); - } - - void clearRequestQueue() { - control_plane_stats_.pending_requests_.sub(request_queue_.size()); - // TODO(fredlas) when we have C++17: request_queue_ = {}; - while (!request_queue_.empty()) { - request_queue_.pop(); - } - } - void establishNewStream() { ENVOY_LOG(debug, "Establishing new gRPC bidi stream for {}", service_method_.DebugString()); + if (stream_ != nullptr) { + ENVOY_LOG(warn, "gRPC bidi stream for {} already exists!", service_method_.DebugString()); + return; + } stream_ = async_client_->start(service_method_, *this); if (stream_ == nullptr) { ENVOY_LOG(warn, "Unable to establish new stream"); - handleEstablishmentFailure(); + callbacks_->onEstablishmentFailure(); setRetryTimer(); return; } control_plane_stats_.connected_state_.set(1); - handleStreamEstablished(); + callbacks_->onStreamEstablished(); } bool grpcStreamAvailable() const { return stream_ != nullptr; } @@ -86,11 +71,11 @@ class GrpcStream : public Grpc::TypedAsyncStreamCallbacks, void onReceiveMessage(std::unique_ptr&& message) override { // Reset here so that it starts with fresh backoff interval on next disconnect. 
backoff_strategy_->reset(); - // Some times during hot restarts this stat's value becomes inconsistent and will continue to - // have 0 till it is reconnected. Setting here ensures that it is consistent with the state of + // Sometimes during hot restarts this stat's value becomes inconsistent and will continue to + // have 0 until it is reconnected. Setting here ensures that it is consistent with the state of // management server connection. control_plane_stats_.connected_state_.set(1); - handleResponse(std::move(message)); + callbacks_->onDiscoveryResponse(std::move(message)); } void onReceiveTrailingMetadata(Http::HeaderMapPtr&& metadata) override { @@ -101,18 +86,11 @@ class GrpcStream : public Grpc::TypedAsyncStreamCallbacks, ENVOY_LOG(warn, "gRPC config stream closed: {}, {}", status, message); stream_ = nullptr; control_plane_stats_.connected_state_.set(0); - handleEstablishmentFailure(); + callbacks_->onEstablishmentFailure(); setRetryTimer(); } -private: - void drainRequests() { - ENVOY_LOG(trace, "draining discovery requests {}", request_queue_.size()); - while (!request_queue_.empty() && checkRateLimitAllowsDrain()) { - // Process the request, if rate limiting is not enabled at all or if it is under rate limit. - sendDiscoveryRequest(request_queue_.front()); - request_queue_.pop(); - } + void maybeUpdateQueueSizeStat(uint64_t size) { // Although request_queue_.push() happens elsewhere, the only time the queue is non-transiently // non-empty is when it remains non-empty after a drain attempt. (The push() doesn't matter // because we always attempt this drain immediately after the push). Basically, a change in @@ -120,8 +98,8 @@ class GrpcStream : public Grpc::TypedAsyncStreamCallbacks, // if(>0 || used) to keep this stat from being wrongly marked interesting by a pointless set(0) // and needlessly taking up space. The first time we set(123), used becomes true, and so we will // subsequently always do the set (including set(0)). 
- if (request_queue_.size() > 0 || control_plane_stats_.pending_requests_.used()) { - control_plane_stats_.pending_requests_.set(request_queue_.size()); + if (size > 0 || control_plane_stats_.pending_requests_.used()) { + control_plane_stats_.pending_requests_.set(size); } } @@ -136,6 +114,7 @@ class GrpcStream : public Grpc::TypedAsyncStreamCallbacks, return false; } +private: void setRetryTimer() { retry_timer_->enableTimer(std::chrono::milliseconds(backoff_strategy_->nextBackOffMs())); } @@ -146,6 +125,8 @@ class GrpcStream : public Grpc::TypedAsyncStreamCallbacks, POOL_GAUGE_PREFIX(scope, control_plane_prefix))}; } + GrpcStreamCallbacks* const callbacks_; + // TODO(htuch): Make this configurable or some static. const uint32_t RETRY_INITIAL_DELAY_MS = 500; const uint32_t RETRY_MAX_DELAY_MS = 30000; // Do not cross more than 30s @@ -165,9 +146,6 @@ class GrpcStream : public Grpc::TypedAsyncStreamCallbacks, TokenBucketPtr limit_request_; const bool rate_limiting_enabled_; Event::TimerPtr drain_request_timer_; - // A queue to store requests while rate limited. Note that when requests cannot be sent due to the - // gRPC stream being down, this queue does not store them; rather, they are simply dropped. 
- std::queue request_queue_; }; } // namespace Config diff --git a/test/common/config/BUILD b/test/common/config/BUILD index 7fb02b385b3ae..c777873bcbee5 100644 --- a/test/common/config/BUILD +++ b/test/common/config/BUILD @@ -78,6 +78,18 @@ envoy_cc_test( ], ) +envoy_cc_test( + name = "grpc_stream_test", + srcs = ["grpc_stream_test.cc"], + deps = [ + "//source/common/config:grpc_stream_lib", + "//test/mocks/config:config_mocks", + "//test/mocks/event:event_mocks", + "//test/mocks/grpc:grpc_mocks", + "//test/mocks/upstream:upstream_mocks", + ], +) + envoy_cc_test( name = "grpc_subscription_impl_test", srcs = ["grpc_subscription_impl_test.cc"], diff --git a/test/common/config/delta_subscription_impl_test.cc b/test/common/config/delta_subscription_impl_test.cc index cbe980a526505..98ea598c25794 100644 --- a/test/common/config/delta_subscription_impl_test.cc +++ b/test/common/config/delta_subscription_impl_test.cc @@ -7,7 +7,19 @@ namespace Envoy { namespace Config { namespace { -class DeltaSubscriptionImplTest : public DeltaSubscriptionTestHarness, public testing::Test {}; +class DeltaSubscriptionImplTest : public DeltaSubscriptionTestHarness, public testing::Test { +protected: + void deliverDiscoveryResponse( + const Protobuf::RepeatedPtrField& added_resources, + const Protobuf::RepeatedPtrField& removed_resources, + const std::string& version_info) { + auto message = std::make_unique(); + *message->mutable_resources() = added_resources; + *message->mutable_removed_resources() = removed_resources; + message->set_system_version_info(version_info); + subscription_->onDiscoveryResponse(std::move(message)); + } +}; TEST_F(DeltaSubscriptionImplTest, ResourceGoneLeadsToBlankInitialVersion) { // Envoy is interested in three resources: name1, name2, and name3. 
@@ -28,8 +40,8 @@ TEST_F(DeltaSubscriptionImplTest, ResourceGoneLeadsToBlankInitialVersion) { resource = add1_2.Add(); resource->set_name("name2"); resource->set_version("version2A"); - subscription_->onConfigUpdate(add1_2, {}, "debugversion1"); - subscription_->handleStreamEstablished(); + deliverDiscoveryResponse(add1_2, {}, "debugversion1"); + subscription_->onStreamEstablished(); envoy::api::v2::DeltaDiscoveryRequest cur_request = subscription_->internalRequestStateForTest(); EXPECT_EQ("version1A", cur_request.initial_resource_versions().at("name1")); EXPECT_EQ("version2A", cur_request.initial_resource_versions().at("name2")); @@ -46,8 +58,8 @@ TEST_F(DeltaSubscriptionImplTest, ResourceGoneLeadsToBlankInitialVersion) { resource->set_version("version3A"); Protobuf::RepeatedPtrField remove2; *remove2.Add() = "name2"; - subscription_->onConfigUpdate(add1_3, remove2, "debugversion2"); - subscription_->handleStreamEstablished(); + deliverDiscoveryResponse(add1_3, remove2, "debugversion2"); + subscription_->onStreamEstablished(); cur_request = subscription_->internalRequestStateForTest(); EXPECT_EQ("version1B", cur_request.initial_resource_versions().at("name1")); EXPECT_EQ(cur_request.initial_resource_versions().end(), @@ -58,8 +70,8 @@ TEST_F(DeltaSubscriptionImplTest, ResourceGoneLeadsToBlankInitialVersion) { Protobuf::RepeatedPtrField remove1_3; *remove1_3.Add() = "name1"; *remove1_3.Add() = "name3"; - subscription_->onConfigUpdate({}, remove1_3, "debugversion3"); - subscription_->handleStreamEstablished(); + deliverDiscoveryResponse({}, remove1_3, "debugversion3"); + subscription_->onStreamEstablished(); cur_request = subscription_->internalRequestStateForTest(); EXPECT_TRUE(cur_request.initial_resource_versions().empty()); diff --git a/test/common/config/delta_subscription_test_harness.h b/test/common/config/delta_subscription_test_harness.h index 1db65172761e3..5764b619de08c 100644 --- a/test/common/config/delta_subscription_test_harness.h +++ 
b/test/common/config/delta_subscription_test_harness.h @@ -104,7 +104,7 @@ class DeltaSubscriptionTestHarness : public SubscriptionTestHarness { EXPECT_CALL(callbacks_, onConfigUpdateFailed(_)); expectSendMessage({}, {}, Grpc::Status::GrpcStatus::Internal, "bad config"); } - subscription_->onReceiveMessage(std::move(response)); + subscription_->onDiscoveryResponse(std::move(response)); Mock::VerifyAndClearExpectations(&async_stream_); } diff --git a/test/common/config/grpc_mux_impl_test.cc b/test/common/config/grpc_mux_impl_test.cc index f8e9f62cb8169..9c238269ce93c 100644 --- a/test/common/config/grpc_mux_impl_test.cc +++ b/test/common/config/grpc_mux_impl_test.cc @@ -145,7 +145,7 @@ TEST_F(GrpcMuxImplTest, ResetStream) { EXPECT_CALL(random_, random()); ASSERT_TRUE(timer != nullptr); // initialized from dispatcher mock. EXPECT_CALL(*timer, enableTimer(_)); - grpc_mux_->onRemoteClose(Grpc::Status::GrpcStatus::Canceled, ""); + grpc_mux_->grpcStreamForTest().onRemoteClose(Grpc::Status::GrpcStatus::Canceled, ""); EXPECT_EQ(0, stats_.gauge("control_plane.connected_state").value()); EXPECT_CALL(*async_client_, start(_, _)).WillOnce(Return(&async_stream_)); expectSendMessage("foo", {"x", "y"}, ""); @@ -195,7 +195,7 @@ TEST_F(GrpcMuxImplTest, TypeUrlMismatch) { std::unique_ptr response( new envoy::api::v2::DiscoveryResponse()); response->set_type_url("bar"); - grpc_mux_->onReceiveMessage(std::move(response)); + grpc_mux_->grpcStreamForTest().onReceiveMessage(std::move(response)); } { @@ -209,7 +209,7 @@ TEST_F(GrpcMuxImplTest, TypeUrlMismatch) { expectSendMessage("foo", {"x", "y"}, "", "", Grpc::Status::GrpcStatus::Internal, fmt::format("bar does not match foo type URL in DiscoveryResponse {}", invalid_response->DebugString())); - grpc_mux_->onReceiveMessage(std::move(invalid_response)); + grpc_mux_->grpcStreamForTest().onReceiveMessage(std::move(invalid_response)); } expectSendMessage("foo", {}, ""); } @@ -243,7 +243,7 @@ TEST_F(GrpcMuxImplTest, WildcardWatch) { 
EXPECT_TRUE(TestUtility::protoEqual(expected_assignment, load_assignment)); })); expectSendMessage(type_url, {}, "1"); - grpc_mux_->onReceiveMessage(std::move(response)); + grpc_mux_->grpcStreamForTest().onReceiveMessage(std::move(response)); } } @@ -280,7 +280,7 @@ TEST_F(GrpcMuxImplTest, WatchDemux) { EXPECT_TRUE(TestUtility::protoEqual(expected_assignment, load_assignment)); })); expectSendMessage(type_url, {"y", "z", "x"}, "1"); - grpc_mux_->onReceiveMessage(std::move(response)); + grpc_mux_->grpcStreamForTest().onReceiveMessage(std::move(response)); } { @@ -320,7 +320,7 @@ TEST_F(GrpcMuxImplTest, WatchDemux) { EXPECT_TRUE(TestUtility::protoEqual(expected_assignment, load_assignment_y)); })); expectSendMessage(type_url, {"y", "z", "x"}, "2"); - grpc_mux_->onReceiveMessage(std::move(response)); + grpc_mux_->grpcStreamForTest().onReceiveMessage(std::move(response)); } expectSendMessage(type_url, {"x", "y"}, "2"); @@ -346,7 +346,7 @@ TEST_F(GrpcMuxImplTest, MultipleWatcherWithEmptyUpdates) { EXPECT_CALL(foo_callbacks, onConfigUpdate(_, "1")).Times(0); expectSendMessage(type_url, {"x", "y"}, "1"); - grpc_mux_->onReceiveMessage(std::move(response)); + grpc_mux_->grpcStreamForTest().onReceiveMessage(std::move(response)); expectSendMessage(type_url, {}, "1"); } @@ -371,7 +371,7 @@ TEST_F(GrpcMuxImplTest, SingleWatcherWithEmptyUpdates) { .WillOnce(Invoke([](const Protobuf::RepeatedPtrField& resources, const std::string&) { EXPECT_TRUE(resources.empty()); })); expectSendMessage(type_url, {}, "1"); - grpc_mux_->onReceiveMessage(std::move(response)); + grpc_mux_->grpcStreamForTest().onReceiveMessage(std::move(response)); } // Exactly one test requires a mock time system to provoke behavior that cannot @@ -408,7 +408,7 @@ TEST_F(GrpcMuxImplTestWithMockTimeSystem, TooManyRequestsWithDefaultSettings) { response->set_version_info("baz"); response->set_nonce("bar"); response->set_type_url("foo"); - grpc_mux_->onReceiveMessage(std::move(response)); + 
grpc_mux_->grpcStreamForTest().onReceiveMessage(std::move(response)); } }; @@ -461,7 +461,7 @@ TEST_F(GrpcMuxImplTestWithMockTimeSystem, TooManyRequestsWithEmptyRateLimitSetti response->set_version_info("baz"); response->set_nonce("bar"); response->set_type_url("foo"); - grpc_mux_->onReceiveMessage(std::move(response)); + grpc_mux_->grpcStreamForTest().onReceiveMessage(std::move(response)); } }; @@ -515,7 +515,7 @@ TEST_F(GrpcMuxImplTest, TooManyRequestsWithCustomRateLimitSettings) { response->set_version_info("baz"); response->set_nonce("bar"); response->set_type_url("foo"); - grpc_mux_->onReceiveMessage(std::move(response)); + grpc_mux_->grpcStreamForTest().onReceiveMessage(std::move(response)); } }; @@ -565,7 +565,7 @@ TEST_F(GrpcMuxImplTest, UnwatchedTypeAcceptsEmptyResources) { response->set_type_url(type_url); // This contains zero resources. No discovery request should be sent. - grpc_mux_->onReceiveMessage(std::move(response)); + grpc_mux_->grpcStreamForTest().onReceiveMessage(std::move(response)); // when we add the new subscription version should be 1 and nonce should be bar expectSendMessage(type_url, {"x"}, "1", "bar"); @@ -604,7 +604,7 @@ TEST_F(GrpcMuxImplTest, UnwatchedTypeRejectsResources) { // The message should be rejected. 
expectSendMessage(type_url, {}, "", "bar"); EXPECT_LOG_CONTAINS("warning", "Ignoring unwatched type URL " + type_url, - grpc_mux_->onReceiveMessage(std::move(response))); + grpc_mux_->grpcStreamForTest().onReceiveMessage(std::move(response))); } TEST_F(GrpcMuxImplTest, BadLocalInfoEmptyClusterName) { diff --git a/test/common/config/grpc_stream_test.cc b/test/common/config/grpc_stream_test.cc new file mode 100644 index 0000000000000..b4a97675fe916 --- /dev/null +++ b/test/common/config/grpc_stream_test.cc @@ -0,0 +1,133 @@ +#include "envoy/api/v2/discovery.pb.h" + +#include "common/config/grpc_stream.h" +#include "common/protobuf/protobuf.h" + +#include "test/mocks/config/mocks.h" +#include "test/mocks/event/mocks.h" +#include "test/mocks/grpc/mocks.h" +#include "test/mocks/upstream/mocks.h" +#include "test/test_common/utility.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +using testing::NiceMock; +using testing::Return; + +namespace Envoy { +namespace Config { +namespace { + +class GrpcStreamTest : public testing::Test { +protected: + GrpcStreamTest() + : async_client_owner_(std::make_unique()), + async_client_(async_client_owner_.get()), + grpc_stream_(&callbacks_, std::move(async_client_owner_), + *Protobuf::DescriptorPool::generated_pool()->FindMethodByName( + "envoy.api.v2.EndpointDiscoveryService.StreamEndpoints"), + random_, dispatcher_, stats_, rate_limit_settings_) {} + + NiceMock dispatcher_; + Grpc::MockAsyncStream async_stream_; + Stats::IsolatedStoreImpl stats_; + NiceMock random_; + Envoy::Config::RateLimitSettings rate_limit_settings_; + NiceMock callbacks_; + std::unique_ptr async_client_owner_; + Grpc::MockAsyncClient* async_client_; + + GrpcStream grpc_stream_; +}; + +// Tests that establishNewStream() establishes it, a second call does nothing, and a third call +// after the stream was disconnected re-establishes it. 
+TEST_F(GrpcStreamTest, EstablishNewStream) { + EXPECT_FALSE(grpc_stream_.grpcStreamAvailable()); + // Successful establishment + { + EXPECT_CALL(*async_client_, start(_, _)).WillOnce(Return(&async_stream_)); + EXPECT_CALL(callbacks_, onStreamEstablished()); + grpc_stream_.establishNewStream(); + EXPECT_TRUE(grpc_stream_.grpcStreamAvailable()); + } + // Idempotency: do nothing (other than logging a warning) if already connected + { + EXPECT_CALL(*async_client_, start(_, _)).Times(0); + EXPECT_CALL(callbacks_, onStreamEstablished()).Times(0); + grpc_stream_.establishNewStream(); + EXPECT_TRUE(grpc_stream_.grpcStreamAvailable()); + } + grpc_stream_.onRemoteClose(Grpc::Status::GrpcStatus::Ok, ""); + EXPECT_FALSE(grpc_stream_.grpcStreamAvailable()); + // Successful re-establishment + { + EXPECT_CALL(*async_client_, start(_, _)).WillOnce(Return(&async_stream_)); + EXPECT_CALL(callbacks_, onStreamEstablished()); + grpc_stream_.establishNewStream(); + EXPECT_TRUE(grpc_stream_.grpcStreamAvailable()); + } +} + +// A failure in the underlying gRPC machinery should result in grpcStreamAvailable() false. Calling +// sendMessage would segfault. +TEST_F(GrpcStreamTest, FailToEstablishNewStream) { + EXPECT_CALL(*async_client_, start(_, _)).WillOnce(Return(nullptr)); + EXPECT_CALL(callbacks_, onEstablishmentFailure()); + grpc_stream_.establishNewStream(); + EXPECT_FALSE(grpc_stream_.grpcStreamAvailable()); +} + +// Checks that sendMessage correctly passes a DiscoveryRequest down to the underlying gRPC +// machinery. 
+TEST_F(GrpcStreamTest, SendMessage) { + EXPECT_CALL(*async_client_, start(_, _)).WillOnce(Return(&async_stream_)); + grpc_stream_.establishNewStream(); + envoy::api::v2::DiscoveryRequest request; + request.set_response_nonce("grpc_stream_test_noncense"); + EXPECT_CALL(async_stream_, sendMessage(ProtoEq(request), false)); + grpc_stream_.sendMessage(request); +} + +// Tests that, upon a call of the GrpcStream::onReceiveMessage() callback, which is called by the +// underlying gRPC machinery, the received proto will make it up to the GrpcStreamCallbacks that the +// GrpcStream was given. +TEST_F(GrpcStreamTest, ReceiveMessage) { + envoy::api::v2::DiscoveryResponse response_copy; + response_copy.set_type_url("faketypeURL"); + auto response = std::make_unique(response_copy); + envoy::api::v2::DiscoveryResponse received_message; + EXPECT_CALL(callbacks_, onDiscoveryResponse(_)) + .WillOnce([&received_message](std::unique_ptr&& message) { + received_message = *message; + }); + grpc_stream_.onReceiveMessage(std::move(response)); + EXPECT_TRUE(TestUtility::protoEqual(response_copy, received_message)); +} + +// If the value has only ever been 0, the stat should remain unused, including after an attempt to +// write a 0 to it. +TEST_F(GrpcStreamTest, QueueSizeStat) { + grpc_stream_.maybeUpdateQueueSizeStat(0); + EXPECT_FALSE(stats_.gauge("control_plane.pending_requests").used()); + grpc_stream_.maybeUpdateQueueSizeStat(123); + EXPECT_EQ(123, stats_.gauge("control_plane.pending_requests").value()); + grpc_stream_.maybeUpdateQueueSizeStat(0); + EXPECT_EQ(0, stats_.gauge("control_plane.pending_requests").value()); +} + +// Just to add coverage to the no-op implementations of these callbacks (without exposing us to +// crashes from a badly behaved peer like NOT_IMPLEMENTED_GCOVR_EXCL_LINE would). 
+TEST_F(GrpcStreamTest, HeaderTrailerJustForCodeCoverage) { + Http::HeaderMapPtr response_headers{new Http::TestHeaderMapImpl{}}; + grpc_stream_.onReceiveInitialMetadata(std::move(response_headers)); + Http::TestHeaderMapImpl request_headers; + grpc_stream_.onCreateInitialMetadata(request_headers); + Http::HeaderMapPtr trailers{new Http::TestHeaderMapImpl{}}; + grpc_stream_.onReceiveTrailingMetadata(std::move(trailers)); +} + +} // namespace +} // namespace Config +} // namespace Envoy diff --git a/test/common/config/grpc_subscription_impl_test.cc b/test/common/config/grpc_subscription_impl_test.cc index 1293e48b203f2..490a74c228e39 100644 --- a/test/common/config/grpc_subscription_impl_test.cc +++ b/test/common/config/grpc_subscription_impl_test.cc @@ -37,12 +37,11 @@ TEST_F(GrpcSubscriptionImplTest, StreamCreationFailure) { TEST_F(GrpcSubscriptionImplTest, RemoteStreamClose) { startSubscription({"cluster0", "cluster1"}); verifyStats(1, 0, 0, 0, 0); - Http::HeaderMapPtr trailers{new Http::TestHeaderMapImpl{}}; - subscription_->grpcMux().onReceiveTrailingMetadata(std::move(trailers)); EXPECT_CALL(callbacks_, onConfigUpdateFailed(_)); EXPECT_CALL(*timer_, enableTimer(_)); EXPECT_CALL(random_, random()); - subscription_->grpcMux().onRemoteClose(Grpc::Status::GrpcStatus::Canceled, ""); + subscription_->grpcMux().grpcStreamForTest().onRemoteClose(Grpc::Status::GrpcStatus::Canceled, + ""); verifyStats(2, 0, 0, 1, 0); verifyControlPlaneStats(0); diff --git a/test/common/config/grpc_subscription_test_harness.h b/test/common/config/grpc_subscription_test_harness.h index 11b5a44208ae1..8128edb534f88 100644 --- a/test/common/config/grpc_subscription_test_harness.h +++ b/test/common/config/grpc_subscription_test_harness.h @@ -80,12 +80,6 @@ class GrpcSubscriptionTestHarness : public SubscriptionTestHarness { last_cluster_names_ = cluster_names; expectSendMessage(last_cluster_names_, ""); subscription_->start(cluster_names, callbacks_); - // These are just there to add 
coverage to the null implementations of these - // callbacks. - Http::HeaderMapPtr response_headers{new Http::TestHeaderMapImpl{}}; - subscription_->grpcMux().onReceiveInitialMetadata(std::move(response_headers)); - Http::TestHeaderMapImpl request_headers; - subscription_->grpcMux().onCreateInitialMetadata(request_headers); } void deliverConfigUpdate(const std::vector& cluster_names, @@ -115,7 +109,7 @@ class GrpcSubscriptionTestHarness : public SubscriptionTestHarness { expectSendMessage(last_cluster_names_, version_, Grpc::Status::GrpcStatus::Internal, "bad config"); } - subscription_->grpcMux().onReceiveMessage(std::move(response)); + subscription_->grpcMux().onDiscoveryResponse(std::move(response)); Mock::VerifyAndClearExpectations(&async_stream_); } diff --git a/test/mocks/config/BUILD b/test/mocks/config/BUILD index f23e0337aa505..4ffbef91dbbfb 100644 --- a/test/mocks/config/BUILD +++ b/test/mocks/config/BUILD @@ -15,6 +15,7 @@ envoy_cc_mock( deps = [ "//include/envoy/config:grpc_mux_interface", "//include/envoy/config:subscription_interface", + "//include/envoy/config:xds_grpc_context_interface", "//source/common/config:resources_lib", "//source/common/protobuf:utility_lib", "@envoy_api//envoy/api/v2:cds_cc", diff --git a/test/mocks/config/mocks.cc b/test/mocks/config/mocks.cc index 9b50adef0d8e0..72783296cc9fb 100644 --- a/test/mocks/config/mocks.cc +++ b/test/mocks/config/mocks.cc @@ -13,6 +13,9 @@ MockGrpcMuxWatch::~MockGrpcMuxWatch() { cancel(); } MockGrpcMux::MockGrpcMux() {} MockGrpcMux::~MockGrpcMux() {} +MockGrpcStreamCallbacks::MockGrpcStreamCallbacks() {} +MockGrpcStreamCallbacks::~MockGrpcStreamCallbacks() {} + GrpcMuxWatchPtr MockGrpcMux::subscribe(const std::string& type_url, const std::vector& resources, GrpcMuxCallbacks& callbacks) { diff --git a/test/mocks/config/mocks.h b/test/mocks/config/mocks.h index f40ae034ae919..9cc82f2380834 100644 --- a/test/mocks/config/mocks.h +++ b/test/mocks/config/mocks.h @@ -3,6 +3,7 @@ #include 
"envoy/api/v2/eds.pb.h" #include "envoy/config/grpc_mux.h" #include "envoy/config/subscription.h" +#include "envoy/config/xds_grpc_context.h" #include "common/config/resources.h" #include "common/protobuf/utility.h" @@ -47,7 +48,7 @@ class MockSubscription : public Subscription { class MockGrpcMuxWatch : public GrpcMuxWatch { public: MockGrpcMuxWatch(); - virtual ~MockGrpcMuxWatch(); + ~MockGrpcMuxWatch(); MOCK_METHOD0(cancel, void()); }; @@ -55,7 +56,7 @@ class MockGrpcMuxWatch : public GrpcMuxWatch { class MockGrpcMux : public GrpcMux { public: MockGrpcMux(); - virtual ~MockGrpcMux(); + ~MockGrpcMux(); MOCK_METHOD0(start, void()); MOCK_METHOD3(subscribe_, @@ -70,7 +71,7 @@ class MockGrpcMux : public GrpcMux { class MockGrpcMuxCallbacks : public GrpcMuxCallbacks { public: MockGrpcMuxCallbacks(); - virtual ~MockGrpcMuxCallbacks(); + ~MockGrpcMuxCallbacks(); MOCK_METHOD2(onConfigUpdate, void(const Protobuf::RepeatedPtrField& resources, const std::string& version_info)); @@ -78,5 +79,17 @@ class MockGrpcMuxCallbacks : public GrpcMuxCallbacks { MOCK_METHOD1(resourceName, std::string(const ProtobufWkt::Any& resource)); }; +class MockGrpcStreamCallbacks : public GrpcStreamCallbacks { +public: + MockGrpcStreamCallbacks(); + ~MockGrpcStreamCallbacks(); + + MOCK_METHOD0(onStreamEstablished, void()); + MOCK_METHOD0(onEstablishmentFailure, void()); + MOCK_METHOD1(onDiscoveryResponse, + void(std::unique_ptr&& message)); + MOCK_METHOD0(onWriteable, void()); +}; + } // namespace Config } // namespace Envoy From 8e03f31f02206c91fb67526fb45c672821c200a1 Mon Sep 17 00:00:00 2001 From: Fred Douglas <43351173+fredlas@users.noreply.github.com> Date: Tue, 9 Apr 2019 18:14:20 -0400 Subject: [PATCH 082/165] test: some MATCHER_Ps now print nice stuff upon failure (#6524) ProtoEq previously just gave you "[a bunch of bytes in hex] is not equal to [other hex bytes]." I figured RepeatedProtoEq would also benefit. 
While I was in there, I also changed the existing HeaderMapEqualIgnoreOrder printing into the same nice format. Risk Level: none Testing: test only Signed-off-by: Fred Douglas --- test/test_common/utility.h | 40 +++++++++++++++++++++++++++++++++----- 1 file changed, 35 insertions(+), 5 deletions(-) diff --git a/test/test_common/utility.h b/test/test_common/utility.h index 2c1464bf05505..2ed49861ffd79 100644 --- a/test/test_common/utility.h +++ b/test/test_common/utility.h @@ -527,14 +527,44 @@ ApiPtr createApiForTest(Event::TimeSystem& time_system); ApiPtr createApiForTest(Stats::Store& stat_store, Event::TimeSystem& time_system); } // namespace Api -MATCHER_P(HeaderMapEqualIgnoreOrder, rhs, "") { - *result_listener << *rhs << " is not equal to " << *arg; - return TestUtility::headerMapEqualIgnoreOrder(*arg, *rhs); +MATCHER_P(HeaderMapEqualIgnoreOrder, expected, "") { + const bool equal = TestUtility::headerMapEqualIgnoreOrder(*arg, *expected); + if (!equal) { + *result_listener << "\n" + << "========================Expected header map:========================\n" + << *expected + << "-----------------is not equal to actual header map:-----------------\n" + << *arg + << "====================================================================\n"; + } + return equal; } -MATCHER_P(ProtoEq, rhs, "") { return TestUtility::protoEqual(arg, rhs); } +MATCHER_P(ProtoEq, expected, "") { + const bool equal = TestUtility::protoEqual(arg, expected); + if (!equal) { + *result_listener << "\n" + << "==========================Expected proto:===========================\n" + << expected.DebugString() + << "------------------is not equal to actual proto:---------------------\n" + << arg.DebugString() + << "====================================================================\n"; + } + return equal; +} -MATCHER_P(RepeatedProtoEq, rhs, "") { return TestUtility::repeatedPtrFieldEqual(arg, rhs); } +MATCHER_P(RepeatedProtoEq, expected, "") { + const bool equal = 
TestUtility::repeatedPtrFieldEqual(arg, expected); + if (!equal) { + *result_listener << "\n" + << "=======================Expected repeated:===========================\n" + << RepeatedPtrUtil::debugString(expected) << "\n" + << "-----------------is not equal to actual repeated:-------------------\n" + << RepeatedPtrUtil::debugString(arg) << "\n" + << "====================================================================\n"; + } + return equal; +} MATCHER_P(Percent, rhs, "") { envoy::type::FractionalPercent expected; From d44c93a3fcb2e970829051b1a2f39800f652c370 Mon Sep 17 00:00:00 2001 From: Elisha Ziskind Date: Tue, 9 Apr 2019 18:19:56 -0400 Subject: [PATCH 083/165] clang-tidy fixes (#6523) Description: minor fixes to fix spelling and make clang-tidy happy Risk Level: low Testing: unit tests Signed-off-by: Elisha Ziskind --- api/envoy/api/v2/rds.proto | 2 +- source/common/http/path_utility.cc | 4 ++-- .../filters/network/redis_proxy/conn_pool_impl.cc | 8 ++++---- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/api/envoy/api/v2/rds.proto b/api/envoy/api/v2/rds.proto index 5dd58d62d9dc2..18147b68174d3 100644 --- a/api/envoy/api/v2/rds.proto +++ b/api/envoy/api/v2/rds.proto @@ -46,7 +46,7 @@ service RouteDiscoveryService { } // Virtual Host Discovery Service (VHDS) is used to dynamically update the list of virtual hosts for -// a given RouteConfiguration. If VHDS is configured a virtual host list update will be triggerred +// a given RouteConfiguration. If VHDS is configured a virtual host list update will be triggered // during the processing of an HTTP request if a route for the request cannot be resolved. The // :ref:`resource_names_subscribe ` // field contains a list of virtual host names or aliases to track. 
The contents of an alias would diff --git a/source/common/http/path_utility.cc b/source/common/http/path_utility.cc index 74af7039a4bd4..56ce3204a4689 100644 --- a/source/common/http/path_utility.cc +++ b/source/common/http/path_utility.cc @@ -45,10 +45,10 @@ bool PathUtil::canonicalPath(HeaderEntry& path_header) { query_pos == original_path.npos ? absl::string_view{} : absl::string_view{original_path.data() + query_pos, original_path.size() - query_pos}; - if (query_suffix.size() > 0) { + if (!query_suffix.empty()) { normalized_path.insert(normalized_path.end(), query_suffix.begin(), query_suffix.end()); } - path_header.value(std::move(normalized_path)); + path_header.value(normalized_path); return true; } diff --git a/source/extensions/filters/network/redis_proxy/conn_pool_impl.cc b/source/extensions/filters/network/redis_proxy/conn_pool_impl.cc index deaa1147bf869..b5cba06ec52d2 100644 --- a/source/extensions/filters/network/redis_proxy/conn_pool_impl.cc +++ b/source/extensions/filters/network/redis_proxy/conn_pool_impl.cc @@ -25,16 +25,16 @@ InstanceImpl::InstanceImpl( } Common::Redis::Client::PoolRequest* -InstanceImpl::makeRequest(const std::string& key, const Common::Redis::RespValue& value, +InstanceImpl::makeRequest(const std::string& key, const Common::Redis::RespValue& request, Common::Redis::Client::PoolCallbacks& callbacks) { - return tls_->getTyped().makeRequest(key, value, callbacks); + return tls_->getTyped().makeRequest(key, request, callbacks); } Common::Redis::Client::PoolRequest* InstanceImpl::makeRequestToHost(const std::string& host_address, - const Common::Redis::RespValue& value, + const Common::Redis::RespValue& request, Common::Redis::Client::PoolCallbacks& callbacks) { - return tls_->getTyped().makeRequestToHost(host_address, value, callbacks); + return tls_->getTyped().makeRequestToHost(host_address, request, callbacks); } InstanceImpl::ThreadLocalPool::ThreadLocalPool(InstanceImpl& parent, Event::Dispatcher& dispatcher, From 
abd625a6a77f4831a359416489691f0a81af694c Mon Sep 17 00:00:00 2001 From: Wayne Zhang Date: Tue, 9 Apr 2019 17:44:03 -0700 Subject: [PATCH 084/165] jwt_authn: allow Jwt requirement to be specified by other filters via filterState (#6398) Read a string value from stream_info.FilterState, and use it to look up a JwtRequirement map in the filter config. This is the PR to implement #6399 Risk Level: Low Signed-off-by: Wayne Zhang --- .../http/jwt_authn/v2alpha/config.proto | 32 ++++ docs/root/intro/version_history.rst | 1 + .../extensions/filters/http/jwt_authn/BUILD | 1 + .../filters/http/jwt_authn/filter.cc | 3 +- .../filters/http/jwt_authn/filter_config.h | 28 +++- test/extensions/filters/http/jwt_authn/BUILD | 16 ++ .../http/jwt_authn/filter_config_test.cc | 111 +++++++++++++ .../http/jwt_authn/filter_integration_test.cc | 149 +++++++++++++++++- .../filters/http/jwt_authn/filter_test.cc | 12 +- 9 files changed, 342 insertions(+), 11 deletions(-) create mode 100644 test/extensions/filters/http/jwt_authn/filter_config_test.cc diff --git a/api/envoy/config/filter/http/jwt_authn/v2alpha/config.proto b/api/envoy/config/filter/http/jwt_authn/v2alpha/config.proto index f63e834d157e0..2f8a0ec29c170 100644 --- a/api/envoy/config/filter/http/jwt_authn/v2alpha/config.proto +++ b/api/envoy/config/filter/http/jwt_authn/v2alpha/config.proto @@ -339,6 +339,32 @@ message RequirementRule { JwtRequirement requires = 2; } +// This message specifies Jwt requirements based on stream_info.filterState. +// This FilterState should use `Router::StringAccessor` object to set a string value. +// Other HTTP filters can use it to specify Jwt requirements dynamically. +// +// Example: +// +// .. 
code-block:: yaml +// +// name: jwt_selector +// requires: +// issuer_1: +// provider_name: issuer1 +// issuer_2: +// provider_name: issuer2 +// +// If a filter set "jwt_selector" with "issuer_1" to FilterState for a request, +// jwt_authn filter will use JwtRequirement{"provider_name": "issuer1"} to verify. +message FilterStateRule { + // The filter state name to retrieve the `Router::StringAccessor` object. + string name = 1 [(validate.rules).string.min_bytes = 1]; + + // A map of string keys to requirements. The string key is the string value + // in the FilterState with the name specified in the *name* field above. + map requires = 3; +} + // This is the Envoy HTTP filter config for JWT authentication. // // For example: @@ -432,4 +458,10 @@ message JwtAuthentication { // - provider_name: provider2 // repeated RequirementRule rules = 2; + + // This message specifies Jwt requirements based on stream_info.filterState. + // Other HTTP filters can use it to specify Jwt requirements dynamically. + // The *rules* field above is checked first, if it could not find any matches, + // check this one. + FilterStateRule filter_state_rules = 3; } diff --git a/docs/root/intro/version_history.rst b/docs/root/intro/version_history.rst index c0498d313c85e..98a5bbc05314c 100644 --- a/docs/root/intro/version_history.rst +++ b/docs/root/intro/version_history.rst @@ -57,6 +57,7 @@ Version history * outlier_detection: added support for :ref:`outlier detection event protobuf-based logging `. * mysql: added a MySQL proxy filter that is capable of parsing SQL queries over MySQL wire protocol. Refer to :ref:`MySQL proxy` for more details. * performance: new buffer implementation (disabled by default; to test it, add "--use-libevent-buffers 0" to the command-line arguments when starting Envoy). +* jwt_authn: added :ref:`filter_state_rules ` to allow specifying requirements from filterState by other filters. * ratelimit: removed deprecated rate limit configuration from bootstrap. 
* redis: added :ref:`hashtagging ` to guarantee a given key's upstream. * redis: added :ref:`latency stats ` for commands. diff --git a/source/extensions/filters/http/jwt_authn/BUILD b/source/extensions/filters/http/jwt_authn/BUILD index c2db41e23144c..b566159039b3c 100644 --- a/source/extensions/filters/http/jwt_authn/BUILD +++ b/source/extensions/filters/http/jwt_authn/BUILD @@ -100,6 +100,7 @@ envoy_cc_library( deps = [ ":jwks_cache_lib", ":matchers_lib", + "//include/envoy/router:string_accessor_interface", "//include/envoy/server:filter_config_interface", "//include/envoy/stats:stats_macros", "//include/envoy/thread_local:thread_local_interface", diff --git a/source/extensions/filters/http/jwt_authn/filter.cc b/source/extensions/filters/http/jwt_authn/filter.cc index 57e4e4347b788..27a7aa72bb1fb 100644 --- a/source/extensions/filters/http/jwt_authn/filter.cc +++ b/source/extensions/filters/http/jwt_authn/filter.cc @@ -26,7 +26,8 @@ Http::FilterHeadersStatus Filter::decodeHeaders(Http::HeaderMap& headers, bool) state_ = Calling; stopped_ = false; // Verify the JWT token, onComplete() will be called when completed. 
- const auto* verifier = config_->findVerifier(headers); + const auto* verifier = + config_->findVerifier(headers, decoder_callbacks_->streamInfo().filterState()); if (!verifier) { onComplete(Status::Ok); } else { diff --git a/source/extensions/filters/http/jwt_authn/filter_config.h b/source/extensions/filters/http/jwt_authn/filter_config.h index 3fb73ffec8dd1..622a1e50f5c4d 100644 --- a/source/extensions/filters/http/jwt_authn/filter_config.h +++ b/source/extensions/filters/http/jwt_authn/filter_config.h @@ -1,6 +1,7 @@ #pragma once #include "envoy/api/api.h" +#include "envoy/router/string_accessor.h" #include "envoy/server/filter_config.h" #include "envoy/stats/scope.h" #include "envoy/stats/stats_macros.h" @@ -9,6 +10,8 @@ #include "extensions/filters/http/jwt_authn/matcher.h" #include "extensions/filters/http/jwt_authn/verifier.h" +#include "absl/container/flat_hash_map.h" + namespace Envoy { namespace Extensions { namespace HttpFilters { @@ -77,6 +80,15 @@ class FilterConfig : public Logger::Loggable, public AuthFac Matcher::create(rule), Verifier::create(rule.requires(), proto_config_.providers(), *this, getExtractor())); } + + if (proto_config_.has_filter_state_rules()) { + filter_state_name_ = proto_config_.filter_state_rules().name(); + for (const auto& it : proto_config_.filter_state_rules().requires()) { + filter_state_verifiers_.emplace( + it.first, + Verifier::create(it.second, proto_config_.providers(), *this, getExtractor())); + } + } } JwtAuthnFilterStats& stats() { return stats_; } @@ -97,12 +109,22 @@ class FilterConfig : public Logger::Loggable, public AuthFac const Extractor& getExtractor() const { return *extractor_; } // Finds the matcher that matched the header - virtual const Verifier* findVerifier(const Http::HeaderMap& headers) const { + virtual const Verifier* findVerifier(const Http::HeaderMap& headers, + const StreamInfo::FilterState& filter_state) const { for (const auto& pair : rule_pairs_) { if (pair.matcher_->matches(headers)) { 
return pair.verifier_.get(); } } + if (!filter_state_name_.empty() && !filter_state_verifiers_.empty() && + filter_state.hasData(filter_state_name_)) { + const auto& state = filter_state.getDataReadOnly(filter_state_name_); + ENVOY_LOG(debug, "use filter state value {} to find verifier.", state.asString()); + const auto& it = filter_state_verifiers_.find(state.asString()); + if (it != filter_state_verifiers_.end()) { + return it->second.get(); + } + } return nullptr; } @@ -139,6 +161,10 @@ class FilterConfig : public Logger::Loggable, public AuthFac ExtractorConstPtr extractor_; // The list of rule matchers. std::vector rule_pairs_; + // The filter state name to lookup filter_state_rules. + std::string filter_state_name_; + // The filter state verifier map from filter_state_rules. + absl::flat_hash_map filter_state_verifiers_; TimeSource& time_source_; Api::Api& api_; }; diff --git a/test/extensions/filters/http/jwt_authn/BUILD b/test/extensions/filters/http/jwt_authn/BUILD index c09a8a9a97749..a9c149ad06a5f 100644 --- a/test/extensions/filters/http/jwt_authn/BUILD +++ b/test/extensions/filters/http/jwt_authn/BUILD @@ -51,6 +51,19 @@ envoy_extension_cc_test( ], ) +envoy_extension_cc_test( + name = "filter_config_test", + srcs = ["filter_config_test.cc"], + extension_name = "envoy.filters.http.jwt_authn", + deps = [ + "//source/common/router:string_accessor_lib", + "//source/common/stream_info:filter_state_lib", + "//source/extensions/filters/http/jwt_authn:config", + "//test/extensions/filters/http/jwt_authn:test_common_lib", + "//test/mocks/server:server_mocks", + ], +) + envoy_extension_cc_test( name = "filter_factory_test", srcs = ["filter_factory_test.cc"], @@ -98,6 +111,9 @@ envoy_extension_cc_test( srcs = ["filter_integration_test.cc"], extension_name = "envoy.filters.http.jwt_authn", deps = [ + "//source/common/router:string_accessor_lib", + "//source/extensions/filters/http/common:empty_http_filter_config_lib", + 
"//source/extensions/filters/http/common:pass_through_filter_lib", "//source/extensions/filters/http/jwt_authn:config", "//test/config:utility_lib", "//test/extensions/filters/http/jwt_authn:test_common_lib", diff --git a/test/extensions/filters/http/jwt_authn/filter_config_test.cc b/test/extensions/filters/http/jwt_authn/filter_config_test.cc new file mode 100644 index 0000000000000..65c4382026885 --- /dev/null +++ b/test/extensions/filters/http/jwt_authn/filter_config_test.cc @@ -0,0 +1,111 @@ +#include "common/router/string_accessor_impl.h" +#include "common/stream_info/filter_state_impl.h" + +#include "extensions/filters/http/jwt_authn/filter_config.h" + +#include "test/extensions/filters/http/jwt_authn/test_common.h" +#include "test/mocks/server/mocks.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +using ::envoy::api::v2::core::Metadata; +using ::envoy::config::filter::http::jwt_authn::v2alpha::JwtAuthentication; + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace JwtAuthn { +namespace { + +TEST(HttpJwtAuthnFilterConfigTest, FindByMatch) { + const char config[] = R"( +providers: + provider1: + issuer: issuer1 + local_jwks: + inline_string: jwks +rules: +- match: + path: /path1 + requires: + provider_name: provider1 +)"; + + JwtAuthentication proto_config; + MessageUtil::loadFromYaml(config, proto_config); + + NiceMock context; + FilterConfig filter_conf(proto_config, "", context); + + StreamInfo::FilterStateImpl filter_state; + EXPECT_TRUE(filter_conf.findVerifier( + Http::TestHeaderMapImpl{ + {":method", "GET"}, + {":path", "/path1"}, + }, + filter_state) != nullptr); + + EXPECT_TRUE(filter_conf.findVerifier( + Http::TestHeaderMapImpl{ + {":method", "GET"}, + {":path", "/path2"}, + }, + filter_state) == nullptr); +} + +TEST(HttpJwtAuthnFilterConfigTest, FindByFilterState) { + const char config[] = R"( +providers: + provider1: + issuer: issuer1 + local_jwks: + inline_string: jwks + provider2: + issuer: issuer2 + 
local_jwks: + inline_string: jwks +filter_state_rules: + name: jwt_selector + requires: + selector1: + provider_name: provider1 + selector2: + provider_name: provider2 +)"; + + JwtAuthentication proto_config; + MessageUtil::loadFromYaml(config, proto_config); + + NiceMock context; + FilterConfig filter_conf(proto_config, "", context); + + // Empty filter_state + StreamInfo::FilterStateImpl filter_state1; + EXPECT_TRUE(filter_conf.findVerifier(Http::TestHeaderMapImpl(), filter_state1) == nullptr); + + // Wrong selector + StreamInfo::FilterStateImpl filter_state2; + filter_state2.setData("jwt_selector", + std::make_unique("wrong_selector"), + StreamInfo::FilterState::StateType::ReadOnly); + EXPECT_TRUE(filter_conf.findVerifier(Http::TestHeaderMapImpl(), filter_state2) == nullptr); + + // correct selector + StreamInfo::FilterStateImpl filter_state3; + filter_state3.setData("jwt_selector", std::make_unique("selector1"), + StreamInfo::FilterState::StateType::ReadOnly); + EXPECT_TRUE(filter_conf.findVerifier(Http::TestHeaderMapImpl(), filter_state3) != nullptr); + + // correct selector + StreamInfo::FilterStateImpl filter_state4; + filter_state4.setData("jwt_selector", std::make_unique("selector2"), + StreamInfo::FilterState::StateType::ReadOnly); + EXPECT_TRUE(filter_conf.findVerifier(Http::TestHeaderMapImpl(), filter_state4) != nullptr); +} + +} // namespace +} // namespace JwtAuthn +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/http/jwt_authn/filter_integration_test.cc b/test/extensions/filters/http/jwt_authn/filter_integration_test.cc index 7693ceb3eae74..c7c5d0ad2f026 100644 --- a/test/extensions/filters/http/jwt_authn/filter_integration_test.cc +++ b/test/extensions/filters/http/jwt_authn/filter_integration_test.cc @@ -1,5 +1,9 @@ #include "envoy/config/filter/http/jwt_authn/v2alpha/config.pb.h" +#include "common/router/string_accessor_impl.h" + +#include 
"extensions/filters/http/common/empty_http_filter_config.h" +#include "extensions/filters/http/common/pass_through_filter.h" #include "extensions/filters/http/well_known_names.h" #include "test/extensions/filters/http/jwt_authn/test_common.h" @@ -14,9 +18,50 @@ namespace HttpFilters { namespace JwtAuthn { namespace { -std::string getFilterConfig(bool use_local_jwks) { +const char HeaderToFilterStateFilterName[] = "envoy.filters.http.header_to_filter_state_for_test"; + +// This filter extracts a string header from "header" and +// save it into FilterState as name "state" as read-only Router::StringAccessor. +class HeaderToFilterStateFilter : public Http::PassThroughDecoderFilter { +public: + HeaderToFilterStateFilter(const std::string& header, const std::string& state) + : header_(header), state_(state) {} + + Http::FilterHeadersStatus decodeHeaders(Http::HeaderMap& headers, bool) override { + const Http::HeaderEntry* entry = headers.get(header_); + if (entry) { + decoder_callbacks_->streamInfo().filterState().setData( + state_, std::make_unique(entry->value().getStringView()), + StreamInfo::FilterState::StateType::ReadOnly); + } + return Http::FilterHeadersStatus::Continue; + } + +private: + Http::LowerCaseString header_; + std::string state_; +}; + +class HeaderToFilterStateFilterConfig : public Common::EmptyHttpFilterConfig { +public: + HeaderToFilterStateFilterConfig() + : Common::EmptyHttpFilterConfig(HeaderToFilterStateFilterName) {} + + Http::FilterFactoryCb createFilter(const std::string&, Server::Configuration::FactoryContext&) { + return [](Http::FilterChainFactoryCallbacks& callbacks) -> void { + callbacks.addStreamDecoderFilter( + std::make_shared("jwt_selector", "jwt_selector")); + }; + } +}; + +// perform static registration +REGISTER_FACTORY(HeaderToFilterStateFilterConfig, + Server::Configuration::NamedHttpFilterConfigFactory); + +std::string getAuthFilterConfig(const std::string& config_str, bool use_local_jwks) { JwtAuthentication proto_config; - 
MessageUtil::loadFromYaml(ExampleConfig, proto_config); + MessageUtil::loadFromYaml(config_str, proto_config); if (use_local_jwks) { auto& provider0 = (*proto_config.mutable_providers())[std::string(ProviderName)]; @@ -31,6 +76,10 @@ std::string getFilterConfig(bool use_local_jwks) { return MessageUtil::getJsonStringFromMessage(filter); } +std::string getFilterConfig(bool use_local_jwks) { + return getAuthFilterConfig(ExampleConfig, use_local_jwks); +} + typedef HttpProtocolIntegrationTest LocalJwksIntegrationTest; INSTANTIATE_TEST_SUITE_P(Protocols, LocalJwksIntegrationTest, @@ -85,6 +134,24 @@ TEST_P(LocalJwksIntegrationTest, ExpiredToken) { EXPECT_STREQ("401", response->headers().Status()->value().c_str()); } +TEST_P(LocalJwksIntegrationTest, MissingToken) { + config_helper_.addFilter(getFilterConfig(true)); + initialize(); + + codec_client_ = makeHttpConnection(lookupPort("http")); + + auto response = codec_client_->makeHeaderOnlyRequest(Http::TestHeaderMapImpl{ + {":method", "GET"}, + {":path", "/"}, + {":scheme", "http"}, + {":authority", "host"}, + }); + + response->waitForEndStream(); + ASSERT_TRUE(response->complete()); + EXPECT_STREQ("401", response->headers().Status()->value().c_str()); +} + TEST_P(LocalJwksIntegrationTest, ExpiredTokenHeadReply) { config_helper_.addFilter(getFilterConfig(true)); initialize(); @@ -128,6 +195,84 @@ TEST_P(LocalJwksIntegrationTest, NoRequiresPath) { EXPECT_STREQ("200", response->headers().Status()->value().c_str()); } +// This test verifies JwtRequirement specified from filter state rules +TEST_P(LocalJwksIntegrationTest, FilterStateRequirement) { + // A config with metadata rules.
+ const std::string auth_filter_conf = R"( + providers: + example_provider: + issuer: https://example.com + audiences: + - example_service + filter_state_rules: + name: jwt_selector + requires: + example_provider: + provider_name: example_provider +)"; + + config_helper_.addFilter(getAuthFilterConfig(auth_filter_conf, true)); + config_helper_.addFilter(absl::StrCat("name: ", HeaderToFilterStateFilterName)); + initialize(); + + codec_client_ = makeHttpConnection(lookupPort("http")); + + struct TestCase { + std::vector> extra_headers; + std::string expected_status; + }; + + const TestCase test_cases[] = { + // Case1: not set metadata, so Jwt is not required, expect 200 + { + // Empty extra headers + {}, + "200", + }, + + // Case2: requirement is set in the metadata, but missing token, expect 401 + { + // selector header, but not token header + { + {"jwt_selector", "example_provider"}, + }, + "401", + }, + + // Case 3: requirement is set in the metadata, token is good, expect 200 + { + // selector header, and token header + { + {"jwt_selector", "example_provider"}, + {"Authorization", "Bearer " + std::string(GoodToken)}, + }, + "200", + }, + }; + + for (const auto& test : test_cases) { + Http::TestHeaderMapImpl headers{ + {":method", "GET"}, + {":path", "/foo"}, + {":scheme", "http"}, + {":authority", "host"}, + }; + for (const auto& h : test.extra_headers) { + headers.addCopy(h.first, h.second); + } + auto response = codec_client_->makeHeaderOnlyRequest(headers); + + if (test.expected_status == "200") { + waitForNextUpstreamRequest(); + upstream_request_->encodeHeaders(Http::TestHeaderMapImpl{{":status", "200"}}, true); + } + + response->waitForEndStream(); + ASSERT_TRUE(response->complete()); + EXPECT_EQ(test.expected_status, response->headers().Status()->value().c_str()); + } +} + // The test case with a fake upstream for remote Jwks server. 
class RemoteJwksIntegrationTest : public HttpProtocolIntegrationTest { public: diff --git a/test/extensions/filters/http/jwt_authn/filter_test.cc b/test/extensions/filters/http/jwt_authn/filter_test.cc index 04a661a7b101b..ecfcd6c9582f9 100644 --- a/test/extensions/filters/http/jwt_authn/filter_test.cc +++ b/test/extensions/filters/http/jwt_authn/filter_test.cc @@ -13,6 +13,7 @@ using ::google::jwt_verify::Status; using testing::_; using testing::Invoke; +using testing::Return; namespace Envoy { namespace Extensions { @@ -31,7 +32,8 @@ class MockFilterConfig : public FilterConfig { const ::envoy::config::filter::http::jwt_authn::v2alpha::JwtAuthentication& proto_config, const std::string& stats_prefix, Server::Configuration::FactoryContext& context) : FilterConfig(proto_config, stats_prefix, context) {} - MOCK_CONST_METHOD1(findVerifier, const Verifier*(const Http::HeaderMap& headers)); + MOCK_CONST_METHOD2(findVerifier, const Verifier*(const Http::HeaderMap& headers, + const StreamInfo::FilterState& filter_state)); }; class FilterTest : public testing::Test { @@ -45,9 +47,7 @@ class FilterTest : public testing::Test { } void setupMockConfig() { - EXPECT_CALL(*mock_config_.get(), findVerifier(_)).WillOnce(Invoke([&](const Http::HeaderMap&) { - return mock_verifier_.get(); - })); + EXPECT_CALL(*mock_config_.get(), findVerifier(_, _)).WillOnce(Return(mock_verifier_.get())); } JwtAuthentication proto_config_; @@ -175,9 +175,7 @@ TEST_F(FilterTest, OutBoundFailure) { // Test verifies that if no route matched requirement, then request is allowed. 
TEST_F(FilterTest, TestNoRouteMatched) { - EXPECT_CALL(*mock_config_.get(), findVerifier(_)).WillOnce(Invoke([&](const Http::HeaderMap&) { - return nullptr; - })); + EXPECT_CALL(*mock_config_.get(), findVerifier(_, _)).WillOnce(Return(nullptr)); auto headers = Http::TestHeaderMapImpl{}; EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, false)); From cfc514546bc0284536893cca5fa43d7128edcd35 Mon Sep 17 00:00:00 2001 From: Lizan Zhou Date: Tue, 9 Apr 2019 20:58:53 -0700 Subject: [PATCH 085/165] build_image: install clang-8 (#6533) Signed-off-by: Lizan Zhou --- ci/build_container/build_container_centos.sh | 2 +- ci/build_container/build_container_ubuntu.sh | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/ci/build_container/build_container_centos.sh b/ci/build_container/build_container_centos.sh index abc52e5e325d0..bf45bcc22a658 100755 --- a/ci/build_container/build_container_centos.sh +++ b/ci/build_container/build_container_centos.sh @@ -21,7 +21,7 @@ chmod u+x "./${BAZEL_INSTALLER}" rm "./${BAZEL_INSTALLER}" # SLES 11 has older glibc than CentOS 7, so pre-built binary for it works on CentOS 7 -LLVM_VERSION=7.0.1 +LLVM_VERSION=8.0.0 LLVM_RELEASE="clang+llvm-${LLVM_VERSION}-x86_64-linux-sles11.3" curl -OL "https://releases.llvm.org/${LLVM_VERSION}/${LLVM_RELEASE}.tar.xz" tar Jxf "${LLVM_RELEASE}.tar.xz" diff --git a/ci/build_container/build_container_ubuntu.sh b/ci/build_container/build_container_ubuntu.sh index 3f4b3f0b5f638..e66061523915c 100755 --- a/ci/build_container/build_container_ubuntu.sh +++ b/ci/build_container/build_container_ubuntu.sh @@ -7,11 +7,11 @@ apt-get update export DEBIAN_FRONTEND=noninteractive apt-get install -y wget software-properties-common make cmake git python python-pip python3 python3-pip \ unzip bc libtool ninja-build automake zip time golang gdb strace wireshark tshark tcpdump lcov -# clang 7. +# clang 8. 
wget -O - http://apt.llvm.org/llvm-snapshot.gpg.key | apt-key add - -apt-add-repository "deb http://apt.llvm.org/xenial/ llvm-toolchain-xenial-7 main" +apt-add-repository "deb http://apt.llvm.org/xenial/ llvm-toolchain-xenial-8 main" apt-get update -apt-get install -y clang-7 clang-format-7 clang-tidy-7 lld-7 libc++-7-dev libc++abi-7-dev +apt-get install -y clang-8 clang-format-8 clang-tidy-8 lld-8 libc++-8-dev libc++abi-8-dev # gcc-7 add-apt-repository -y ppa:ubuntu-toolchain-r/test apt update From e03f6f7629ea563a56f8cb024558a8bce0fbfc1d Mon Sep 17 00:00:00 2001 From: John Howard Date: Wed, 10 Apr 2019 05:12:16 -0700 Subject: [PATCH 086/165] docs: update load_balancing_weight behavior description (#6467) The documentation before was misleading; the weights are not actually multiplied together. This change brings the docs here in line with the actual behavior and the docs in other places Signed-off-by: John Howard --- api/envoy/api/v2/eds.proto | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/api/envoy/api/v2/eds.proto b/api/envoy/api/v2/eds.proto index 6a687ce9aa1a9..54f9d08c6f843 100644 --- a/api/envoy/api/v2/eds.proto +++ b/api/envoy/api/v2/eds.proto @@ -43,9 +43,10 @@ service EndpointDiscoveryService { // // With EDS, each cluster is treated independently from a LB perspective, with // LB taking place between the Localities within a cluster and at a finer -// granularity between the hosts within a locality. For a given cluster, the -// effective weight of a host is its load_balancing_weight multiplied by the -// load_balancing_weight of its Locality. +// granularity between the hosts within a locality. The percentage of traffic +// for each endpoint is determined by both its load_balancing_weight, and the +// load_balancing_weight of its locality. First, a locality will be selected, +// then an endpoint within that locality will be chose based on its weight. message ClusterLoadAssignment { // Name of the cluster. 
This will be the :ref:`service_name // ` value if specified From 19894aca58f21f73dfbdb939ce8597c58de8a574 Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Wed, 10 Apr 2019 09:20:49 -0400 Subject: [PATCH 087/165] release: flipping deprecated features to fatal-by-default (#6509) Making the following deprecated fields fatal-by-default UNSUPPORTED_REST_LEGACY from config_source.proto use_alpha from ext_authz.proto enabled from route.proto type from fault.proto runtime_key from route.proto Risk Level: High (first time using this process - it will likely cause problems for someone) Testing: tests pass. Docs Changes: n/a Release Notes: no Signed-off-by: Alyssa Wilk --- api/envoy/config/filter/network/tcp_proxy/v2/tcp_proxy.proto | 3 ++- source/common/runtime/runtime_features.cc | 5 +++++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/api/envoy/config/filter/network/tcp_proxy/v2/tcp_proxy.proto b/api/envoy/config/filter/network/tcp_proxy/v2/tcp_proxy.proto index 12ce0d2757896..9eb8f4f078173 100644 --- a/api/envoy/config/filter/network/tcp_proxy/v2/tcp_proxy.proto +++ b/api/envoy/config/filter/network/tcp_proxy/v2/tcp_proxy.proto @@ -172,7 +172,8 @@ message TcpProxy { // list of strings with each string in CIDR notation. Source and destination ports are // specified as single strings containing a comma-separated list of ports and/or port ranges. // - DeprecatedV1 deprecated_v1 = 6 [deprecated = true]; + // Deprecation pending https://github.com/envoyproxy/envoy/issues/4457 + DeprecatedV1 deprecated_v1 = 6; // The maximum number of unsuccessful connection attempts that will be made before // giving up. If the parameter is not specified, 1 connection attempt will be made. 
diff --git a/source/common/runtime/runtime_features.cc b/source/common/runtime/runtime_features.cc index 7e63701b1c882..7ea7af077bc58 100644 --- a/source/common/runtime/runtime_features.cc +++ b/source/common/runtime/runtime_features.cc @@ -47,6 +47,11 @@ constexpr const char* disallowed_features[] = { // Acts as both a test entry for deprecated.proto and a marker for the Envoy // deprecation scripts. "envoy.deprecated_features.deprecated.proto:is_deprecated_fatal", + "envoy.deprecated_features.config_source.proto:UNSUPPORTED_REST_LEGACY", + "envoy.deprecated_features.ext_authz.proto:use_alpha", + "envoy.deprecated_features.route.proto:enabled", + "envoy.deprecated_features.fault.proto:type", + "envoy.deprecated_features.route.proto:runtime_key", }; RuntimeFeatures::RuntimeFeatures() { From 6d1e3af228e01fecee49ccbd7bf130562611b780 Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Wed, 10 Apr 2019 11:13:54 -0400 Subject: [PATCH 088/165] runtime: fixing a stat bug (#6538) Fixing a bug where we were over-enthusiastically treating all fields as deprecated for stats, not just deprecated fields. Risk Level: low (stats bugfix) Testing: new unit test Docs Changes: n/a Release Notes: n/a Signed-off-by: Alyssa Wilk --- source/common/protobuf/utility.cc | 5 +++-- test/common/protobuf/utility_test.cc | 2 ++ 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/source/common/protobuf/utility.cc b/source/common/protobuf/utility.cc index 9c33b50a38e2a..cf5bcfbe74021 100644 --- a/source/common/protobuf/utility.cc +++ b/source/common/protobuf/utility.cc @@ -157,8 +157,9 @@ void MessageUtil::checkForDeprecation(const Protobuf::Message& message, Runtime: // Allow runtime to be null both to not crash if this is called before server initialization, // and so proto validation works in context where runtime singleton is not set up (e.g. 
// standalone config validation utilities) - if (runtime && !runtime->snapshot().deprecatedFeatureEnabled( - absl::StrCat("envoy.deprecated_features.", filename, ":", field->name()))) { + if (runtime && field->options().deprecated() && + !runtime->snapshot().deprecatedFeatureEnabled( + absl::StrCat("envoy.deprecated_features.", filename, ":", field->name()))) { warn_only = false; } diff --git a/test/common/protobuf/utility_test.cc b/test/common/protobuf/utility_test.cc index ebd17ad9699c5..aa674c29bf99e 100644 --- a/test/common/protobuf/utility_test.cc +++ b/test/common/protobuf/utility_test.cc @@ -472,6 +472,7 @@ TEST_F(DeprecatedFieldsTest, NoErrorWhenDeprecatedFieldsUnused) { base.set_not_deprecated("foo"); // Fatal checks for a non-deprecated field should cause no problem. MessageUtil::checkForDeprecation(base); + EXPECT_EQ(0, store_.gauge("runtime.deprecated_feature_use").value()); } TEST_F(DeprecatedFieldsTest, IndividualFieldDeprecated) { @@ -481,6 +482,7 @@ TEST_F(DeprecatedFieldsTest, IndividualFieldDeprecated) { EXPECT_LOG_CONTAINS("warning", "Using deprecated option 'envoy.test.deprecation_test.Base.is_deprecated'", MessageUtil::checkForDeprecation(base)); + EXPECT_EQ(1, store_.gauge("runtime.deprecated_feature_use").value()); } // Use of a deprecated and disallowed field should result in an exception. 
From d826a89fb2700d99c6da3705cfbc450196eb8982 Mon Sep 17 00:00:00 2001 From: Derek Date: Wed, 10 Apr 2019 09:56:46 -0700 Subject: [PATCH 089/165] tests: add test for invalid cors config on route (#6531) Signed-off-by: Derek Schaller --- test/common/router/config_impl_test.cc | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/test/common/router/config_impl_test.cc b/test/common/router/config_impl_test.cc index dc2eaf2f178e6..86d5bd0fa422b 100644 --- a/test/common/router/config_impl_test.cc +++ b/test/common/router/config_impl_test.cc @@ -4137,6 +4137,30 @@ TEST_F(RoutePropertyTest, TestRouteCorsLegacyConfig) { EXPECT_EQ(cors_policy->allowCredentials(), true); } +TEST_F(RoutePropertyTest, TestBadCorsConfig) { + const std::string yaml = R"EOF( +virtual_hosts: +- name: default + domains: + - "*" + routes: + - match: + prefix: "/api" + route: + cluster: ats + cors: + enabled: 0 +)EOF"; + + EXPECT_THROW_WITH_MESSAGE( + TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), + EnvoyException, + "Unable to parse JSON as proto " + "(INVALID_ARGUMENT:(virtual_hosts[0].routes[0].route.cors.enabled.value): invalid value 0 " + "for type TYPE_BOOL): " + + Json::Factory::loadFromYamlString(yaml)->asJsonString()); +} + TEST_F(RouteMatcherTest, Decorator) { const std::string yaml = R"EOF( virtual_hosts: From 694136f1ccb9a6f31312950bc5960eb840991d14 Mon Sep 17 00:00:00 2001 From: Derek Argueta Date: Wed, 10 Apr 2019 09:57:46 -0700 Subject: [PATCH 090/165] test: cds api test configs to v2 (#6535) Signed-off-by: Derek Argueta --- test/common/upstream/BUILD | 2 ++ test/common/upstream/cds_api_impl_test.cc | 34 +++++++++++------------ 2 files changed, 18 insertions(+), 18 deletions(-) diff --git a/test/common/upstream/BUILD b/test/common/upstream/BUILD index b65aa9ef552ab..912895f9c0932 100644 --- a/test/common/upstream/BUILD +++ b/test/common/upstream/BUILD @@ -19,10 +19,12 @@ envoy_cc_test( "//source/common/config:utility_lib", 
"//source/common/http:message_lib", "//source/common/json:json_loader_lib", + "//source/common/protobuf:utility_lib", "//source/common/upstream:cds_api_lib", "//test/mocks/local_info:local_info_mocks", "//test/mocks/upstream:upstream_mocks", "//test/test_common:utility_lib", + "@envoy_api//envoy/api/v2/core:config_source_cc", ], ) diff --git a/test/common/upstream/cds_api_impl_test.cc b/test/common/upstream/cds_api_impl_test.cc index 917e261b50731..83fbd093d8330 100644 --- a/test/common/upstream/cds_api_impl_test.cc +++ b/test/common/upstream/cds_api_impl_test.cc @@ -3,9 +3,12 @@ #include #include +#include "envoy/api/v2/core/config_source.pb.validate.h" + #include "common/config/utility.h" #include "common/http/message_impl.h" #include "common/json/json_loader.h" +#include "common/protobuf/utility.h" #include "common/upstream/cds_api_impl.h" #include "test/common/upstream/utility.h" @@ -36,19 +39,16 @@ class CdsApiImplTest : public testing::Test { CdsApiImplTest() : request_(&cm_.async_client_), api_(Api::createApiForTest(store_)) {} void setup() { - const std::string config_json = R"EOF( - { - "cluster": { - "name": "foo_cluster" - } - } + const std::string config_yaml = R"EOF( +api_config_source: + cluster_names: + - foo_cluster + refresh_delay: 1s + api_type: REST )EOF"; - Json::ObjectSharedPtr config = Json::Factory::loadFromString(config_json); envoy::api::v2::core::ConfigSource cds_config; - Config::Utility::translateCdsConfig(*config, cds_config); - cds_config.mutable_api_config_source()->set_api_type( - envoy::api::v2::core::ApiConfigSource::REST); + MessageUtil::loadFromYamlAndValidate(config_yaml, cds_config); cluster_map_.emplace("foo_cluster", mock_cluster_); EXPECT_CALL(cm_, clusters()).WillRepeatedly(Return(cluster_map_)); EXPECT_CALL(mock_cluster_, info()).Times(AnyNumber()); @@ -288,19 +288,17 @@ TEST_F(CdsApiImplTest, ConfigUpdateAddsSecondClusterEvenIfFirstThrows) { } TEST_F(CdsApiImplTest, InvalidOptions) { - const std::string config_json = 
R"EOF( - { - "cluster": { - "name": "foo_cluster" - } - } + const std::string config_yaml = R"EOF( +api_config_source: + cluster_names: + - foo_cluster + refresh_delay: 1s )EOF"; - Json::ObjectSharedPtr config = Json::Factory::loadFromString(config_json); local_info_.node_.set_cluster(""); local_info_.node_.set_id(""); envoy::api::v2::core::ConfigSource cds_config; - Config::Utility::translateCdsConfig(*config, cds_config); + MessageUtil::loadFromYamlAndValidate(config_yaml, cds_config); EXPECT_THROW( CdsApiImpl::create(cds_config, cm_, dispatcher_, random_, local_info_, store_, *api_), EnvoyException); From d931ad1d153c6e463951c3a97f8d6af75ecbeebc Mon Sep 17 00:00:00 2001 From: Derek Argueta Date: Wed, 10 Apr 2019 09:58:30 -0700 Subject: [PATCH 091/165] test: update http ratelimit for v2 (#6536) Signed-off-by: Derek Argueta --- test/extensions/filters/http/ratelimit/BUILD | 1 - .../filters/http/ratelimit/config_test.cc | 9 +++++---- .../filters/http/ratelimit/ratelimit_test.cc | 15 --------------- 3 files changed, 5 insertions(+), 20 deletions(-) diff --git a/test/extensions/filters/http/ratelimit/BUILD b/test/extensions/filters/http/ratelimit/BUILD index 38922688bff54..a5823809359d2 100644 --- a/test/extensions/filters/http/ratelimit/BUILD +++ b/test/extensions/filters/http/ratelimit/BUILD @@ -18,7 +18,6 @@ envoy_extension_cc_test( deps = [ "//source/common/buffer:buffer_lib", "//source/common/common:empty_string", - "//source/common/config:filter_json_lib", "//source/common/http:context_lib", "//source/common/http:headers_lib", "//source/extensions/filters/common/ratelimit:ratelimit_lib", diff --git a/test/extensions/filters/http/ratelimit/config_test.cc b/test/extensions/filters/http/ratelimit/config_test.cc index a13ba9e6c5621..4a1c2a9211b1b 100644 --- a/test/extensions/filters/http/ratelimit/config_test.cc +++ b/test/extensions/filters/http/ratelimit/config_test.cc @@ -36,7 +36,7 @@ TEST(RateLimitFilterConfigTest, RatelimitCorrectProto) { )EOF"; 
envoy::config::filter::http::rate_limit::v2::RateLimit proto_config{}; - MessageUtil::loadFromYaml(yaml, proto_config); + MessageUtil::loadFromYamlAndValidate(yaml, proto_config); NiceMock context; @@ -68,12 +68,13 @@ TEST(RateLimitFilterConfigTest, RateLimitFilterEmptyProto) { TEST(RateLimitFilterConfigTest, BadRateLimitFilterConfig) { const std::string yaml = R"EOF( - domain: test - timeout: 20 + domain: foo + route_key: my_route )EOF"; envoy::config::filter::http::rate_limit::v2::RateLimit proto_config{}; - EXPECT_THROW(MessageUtil::loadFromYaml(yaml, proto_config), EnvoyException); + EXPECT_THROW_WITH_REGEX(MessageUtil::loadFromYamlAndValidate(yaml, proto_config), EnvoyException, + "INVALID_ARGUMENT:route_key: Cannot find field"); } } // namespace diff --git a/test/extensions/filters/http/ratelimit/ratelimit_test.cc b/test/extensions/filters/http/ratelimit/ratelimit_test.cc index 895a79292f78c..3f365d2e77993 100644 --- a/test/extensions/filters/http/ratelimit/ratelimit_test.cc +++ b/test/extensions/filters/http/ratelimit/ratelimit_test.cc @@ -4,7 +4,6 @@ #include "common/buffer/buffer_impl.h" #include "common/common/empty_string.h" -#include "common/config/filter_json.h" #include "common/http/context_impl.h" #include "common/http/headers.h" @@ -95,20 +94,6 @@ class HttpRateLimitFilterTest : public testing::Test { Http::ContextImpl http_context_; }; -TEST_F(HttpRateLimitFilterTest, BadConfig) { - const std::string filter_config = R"EOF( - { - "domain": "foo", - "route_key" : "my_route" - } - )EOF"; - - Json::ObjectSharedPtr json_config = Json::Factory::loadFromString(filter_config); - envoy::config::filter::http::rate_limit::v2::RateLimit proto_config{}; - EXPECT_THROW(Config::FilterJson::translateHttpRateLimitFilter(*json_config, proto_config), - Json::Exception); -} - TEST_F(HttpRateLimitFilterTest, NoRoute) { SetUpTest(filter_config_); From f7687d5643f545d42343ed5662294b798bf51f2a Mon Sep 17 00:00:00 2001 From: Yuchen Dai Date: Wed, 10 Apr 2019 10:08:14 -0700 
Subject: [PATCH 092/165] HCM: add compile option of path normalization (#6519) Motivation: alternative approach to enable path normalization add config "--define path_normalization_by_default=true" to bazel With this bazel config, the path normalization in HCM will - ignore the runtime - turn on the normalization if the xDS is not aware of this config Signed-off-by: Yuchen Dai --- bazel/BUILD | 5 +++++ bazel/README.md | 2 ++ bazel/envoy_build_system.bzl | 10 +++++++++- ci/do_ci.sh | 1 + .../network/http_connection_manager/config.cc | 19 ++++++++++++------- .../http_connection_manager/config_test.cc | 15 ++++++++++++--- test/mocks/runtime/mocks.h | 12 ++++++++++++ 7 files changed, 53 insertions(+), 11 deletions(-) diff --git a/bazel/BUILD b/bazel/BUILD index 06dec8d89f45e..90271e7d9699e 100644 --- a/bazel/BUILD +++ b/bazel/BUILD @@ -105,6 +105,11 @@ config_setting( values = {"define": "google_grpc=disabled"}, ) +config_setting( + name = "enable_path_normalization_by_default", + values = {"define": "path_normalization_by_default=true"}, +) + cc_proto_library( name = "grpc_health_proto", deps = ["@com_github_grpc_grpc//src/proto/grpc/health/v1:_health_proto_only"], diff --git a/bazel/README.md b/bazel/README.md index 9a678077f69ee..6429e7174bdc4 100644 --- a/bazel/README.md +++ b/bazel/README.md @@ -366,6 +366,8 @@ The following optional features can be enabled on the Bazel build command-line: release builds so that the condition is not evaluated. This option has no effect in debug builds. * memory-debugging (scribbling over memory after allocation and before freeing) with `--define tcmalloc=debug`. Note this option cannot be used with FIPS-compliant mode BoringSSL. +* Default [path normalization](https://github.com/envoyproxy/envoy/issues/6435) with + `--define path_normalization_by_default=true`. Note this still could be disable by explicit xDS config. 
## Disabling extensions diff --git a/bazel/envoy_build_system.bzl b/bazel/envoy_build_system.bzl index c68fbe0d53262..2b75b1e018fae 100644 --- a/bazel/envoy_build_system.bzl +++ b/bazel/envoy_build_system.bzl @@ -83,7 +83,8 @@ def envoy_copts(repository, test = False): "//conditions:default": [], }) + envoy_select_hot_restart(["-DENVOY_HOT_RESTART"], repository) + \ envoy_select_perf_annotation(["-DENVOY_PERF_ANNOTATION"]) + \ - envoy_select_google_grpc(["-DENVOY_GOOGLE_GRPC"], repository) + envoy_select_google_grpc(["-DENVOY_GOOGLE_GRPC"], repository) + \ + envoy_select_path_normalization_by_default(["-DENVOY_NORMALIZE_PATH_BY_DEFAULT"], repository) def envoy_static_link_libstdcpp_linkopts(): return envoy_select_force_libcpp( @@ -648,6 +649,13 @@ def envoy_select_hot_restart(xs, repository = ""): "//conditions:default": xs, }) +# Select the given values if default path normalization is on in the current build. +def envoy_select_path_normalization_by_default(xs, repository = ""): + return select({ + repository + "//bazel:enable_path_normalization_by_default": xs, + "//conditions:default": [], + }) + def envoy_select_perf_annotation(xs): return select({ "@envoy//bazel:enable_perf_annotation": xs, diff --git a/ci/do_ci.sh b/ci/do_ci.sh index aa2105a0184aa..fa4f1dbb1e254 100755 --- a/ci/do_ci.sh +++ b/ci/do_ci.sh @@ -176,6 +176,7 @@ elif [[ "$1" == "bazel.compile_time_options" ]]; then --define boringssl=fips \ --define log_debug_assert_in_release=enabled \ --define quiche=enabled \ + --define path_normalization_by_default=true \ " setup_clang_toolchain # This doesn't go into CI but is available for developer convenience. 
diff --git a/source/extensions/filters/network/http_connection_manager/config.cc b/source/extensions/filters/network/http_connection_manager/config.cc index 0b0b294913e7d..1ee38efffe569 100644 --- a/source/extensions/filters/network/http_connection_manager/config.cc +++ b/source/extensions/filters/network/http_connection_manager/config.cc @@ -151,13 +151,18 @@ HttpConnectionManagerConfig::HttpConnectionManagerConfig( context_.listenerScope())), proxy_100_continue_(config.proxy_100_continue()), delayed_close_timeout_(PROTOBUF_GET_MS_OR_DEFAULT(config, delayed_close_timeout, 1000)), - normalize_path_( - PROTOBUF_GET_WRAPPED_OR_DEFAULT(config, normalize_path, - // TODO(htuch): we should have a - // boolean variant of featureEnabled() - // here. - context.runtime().snapshot().featureEnabled( - "http_connection_manager.normalize_path", 0))) { + normalize_path_(PROTOBUF_GET_WRAPPED_OR_DEFAULT( + config, normalize_path, + // TODO(htuch): we should have a + // boolean variant of featureEnabled() + // here. 
+ context.runtime().snapshot().featureEnabled("http_connection_manager.normalize_path", +#ifdef ENVOY_NORMALIZE_PATH_BY_DEFAULT + 100 +#else + 0 +#endif + ))) { route_config_provider_ = Router::RouteConfigProviderUtil::create(config, context_, stats_prefix_, route_config_provider_manager_); diff --git a/test/extensions/filters/network/http_connection_manager/config_test.cc b/test/extensions/filters/network/http_connection_manager/config_test.cc index aec8ee0da0b38..6feb80d6d37f4 100644 --- a/test/extensions/filters/network/http_connection_manager/config_test.cc +++ b/test/extensions/filters/network/http_connection_manager/config_test.cc @@ -15,6 +15,7 @@ #include "gtest/gtest.h" using testing::_; +using testing::An; using testing::ContainerEq; using testing::Return; @@ -187,6 +188,7 @@ TEST_F(HttpConnectionManagerConfigTest, DisabledStreamIdleTimeout) { } // Validated that by default we don't normalize paths +// unless set build flag path_normalization_by_default=true TEST_F(HttpConnectionManagerConfigTest, NormalizePathDefault) { const std::string yaml_string = R"EOF( stat_prefix: ingress_http @@ -196,9 +198,16 @@ TEST_F(HttpConnectionManagerConfigTest, NormalizePathDefault) { - name: envoy.router )EOF"; + EXPECT_CALL(context_.runtime_loader_.snapshot_, featureEnabled(_, An())) + .WillOnce(Invoke(&context_.runtime_loader_.snapshot_, + &Runtime::MockSnapshot::featureEnabledDefault)); HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, date_provider_, route_config_provider_manager_); +#ifdef ENVOY_NORMALIZE_PATH_BY_DEFAULT + EXPECT_TRUE(config.shouldNormalizePath()); +#else EXPECT_FALSE(config.shouldNormalizePath()); +#endif } // Validated that we normalize paths with runtime override when not specified. 
@@ -212,7 +221,7 @@ TEST_F(HttpConnectionManagerConfigTest, NormalizePathRuntime) { )EOF"; EXPECT_CALL(context_.runtime_loader_.snapshot_, - featureEnabled("http_connection_manager.normalize_path", 0)) + featureEnabled("http_connection_manager.normalize_path", An())) .WillOnce(Return(true)); HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, date_provider_, route_config_provider_manager_); @@ -231,7 +240,7 @@ TEST_F(HttpConnectionManagerConfigTest, NormalizePathTrue) { )EOF"; EXPECT_CALL(context_.runtime_loader_.snapshot_, - featureEnabled("http_connection_manager.normalize_path", 0)) + featureEnabled("http_connection_manager.normalize_path", An())) .Times(0); HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, date_provider_, route_config_provider_manager_); @@ -250,7 +259,7 @@ TEST_F(HttpConnectionManagerConfigTest, NormalizePathFalse) { )EOF"; EXPECT_CALL(context_.runtime_loader_.snapshot_, - featureEnabled("http_connection_manager.normalize_path", 0)) + featureEnabled("http_connection_manager.normalize_path", An())) .Times(0); HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, date_provider_, route_config_provider_manager_); diff --git a/test/mocks/runtime/mocks.h b/test/mocks/runtime/mocks.h index 151d3e2112ca3..76bac6b3c3923 100644 --- a/test/mocks/runtime/mocks.h +++ b/test/mocks/runtime/mocks.h @@ -27,6 +27,18 @@ class MockSnapshot : public Snapshot { MockSnapshot(); ~MockSnapshot() override; + // Provide a default implementation of mocked featureEnabled/2. + bool featureEnabledDefault(const std::string&, uint64_t default_value) { + if (default_value == 0) { + return false; + } else if (default_value == 100) { + return true; + } else { + throw std::invalid_argument("Not implemented yet. 
You may want to set expectation of mocked " + "featureEnabled() instead."); + } + } + MOCK_CONST_METHOD1(deprecatedFeatureEnabled, bool(const std::string& key)); MOCK_CONST_METHOD1(runtimeFeatureEnabled, bool(absl::string_view key)); MOCK_CONST_METHOD2(featureEnabled, bool(const std::string& key, uint64_t default_value)); From 814dd9facff27391a42744309f7765dc94971e40 Mon Sep 17 00:00:00 2001 From: James Synge Date: Wed, 10 Apr 2019 13:08:36 -0400 Subject: [PATCH 093/165] tidy: Replace x.size() == 0 with x.empty() (#6511) Signed-off-by: James Synge --- .clang-tidy | 4 ++-- source/common/buffer/buffer_impl.cc | 2 +- source/common/common/hex.cc | 2 +- source/common/config/grpc_mux_impl.cc | 2 +- source/common/http/http1/conn_pool.cc | 2 +- source/common/router/config_impl.cc | 6 +++--- source/common/router/header_formatter.cc | 2 +- source/common/router/header_parser.cc | 2 +- source/common/router/rds_impl.cc | 2 +- source/common/router/shadow_writer_impl.cc | 2 +- source/common/upstream/health_checker_impl.cc | 2 +- source/common/upstream/load_balancer_impl.cc | 8 ++++---- source/common/upstream/load_stats_reporter.cc | 2 +- source/common/upstream/original_dst_cluster.cc | 2 +- source/common/upstream/subset_lb.cc | 2 +- source/common/upstream/upstream_impl.cc | 2 +- .../http/header_to_metadata/header_to_metadata_filter.cc | 4 ++-- source/extensions/filters/http/jwt_authn/verifier.cc | 2 +- .../filters/network/http_connection_manager/config.cc | 2 +- .../extensions/tracers/zipkin/span_context_extractor.cc | 2 +- source/extensions/tracers/zipkin/util.cc | 2 +- source/extensions/tracers/zipkin/zipkin_core_types.cc | 2 +- source/extensions/tracers/zipkin/zipkin_tracer_impl.cc | 2 +- source/server/http/admin.cc | 2 +- source/server/listener_manager_impl.cc | 2 +- test/common/upstream/health_checker_impl_test.cc | 2 +- test/common/upstream/load_balancer_impl_test.cc | 2 +- .../header_to_metadata/header_to_metadata_filter_test.cc | 4 ++-- 
.../filters/network/thrift_proxy/integration.cc | 2 +- .../network/zookeeper_proxy/zookeeper_filter_test.cc | 2 +- test/integration/h1_capture_direct_response_fuzz_test.cc | 2 +- test/integration/h1_capture_fuzz_test.cc | 2 +- 32 files changed, 40 insertions(+), 40 deletions(-) diff --git a/.clang-tidy b/.clang-tidy index 0794aa66661f2..a62ee3c944146 100644 --- a/.clang-tidy +++ b/.clang-tidy @@ -1,7 +1,7 @@ -Checks: 'clang-diagnostic-*,clang-analyzer-*,abseil-*,bugprone-*,modernize-*,performance-*,readability-redundant-*,readability-braces-around-statements' +Checks: 'clang-diagnostic-*,clang-analyzer-*,abseil-*,bugprone-*,modernize-*,performance-*,readability-redundant-*,readability-braces-around-statements,readability-container-size-empty' #TODO(lizan): grow this list, fix possible warnings and make more checks as error -WarningsAsErrors: 'bugprone-assert-side-effect,modernize-make-shared,modernize-make-unique,readability-redundant-smartptr-get,readability-braces-around-statements,readability-redundant-string-cstr,bugprone-use-after-move' +WarningsAsErrors: 'bugprone-assert-side-effect,modernize-make-shared,modernize-make-unique,readability-redundant-smartptr-get,readability-braces-around-statements,readability-redundant-string-cstr,bugprone-use-after-move,readability-container-size-empty' CheckOptions: - key: bugprone-assert-side-effect.AssertMacros diff --git a/source/common/buffer/buffer_impl.cc b/source/common/buffer/buffer_impl.cc index 88253bdd124b4..bdd3e0a9536bd 100644 --- a/source/common/buffer/buffer_impl.cc +++ b/source/common/buffer/buffer_impl.cc @@ -69,7 +69,7 @@ void OwnedImpl::prepend(absl::string_view data) { // only seems to happen the original buffer was created via // addBufferFragment(), this forces the code execution path in // evbuffer_prepend related to immutable buffers. 
- if (data.size() == 0) { + if (data.empty()) { return; } evbuffer_prepend(buffer_.get(), data.data(), data.size()); diff --git a/source/common/common/hex.cc b/source/common/common/hex.cc index c910fc86cde8a..b987282d58094 100644 --- a/source/common/common/hex.cc +++ b/source/common/common/hex.cc @@ -27,7 +27,7 @@ std::string Hex::encode(const uint8_t* data, size_t length) { } std::vector Hex::decode(const std::string& hex_string) { - if (hex_string.size() == 0 || hex_string.size() % 2 != 0) { + if (hex_string.empty() || hex_string.size() % 2 != 0) { return {}; } diff --git a/source/common/config/grpc_mux_impl.cc b/source/common/config/grpc_mux_impl.cc index cad96623bea55..c962b0e9dd838 100644 --- a/source/common/config/grpc_mux_impl.cc +++ b/source/common/config/grpc_mux_impl.cc @@ -175,7 +175,7 @@ void GrpcMuxImpl::onDiscoveryResponse( } // onConfigUpdate should be called only on watches(clusters/routes) that have // updates in the message for EDS/RDS. - if (found_resources.size() > 0) { + if (!found_resources.empty()) { watch->callbacks_.onConfigUpdate(found_resources, message->version_info()); } } diff --git a/source/common/http/http1/conn_pool.cc b/source/common/http/http1/conn_pool.cc index 119711fc1505a..e2f7a85656541 100644 --- a/source/common/http/http1/conn_pool.cc +++ b/source/common/http/http1/conn_pool.cc @@ -107,7 +107,7 @@ ConnectionPool::Cancellable* ConnPoolImpl::newStream(StreamDecoder& response_dec } // If we have no connections at all, make one no matter what so we don't starve. 
- if ((ready_clients_.size() == 0 && busy_clients_.size() == 0) || can_create_connection) { + if ((ready_clients_.empty() && busy_clients_.empty()) || can_create_connection) { createNewConnection(); } diff --git a/source/common/router/config_impl.cc b/source/common/router/config_impl.cc index 3cfdbc63f596c..d17e0e56f4ca5 100644 --- a/source/common/router/config_impl.cc +++ b/source/common/router/config_impl.cc @@ -970,11 +970,11 @@ RouteMatcher::RouteMatcher(const envoy::api::v2::RouteConfiguration& route_confi throw EnvoyException(fmt::format("Only a single wildcard domain is permitted")); } default_virtual_host_ = virtual_host; - } else if (domain.size() > 0 && '*' == domain[0]) { + } else if (!domain.empty() && '*' == domain[0]) { duplicate_found = !wildcard_virtual_host_suffixes_[domain.size() - 1] .emplace(domain.substr(1), virtual_host) .second; - } else if (domain.size() > 0 && '*' == domain[domain.size() - 1]) { + } else if (!domain.empty() && '*' == domain[domain.size() - 1]) { duplicate_found = !wildcard_virtual_host_prefixes_[domain.size() - 1] .emplace(domain.substr(0, domain.size() - 1), virtual_host) .second; @@ -1076,7 +1076,7 @@ VirtualHostImpl::virtualClusterFromEntries(const Http::HeaderMap& headers) const } } - if (virtual_clusters_.size() > 0) { + if (!virtual_clusters_.empty()) { return &VIRTUAL_CLUSTER_CATCH_ALL; } diff --git a/source/common/router/header_formatter.cc b/source/common/router/header_formatter.cc index 80077aac6fe78..60f5ca8c5efd4 100644 --- a/source/common/router/header_formatter.cc +++ b/source/common/router/header_formatter.cc @@ -136,7 +136,7 @@ parsePerRequestStateField(absl::string_view param_str) { throw EnvoyException(formatPerRequestStateParseException(param_str)); } modified_param_str = modified_param_str.substr(1, modified_param_str.size() - 2); // trim parens - if (modified_param_str.size() == 0) { + if (modified_param_str.empty()) { throw EnvoyException(formatPerRequestStateParseException(param_str)); } diff --git 
a/source/common/router/header_parser.cc b/source/common/router/header_parser.cc index baaf7c797065b..cf547422bcde0 100644 --- a/source/common/router/header_parser.cc +++ b/source/common/router/header_parser.cc @@ -209,7 +209,7 @@ parseInternal(const envoy::api::v2::core::HeaderValueOption& header_value_option formatters.emplace_back(new PlainHeaderFormatter(unescape(literal), append)); } - ASSERT(formatters.size() > 0); + ASSERT(!formatters.empty()); if (formatters.size() == 1) { return std::move(formatters[0]); diff --git a/source/common/router/rds_impl.cc b/source/common/router/rds_impl.cc index 2aa821ad838ee..733462c79319a 100644 --- a/source/common/router/rds_impl.cc +++ b/source/common/router/rds_impl.cc @@ -234,7 +234,7 @@ RouteConfigProviderManagerImpl::dumpRouteConfigs() const { // of this code, locking the weak_ptr will not fail. auto subscription = element.second.lock(); ASSERT(subscription); - ASSERT(subscription->route_config_providers_.size() > 0); + ASSERT(!subscription->route_config_providers_.empty()); if (subscription->config_info_) { auto* dynamic_config = config_dump->mutable_dynamic_route_configs()->Add(); diff --git a/source/common/router/shadow_writer_impl.cc b/source/common/router/shadow_writer_impl.cc index 0d1f8a25ecef5..baea1de78fa68 100644 --- a/source/common/router/shadow_writer_impl.cc +++ b/source/common/router/shadow_writer_impl.cc @@ -24,7 +24,7 @@ void ShadowWriterImpl::shadow(const std::string& cluster, Http::MessagePtr&& req ASSERT(!request->headers().Host()->value().empty()); // Switch authority to add a shadow postfix. This allows upstream logging to make more sense. auto parts = StringUtil::splitToken(request->headers().Host()->value().c_str(), ":"); - ASSERT(parts.size() > 0 && parts.size() <= 2); + ASSERT(!parts.empty() && parts.size() <= 2); request->headers().Host()->value( parts.size() == 2 ? 
absl::StrJoin(parts, "-shadow:") : absl::StrCat(request->headers().Host()->value().c_str(), "-shadow")); diff --git a/source/common/upstream/health_checker_impl.cc b/source/common/upstream/health_checker_impl.cc index 719975959a3e6..4f9b3d110bc04 100644 --- a/source/common/upstream/health_checker_impl.cc +++ b/source/common/upstream/health_checker_impl.cc @@ -306,7 +306,7 @@ TcpHealthCheckMatcher::MatchSegments TcpHealthCheckMatcher::loadProtoBytes( for (const auto& entry : byte_array) { const auto decoded = Hex::decode(entry.text()); - if (decoded.size() == 0) { + if (decoded.empty()) { throw EnvoyException(fmt::format("invalid hex string '{}'", entry.text())); } result.push_back(decoded); diff --git a/source/common/upstream/load_balancer_impl.cc b/source/common/upstream/load_balancer_impl.cc index d4248eaec3ab6..838d2fb32f0a6 100644 --- a/source/common/upstream/load_balancer_impl.cc +++ b/source/common/upstream/load_balancer_impl.cc @@ -136,7 +136,7 @@ void LoadBalancerBase::recalculatePerPriorityState(uint32_t priority, // by the overprovisioning factor. HostSet& host_set = *priority_set.hostSetsPerPriority()[priority]; per_priority_health.get()[priority] = 0; - if (host_set.hosts().size() > 0) { + if (!host_set.hosts().empty()) { // Each priority level's health is ratio of healthy hosts to total number of hosts in a priority // multiplied by overprovisioning factor of 1.4 and capped at 100%. It means that if all // hosts are healthy that priority's health is 100%*1.4=140% and is capped at 100% which results @@ -442,11 +442,11 @@ HostConstSharedPtr LoadBalancerBase::chooseHost(LoadBalancerContext* context) { bool LoadBalancerBase::isGlobalPanic(const HostSet& host_set) { uint64_t global_panic_threshold = std::min( 100, runtime_.snapshot().getInteger(RuntimePanicThreshold, default_healthy_panic_percent_)); - double healthy_percent = host_set.hosts().size() == 0 + double healthy_percent = host_set.hosts().empty() ? 
0 : 100.0 * host_set.healthyHosts().size() / host_set.hosts().size(); - double degraded_percent = host_set.hosts().size() == 0 + double degraded_percent = host_set.hosts().empty() ? 0 : 100.0 * host_set.degradedHosts().size() / host_set.hosts().size(); // If the % of healthy hosts in the cluster is less than our panic threshold, we use all hosts. @@ -692,7 +692,7 @@ HostConstSharedPtr EdfLoadBalancerBase::chooseHostOnce(LoadBalancerContext* cont return host; } else { const HostVector& hosts_to_use = hostSourceToHosts(hosts_source); - if (hosts_to_use.size() == 0) { + if (hosts_to_use.empty()) { return nullptr; } return unweightedHostPick(hosts_to_use, hosts_source); diff --git a/source/common/upstream/load_stats_reporter.cc b/source/common/upstream/load_stats_reporter.cc index a9bd4aa1faa27..46b0b0d6336cb 100644 --- a/source/common/upstream/load_stats_reporter.cc +++ b/source/common/upstream/load_stats_reporter.cc @@ -59,7 +59,7 @@ void LoadStatsReporter::sendLoadStatsRequest() { for (auto& host_set : cluster.prioritySet().hostSetsPerPriority()) { ENVOY_LOG(trace, "Load report locality count {}", host_set->hostsPerLocality().get().size()); for (auto& hosts : host_set->hostsPerLocality().get()) { - ASSERT(hosts.size() > 0); + ASSERT(!hosts.empty()); uint64_t rq_success = 0; uint64_t rq_error = 0; uint64_t rq_active = 0; diff --git a/source/common/upstream/original_dst_cluster.cc b/source/common/upstream/original_dst_cluster.cc index 15aa74facc998..a523d2453281f 100644 --- a/source/common/upstream/original_dst_cluster.cc +++ b/source/common/upstream/original_dst_cluster.cc @@ -175,7 +175,7 @@ void OriginalDstCluster::cleanup() { } } - if (to_be_removed.size() > 0) { + if (!to_be_removed.empty()) { priority_set_.updateHosts(0, HostSetImpl::partitionHosts(new_hosts, HostsPerLocalityImpl::empty()), {}, {}, to_be_removed, absl::nullopt); diff --git a/source/common/upstream/subset_lb.cc b/source/common/upstream/subset_lb.cc index 19d68fb4b19be..d35672d88b0a9 100644 --- 
a/source/common/upstream/subset_lb.cc +++ b/source/common/upstream/subset_lb.cc @@ -318,7 +318,7 @@ bool SubsetLoadBalancer::hostMatches(const SubsetMetadata& kvs, const Host& host host_metadata.filter_metadata().find(Config::MetadataFilters::get().ENVOY_LB); if (filter_it == host_metadata.filter_metadata().end()) { - return kvs.size() == 0; + return kvs.empty(); } const ProtobufWkt::Struct& data_struct = filter_it->second; diff --git a/source/common/upstream/upstream_impl.cc b/source/common/upstream/upstream_impl.cc index 988098842a0e6..0a688e3d59345 100644 --- a/source/common/upstream/upstream_impl.cc +++ b/source/common/upstream/upstream_impl.cc @@ -102,7 +102,7 @@ parseClusterSocketOptions(const envoy::api::v2::Cluster& config, // Cluster socket_options trump cluster manager wide. if (bind_config.socket_options().size() + config.upstream_bind_config().socket_options().size() > 0) { - auto socket_options = config.upstream_bind_config().socket_options().size() > 0 + auto socket_options = !config.upstream_bind_config().socket_options().empty() ? config.upstream_bind_config().socket_options() : bind_config.socket_options(); Network::Socket::appendOptions( diff --git a/source/extensions/filters/http/header_to_metadata/header_to_metadata_filter.cc b/source/extensions/filters/http/header_to_metadata/header_to_metadata_filter.cc index 8075c0b0ff148..05ce99cc98158 100644 --- a/source/extensions/filters/http/header_to_metadata/header_to_metadata_filter.cc +++ b/source/extensions/filters/http/header_to_metadata/header_to_metadata_filter.cc @@ -30,7 +30,7 @@ Config::Config(const envoy::config::filter::http::header_to_metadata::v2::Config bool Config::configToVector(const ProtobufRepeatedRule& proto_rules, HeaderToMetadataRules& vector) { - if (proto_rules.size() == 0) { + if (proto_rules.empty()) { ENVOY_LOG(debug, "no rules provided"); return false; } @@ -174,7 +174,7 @@ void HeaderToMetadataFilter::writeHeaderToMetadata(Http::HeaderMap& headers, } // Any matching rules? 
- if (structs_by_namespace.size() > 0) { + if (!structs_by_namespace.empty()) { for (auto const& entry : structs_by_namespace) { callbacks.streamInfo().setDynamicMetadata(entry.first, entry.second); } diff --git a/source/extensions/filters/http/jwt_authn/verifier.cc b/source/extensions/filters/http/jwt_authn/verifier.cc index 9b73a81453ef9..f9b4f3a59c2d7 100644 --- a/source/extensions/filters/http/jwt_authn/verifier.cc +++ b/source/extensions/filters/http/jwt_authn/verifier.cc @@ -54,7 +54,7 @@ class ContextImpl : public Verifier::Context { } void setPayload() { - if (payload_.fields().size() > 0) { + if (!payload_.fields().empty()) { callback_->setPayload(payload_); } } diff --git a/source/extensions/filters/network/http_connection_manager/config.cc b/source/extensions/filters/network/http_connection_manager/config.cc index 1ee38efffe569..0014ccc55ef60 100644 --- a/source/extensions/filters/network/http_connection_manager/config.cc +++ b/source/extensions/filters/network/http_connection_manager/config.cc @@ -284,7 +284,7 @@ HttpConnectionManagerConfig::HttpConnectionManagerConfig( throw EnvoyException( fmt::format("Error: multiple upgrade configs with the same name: '{}'", name)); } - if (upgrade_config.filters().size() > 0) { + if (!upgrade_config.filters().empty()) { std::unique_ptr factories = std::make_unique(); for (int32_t i = 0; i < upgrade_config.filters().size(); i++) { processFilter(upgrade_config.filters(i), i, name, *factories); diff --git a/source/extensions/tracers/zipkin/span_context_extractor.cc b/source/extensions/tracers/zipkin/span_context_extractor.cc index 9779a4d5faaa3..50caff9db8c04 100644 --- a/source/extensions/tracers/zipkin/span_context_extractor.cc +++ b/source/extensions/tracers/zipkin/span_context_extractor.cc @@ -105,7 +105,7 @@ std::pair SpanContextExtractor::extractSpanContext(bool is_sa } auto b3_parent_id_entry = request_headers_.get(ZipkinCoreConstants::get().X_B3_PARENT_SPAN_ID); - if (b3_parent_id_entry && 
b3_parent_id_entry->value().size() > 0) { + if (b3_parent_id_entry && !b3_parent_id_entry->value().empty()) { const std::string pspid = b3_parent_id_entry->value().c_str(); if (!StringUtil::atoull(pspid.c_str(), parent_id, 16)) { throw ExtractorException(fmt::format("Invalid parent span id {}", pspid.c_str())); diff --git a/source/extensions/tracers/zipkin/util.cc b/source/extensions/tracers/zipkin/util.cc index 447621f0a59de..d18eff673b042 100644 --- a/source/extensions/tracers/zipkin/util.cc +++ b/source/extensions/tracers/zipkin/util.cc @@ -35,7 +35,7 @@ void Util::addArrayToJson(std::string& target, const std::vector& j const std::string& field_name) { std::string stringified_json_array = "["; - if (json_array.size() > 0) { + if (!json_array.empty()) { stringified_json_array += json_array[0]; for (auto it = json_array.begin() + 1; it != json_array.end(); it++) { stringified_json_array += ","; diff --git a/source/extensions/tracers/zipkin/zipkin_core_types.cc b/source/extensions/tracers/zipkin/zipkin_core_types.cc index 101d7d6a45164..f3087912f6908 100644 --- a/source/extensions/tracers/zipkin/zipkin_core_types.cc +++ b/source/extensions/tracers/zipkin/zipkin_core_types.cc @@ -253,7 +253,7 @@ void Span::finish() { } void Span::setTag(const std::string& name, const std::string& value) { - if (name.size() > 0 && value.size() > 0) { + if (!name.empty() && !value.empty()) { addBinaryAnnotation(BinaryAnnotation(name, value)); } } diff --git a/source/extensions/tracers/zipkin/zipkin_tracer_impl.cc b/source/extensions/tracers/zipkin/zipkin_tracer_impl.cc index 23195aea038dd..84b5f26530b1c 100644 --- a/source/extensions/tracers/zipkin/zipkin_tracer_impl.cc +++ b/source/extensions/tracers/zipkin/zipkin_tracer_impl.cc @@ -75,7 +75,7 @@ Driver::Driver(const envoy::config::trace::v2::ZipkinConfig& zipkin_config, cluster_ = cm_.get(zipkin_config.collector_cluster())->info(); std::string collector_endpoint = ZipkinCoreConstants::get().DEFAULT_COLLECTOR_ENDPOINT; - if 
(zipkin_config.collector_endpoint().size() > 0) { + if (!zipkin_config.collector_endpoint().empty()) { collector_endpoint = zipkin_config.collector_endpoint(); } diff --git a/source/server/http/admin.cc b/source/server/http/admin.cc index 7a8ab7c09c791..8fb1fde7d74e5 100644 --- a/source/server/http/admin.cc +++ b/source/server/http/admin.cc @@ -586,7 +586,7 @@ Http::Code AdminImpl::handlerLogging(absl::string_view url, Http::HeaderMap&, Http::Utility::QueryParams query_params = Http::Utility::parseQueryString(url); Http::Code rc = Http::Code::OK; - if (query_params.size() > 0 && !changeLogLevel(query_params)) { + if (!query_params.empty() && !changeLogLevel(query_params)) { response.add("usage: /logging?= (change single level)\n"); response.add("usage: /logging?level= (change all levels)\n"); response.add("levels: "); diff --git a/source/server/listener_manager_impl.cc b/source/server/listener_manager_impl.cc index 8598f2779d3dd..fc64828f82a7b 100644 --- a/source/server/listener_manager_impl.cc +++ b/source/server/listener_manager_impl.cc @@ -185,7 +185,7 @@ ListenerImpl::ListenerImpl(const envoy::api::v2::Listener& config, const std::st config.tcp_fast_open_queue_length().value())); } - if (config.socket_options().size() > 0) { + if (!config.socket_options().empty()) { addListenSocketOptions( Network::SocketOptionFactory::buildLiteralOptions(config.socket_options())); } diff --git a/test/common/upstream/health_checker_impl_test.cc b/test/common/upstream/health_checker_impl_test.cc index 0e8d56ca695bc..aca25bf4bc82b 100644 --- a/test/common/upstream/health_checker_impl_test.cc +++ b/test/common/upstream/health_checker_impl_test.cc @@ -2809,7 +2809,7 @@ class GrpcHealthCheckerImplTestBase { } void respondResponseSpec(size_t index, ResponseSpec&& spec) { - const bool trailers_empty = spec.trailers.size() == 0U; + const bool trailers_empty = spec.trailers.empty(); const bool end_stream_on_headers = spec.body_chunks.empty() && trailers_empty; auto response_headers = 
std::make_unique(); for (const auto& header : spec.response_headers) { diff --git a/test/common/upstream/load_balancer_impl_test.cc b/test/common/upstream/load_balancer_impl_test.cc index 6645c2cdaf5cf..2d167d09d746b 100644 --- a/test/common/upstream/load_balancer_impl_test.cc +++ b/test/common/upstream/load_balancer_impl_test.cc @@ -203,7 +203,7 @@ TEST_P(LoadBalancerBaseTest, PrioritySelectionFuzz) { // Either we selected one of the healthy hosts or we failed to select anything and defaulted // to healthy. EXPECT_TRUE(!hs.first.healthyHosts().empty() || - (hs.first.healthyHosts().size() == 0 && hs.first.degradedHosts().size() == 0)); + (hs.first.healthyHosts().empty() && hs.first.degradedHosts().empty())); break; case LoadBalancerBase::HostAvailability::Degraded: EXPECT_FALSE(hs.first.degradedHosts().empty()); diff --git a/test/extensions/filters/http/header_to_metadata/header_to_metadata_filter_test.cc b/test/extensions/filters/http/header_to_metadata/header_to_metadata_filter_test.cc index 567f837ba3e55..0a9b120c3e400 100644 --- a/test/extensions/filters/http/header_to_metadata/header_to_metadata_filter_test.cc +++ b/test/extensions/filters/http/header_to_metadata/header_to_metadata_filter_test.cc @@ -52,7 +52,7 @@ class HeaderToMetadataTest : public testing::Test { MATCHER_P(MapEq, rhs, "") { const ProtobufWkt::Struct& obj = arg; - EXPECT_TRUE(rhs.size() > 0); + EXPECT_TRUE(!rhs.empty()); for (auto const& entry : rhs) { EXPECT_EQ(obj.fields().at(entry.first).string_value(), entry.second); } @@ -61,7 +61,7 @@ MATCHER_P(MapEq, rhs, "") { MATCHER_P(MapEqNum, rhs, "") { const ProtobufWkt::Struct& obj = arg; - EXPECT_TRUE(rhs.size() > 0); + EXPECT_TRUE(!rhs.empty()); for (auto const& entry : rhs) { EXPECT_EQ(obj.fields().at(entry.first).number_value(), entry.second); } diff --git a/test/extensions/filters/network/thrift_proxy/integration.cc b/test/extensions/filters/network/thrift_proxy/integration.cc index c80ee6930ba0b..d5d7f1828298d 100644 --- 
a/test/extensions/filters/network/thrift_proxy/integration.cc +++ b/test/extensions/filters/network/thrift_proxy/integration.cc @@ -67,7 +67,7 @@ void BaseThriftIntegrationTest::preparePayloads(const PayloadOptions& options, args.push_back(*options.service_name_); } - if (options.headers_.size() > 0) { + if (!options.headers_.empty()) { args.push_back("-H"); std::vector headers; diff --git a/test/extensions/filters/network/zookeeper_proxy/zookeeper_filter_test.cc b/test/extensions/filters/network/zookeeper_proxy/zookeeper_filter_test.cc index 182ea1bba5700..aeffd31e11af8 100644 --- a/test/extensions/filters/network/zookeeper_proxy/zookeeper_filter_test.cc +++ b/test/extensions/filters/network/zookeeper_proxy/zookeeper_filter_test.cc @@ -16,7 +16,7 @@ namespace NetworkFilters { namespace ZooKeeperProxy { bool protoMapEq(const ProtobufWkt::Struct& obj, const std::map& rhs) { - EXPECT_TRUE(rhs.size() > 0); + EXPECT_TRUE(!rhs.empty()); for (auto const& entry : rhs) { EXPECT_EQ(obj.fields().at(entry.first).string_value(), entry.second); } diff --git a/test/integration/h1_capture_direct_response_fuzz_test.cc b/test/integration/h1_capture_direct_response_fuzz_test.cc index c34ffa551d99d..aaff549c7eb01 100644 --- a/test/integration/h1_capture_direct_response_fuzz_test.cc +++ b/test/integration/h1_capture_direct_response_fuzz_test.cc @@ -27,7 +27,7 @@ void H1FuzzIntegrationTest::initialize() { } DEFINE_PROTO_FUZZER(const test::integration::CaptureFuzzTestCase& input) { - RELEASE_ASSERT(TestEnvironment::getIpVersionsForTest().size() > 0, ""); + RELEASE_ASSERT(!TestEnvironment::getIpVersionsForTest().empty(), ""); const auto ip_version = TestEnvironment::getIpVersionsForTest()[0]; H1FuzzIntegrationTest h1_fuzz_integration_test(ip_version); h1_fuzz_integration_test.replay(input); diff --git a/test/integration/h1_capture_fuzz_test.cc b/test/integration/h1_capture_fuzz_test.cc index 704e8d10d0205..d6b161a5ea17a 100644 --- a/test/integration/h1_capture_fuzz_test.cc +++ 
b/test/integration/h1_capture_fuzz_test.cc @@ -5,7 +5,7 @@ void H1FuzzIntegrationTest::initialize() { HttpIntegrationTest::initialize(); } DEFINE_PROTO_FUZZER(const test::integration::CaptureFuzzTestCase& input) { // Pick an IP version to use for loopback, it doesn't matter which. - RELEASE_ASSERT(TestEnvironment::getIpVersionsForTest().size() > 0, ""); + RELEASE_ASSERT(!TestEnvironment::getIpVersionsForTest().empty(), ""); const auto ip_version = TestEnvironment::getIpVersionsForTest()[0]; H1FuzzIntegrationTest h1_fuzz_integration_test(ip_version); h1_fuzz_integration_test.replay(input); From c8b28210628338332c8bd2a669131dd7f4ef8d63 Mon Sep 17 00:00:00 2001 From: Michael Puncel Date: Wed, 10 Apr 2019 13:09:31 -0400 Subject: [PATCH 094/165] retry policy: check retry conditions before remaining retry count (#6526) This commit reorders some of the checks in the HTTP retry state implementation. Previously, it would check if there are remaining retries before checking if a retry would be attempted based on the retry policy and event that's occuring. This means that shouldRetry might return NoRetryLimitExceeded for cases that should not be retried anyway. This bubbles up to the router and causes a response flag that might be confusing to end users. In particular this happens if the final retry attempt succeeds. 
Signed-off-by: Michael Puncel --- source/common/router/retry_state_impl.cc | 7 ++++--- test/common/router/retry_state_impl_test.cc | 20 ++++++++++++++++++-- 2 files changed, 22 insertions(+), 5 deletions(-) diff --git a/source/common/router/retry_state_impl.cc b/source/common/router/retry_state_impl.cc index fa4a18c0ab122..84e1863176031 100644 --- a/source/common/router/retry_state_impl.cc +++ b/source/common/router/retry_state_impl.cc @@ -158,14 +158,15 @@ RetryStatus RetryStateImpl::shouldRetry(bool would_retry, DoRetryCallback callba resetRetry(); + if (!would_retry) { + return RetryStatus::No; + } + if (retries_remaining_ == 0) { return RetryStatus::NoRetryLimitExceeded; } retries_remaining_--; - if (!would_retry) { - return RetryStatus::No; - } if (!cluster_.resourceManager(priority_).retries().canCreate()) { cluster_.stats().upstream_rq_retry_overflow_.inc(); diff --git a/test/common/router/retry_state_impl_test.cc b/test/common/router/retry_state_impl_test.cc index 06f5f209ccd22..af96861b8b9a7 100644 --- a/test/common/router/retry_state_impl_test.cc +++ b/test/common/router/retry_state_impl_test.cc @@ -284,6 +284,8 @@ TEST_F(RouterRetryStateImplTest, Policy5xxRemote200RemoteReset) { EXPECT_TRUE(state_->enabled()); Http::TestHeaderMapImpl response_headers{{":status", "200"}}; EXPECT_EQ(RetryStatus::No, state_->shouldRetryHeaders(response_headers, callback_)); + expectTimerCreateAndEnable(); + EXPECT_EQ(RetryStatus::Yes, state_->shouldRetryReset(remote_reset_, callback_)); EXPECT_EQ(RetryStatus::NoRetryLimitExceeded, state_->shouldRetryReset(remote_reset_, callback_)); } @@ -496,8 +498,7 @@ TEST_F(RouterRetryStateImplTest, Backoff) { retry_timer_->callback_(); Http::TestHeaderMapImpl response_headers{{":status", "200"}}; - EXPECT_EQ(RetryStatus::NoRetryLimitExceeded, - state_->shouldRetryHeaders(response_headers, callback_)); + EXPECT_EQ(RetryStatus::No, state_->shouldRetryHeaders(response_headers, callback_)); EXPECT_EQ(3UL, 
cluster_.stats().upstream_rq_retry_.value()); EXPECT_EQ(1UL, cluster_.stats().upstream_rq_retry_success_.value()); @@ -538,6 +539,21 @@ TEST_F(RouterRetryStateImplTest, ZeroMaxRetriesHeader) { state_->shouldRetryReset(connect_failure_, callback_)); } +// Check that if there are 0 remaining retries available but we get +// non-retriable headers, we return No rather than NoRetryLimitExceeded. +TEST_F(RouterRetryStateImplTest, NoPreferredOverLimitExceeded) { + Http::TestHeaderMapImpl request_headers{{"x-envoy-retry-on", "5xx"}, + {"x-envoy-max-retries", "1"}}; + setup(request_headers); + + Http::TestHeaderMapImpl bad_response_headers{{":status", "503"}}; + expectTimerCreateAndEnable(); + EXPECT_EQ(RetryStatus::Yes, state_->shouldRetryHeaders(bad_response_headers, callback_)); + + Http::TestHeaderMapImpl good_response_headers{{":status", "200"}}; + EXPECT_EQ(RetryStatus::No, state_->shouldRetryHeaders(good_response_headers, callback_)); +} + } // namespace } // namespace Router } // namespace Envoy From ac7aa5ac8a815e5277b4d4659c5c02145fa1d56f Mon Sep 17 00:00:00 2001 From: James Synge Date: Wed, 10 Apr 2019 13:56:17 -0400 Subject: [PATCH 095/165] tidy: Add HeaderMap.empty() method, consistent with other containers (#6539) Signed-off-by: James Synge --- include/envoy/http/header_map.h | 5 +++++ source/common/http/header_map_impl.h | 2 ++ test/common/http/header_map_impl_test.cc | 23 +++++++++++++++++++++++ 3 files changed, 30 insertions(+) diff --git a/include/envoy/http/header_map.h b/include/envoy/http/header_map.h index b68967ed8492c..8a094c1d793ce 100644 --- a/include/envoy/http/header_map.h +++ b/include/envoy/http/header_map.h @@ -511,6 +511,11 @@ class HeaderMap { */ virtual size_t size() const PURE; + /** + * @return true if the map is empty, false otherwise. + */ + virtual bool empty() const PURE; + /** * Allow easy pretty-printing of the key/value pairs in HeaderMap * @param os supplies the ostream to print to. 
diff --git a/source/common/http/header_map_impl.h b/source/common/http/header_map_impl.h index 376311b4536f7..3bdbd1cd206ee 100644 --- a/source/common/http/header_map_impl.h +++ b/source/common/http/header_map_impl.h @@ -80,6 +80,7 @@ class HeaderMapImpl : public HeaderMap, NonCopyable { void remove(const LowerCaseString& key) override; void removePrefix(const LowerCaseString& key) override; size_t size() const override { return headers_.size(); } + bool empty() const override { return headers_.empty(); } protected: // For tests only, unoptimized, they aren't intended for regular HeaderMapImpl users. @@ -177,6 +178,7 @@ class HeaderMapImpl : public HeaderMap, NonCopyable { std::list::const_reverse_iterator rbegin() const { return headers_.rbegin(); } std::list::const_reverse_iterator rend() const { return headers_.rend(); } size_t size() const { return headers_.size(); } + bool empty() const { return headers_.empty(); } private: std::list headers_; diff --git a/test/common/http/header_map_impl_test.cc b/test/common/http/header_map_impl_test.cc index c8ffe6d455681..3962ed6ba0a14 100644 --- a/test/common/http/header_map_impl_test.cc +++ b/test/common/http/header_map_impl_test.cc @@ -287,8 +287,12 @@ TEST(HeaderStringTest, All) { TEST(HeaderMapImplTest, InlineInsert) { HeaderMapImpl headers; + EXPECT_TRUE(headers.empty()); + EXPECT_EQ(0, headers.size()); EXPECT_EQ(nullptr, headers.Host()); headers.insertHost().value(std::string("hello")); + EXPECT_FALSE(headers.empty()); + EXPECT_EQ(1, headers.size()); EXPECT_STREQ(":authority", headers.Host()->key().c_str()); EXPECT_STREQ("hello", headers.Host()->value().c_str()); EXPECT_STREQ("hello", headers.get(Headers::get().Host)->value().c_str()); @@ -323,25 +327,31 @@ TEST(HeaderMapImplTest, Remove) { EXPECT_STREQ("value", headers.get(static_key)->value().c_str()); EXPECT_EQ(HeaderString::Type::Reference, headers.get(static_key)->value().type()); EXPECT_EQ(1UL, headers.size()); + EXPECT_FALSE(headers.empty()); 
headers.remove(static_key); EXPECT_EQ(nullptr, headers.get(static_key)); EXPECT_EQ(0UL, headers.size()); + EXPECT_TRUE(headers.empty()); // Add and remove by inline. headers.insertContentLength().value(5); EXPECT_STREQ("5", headers.ContentLength()->value().c_str()); EXPECT_EQ(1UL, headers.size()); + EXPECT_FALSE(headers.empty()); headers.removeContentLength(); EXPECT_EQ(nullptr, headers.ContentLength()); EXPECT_EQ(0UL, headers.size()); + EXPECT_TRUE(headers.empty()); // Add inline and remove by name. headers.insertContentLength().value(5); EXPECT_STREQ("5", headers.ContentLength()->value().c_str()); EXPECT_EQ(1UL, headers.size()); + EXPECT_FALSE(headers.empty()); headers.remove(Headers::get().ContentLength); EXPECT_EQ(nullptr, headers.ContentLength()); EXPECT_EQ(0UL, headers.size()); + EXPECT_TRUE(headers.empty()); } TEST(HeaderMapImplTest, RemoveRegex) { @@ -377,6 +387,7 @@ TEST(HeaderMapImplTest, RemoveRegex) { headers.insertContentLength().value(5); EXPECT_STREQ("5", headers.ContentLength()->value().c_str()); EXPECT_EQ(1UL, headers.size()); + EXPECT_FALSE(headers.empty()); headers.removePrefix(LowerCaseString("content")); EXPECT_EQ(nullptr, headers.ContentLength()); } @@ -735,16 +746,20 @@ TEST(HeaderMapImplTest, PseudoHeaderOrder) { LowerCaseString foo("hello"); Http::TestHeaderMapImpl headers{}; EXPECT_EQ(0UL, headers.size()); + EXPECT_TRUE(headers.empty()); headers.addReferenceKey(foo, "world"); EXPECT_EQ(1UL, headers.size()); + EXPECT_FALSE(headers.empty()); headers.setReferenceKey(Headers::get().ContentType, "text/html"); EXPECT_EQ(2UL, headers.size()); + EXPECT_FALSE(headers.empty()); // Pseudo header gets inserted before non-pseudo headers headers.setReferenceKey(Headers::get().Method, "PUT"); EXPECT_EQ(3UL, headers.size()); + EXPECT_FALSE(headers.empty()); InSequence seq; EXPECT_CALL(cb, Call(":method", "PUT")); @@ -761,6 +776,7 @@ TEST(HeaderMapImplTest, PseudoHeaderOrder) { // Removal of the header before which pseudo-headers are inserted 
headers.remove(foo); EXPECT_EQ(2UL, headers.size()); + EXPECT_FALSE(headers.empty()); EXPECT_CALL(cb, Call(":method", "PUT")); EXPECT_CALL(cb, Call("content-type", "text/html")); @@ -775,6 +791,7 @@ TEST(HeaderMapImplTest, PseudoHeaderOrder) { // Next pseudo-header goes after other pseudo-headers, but before normal headers headers.setReferenceKey(Headers::get().Path, "/test"); EXPECT_EQ(3UL, headers.size()); + EXPECT_FALSE(headers.empty()); EXPECT_CALL(cb, Call(":method", "PUT")); EXPECT_CALL(cb, Call(":path", "/test")); @@ -790,6 +807,7 @@ TEST(HeaderMapImplTest, PseudoHeaderOrder) { // Removing the last normal header headers.remove(Headers::get().ContentType); EXPECT_EQ(2UL, headers.size()); + EXPECT_FALSE(headers.empty()); EXPECT_CALL(cb, Call(":method", "PUT")); EXPECT_CALL(cb, Call(":path", "/test")); @@ -804,6 +822,7 @@ TEST(HeaderMapImplTest, PseudoHeaderOrder) { // Adding a new pseudo-header after removing the last normal header headers.setReferenceKey(Headers::get().Host, "host"); EXPECT_EQ(3UL, headers.size()); + EXPECT_FALSE(headers.empty()); EXPECT_CALL(cb, Call(":method", "PUT")); EXPECT_CALL(cb, Call(":path", "/test")); @@ -819,6 +838,7 @@ TEST(HeaderMapImplTest, PseudoHeaderOrder) { // Adding the first normal header headers.setReferenceKey(Headers::get().ContentType, "text/html"); EXPECT_EQ(4UL, headers.size()); + EXPECT_FALSE(headers.empty()); EXPECT_CALL(cb, Call(":method", "PUT")); EXPECT_CALL(cb, Call(":path", "/test")); @@ -837,6 +857,7 @@ TEST(HeaderMapImplTest, PseudoHeaderOrder) { headers.remove(Headers::get().Method); headers.remove(Headers::get().Host); EXPECT_EQ(1UL, headers.size()); + EXPECT_FALSE(headers.empty()); EXPECT_CALL(cb, Call("content-type", "text/html")); @@ -850,10 +871,12 @@ TEST(HeaderMapImplTest, PseudoHeaderOrder) { // Removing all headers headers.remove(Headers::get().ContentType); EXPECT_EQ(0UL, headers.size()); + EXPECT_TRUE(headers.empty()); // Adding a lone pseudo-header headers.setReferenceKey(Headers::get().Status, 
"200"); EXPECT_EQ(1UL, headers.size()); + EXPECT_FALSE(headers.empty()); EXPECT_CALL(cb, Call(":status", "200")); From b8bcb11f5fb8c5ae19535444a9de10a207d9cec6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Dziedziak?= Date: Wed, 10 Apr 2019 20:08:43 +0200 Subject: [PATCH 096/165] Path header for absolute header were missing query params Part 2 (#6514) Description: Changed how create path header for absolute url. I found in RFC 3986 that url: http://foo.com/ isn't same ashttp://foo.com/? so we should pass ? to path header. I am not sure what should we do about url http://foo.com?. Also in previous PR I forget about fragment which starts with # sign. I am a bit concerned about security of this solution -> what about path with ../../ I am not sure if this is correct approach and I am open for suggestions. Risk Level: low Testing: added unit test Docs Changes: Release Notes: Fixes: #6459 Signed-off-by: Lukasz Dziedziak --- source/common/http/utility.cc | 17 +++++------------ test/common/http/utility_test.cc | 17 +++++++++++------ 2 files changed, 16 insertions(+), 18 deletions(-) diff --git a/source/common/http/utility.cc b/source/common/http/utility.cc index 7cd9715a66d1d..e004d1466b85b 100644 --- a/source/common/http/utility.cc +++ b/source/common/http/utility.cc @@ -56,18 +56,11 @@ bool Utility::Url::initialize(absl::string_view absolute_url) { // RFC allows the absolute-uri to not end in /, but the absolute path form // must start with - if ((u.field_set & (1 << UF_PATH)) == (1 << UF_PATH) && u.field_data[UF_PATH].len > 0) { - uint64_t path_len = u.field_data[UF_PATH].len; - if ((u.field_set & (1 << UF_QUERY)) == (1 << UF_QUERY) && u.field_data[UF_QUERY].len > 0) { - path_len += 1 + u.field_data[UF_QUERY].len; - } - path_and_query_params_ = - absl::string_view(absolute_url.data() + u.field_data[UF_PATH].off, path_len); - } else if ((u.field_set & (1 << UF_QUERY)) == (1 << UF_QUERY) && u.field_data[UF_QUERY].len > 0) { - // Http parser skips question mark 
and starts count from first character after ? - // so we need to move left by one - path_and_query_params_ = absl::string_view(absolute_url.data() + u.field_data[UF_QUERY].off - 1, - u.field_data[UF_QUERY].len + 1); + uint64_t path_len = + absolute_url.length() - (u.field_data[UF_HOST].off + host_and_port().length()); + if (path_len > 0) { + uint64_t path_beginning = u.field_data[UF_HOST].off + host_and_port().length(); + path_and_query_params_ = absl::string_view(absolute_url.data() + path_beginning, path_len); } else { path_and_query_params_ = absl::string_view(kDefaultPath, 1); } diff --git a/test/common/http/utility_test.cc b/test/common/http/utility_test.cc index 6afc92e72886c..a0abcd3e8645c 100644 --- a/test/common/http/utility_test.cc +++ b/test/common/http/utility_test.cc @@ -767,20 +767,20 @@ TEST(Url, ParsingTest) { ValidateUrl("http://www.host.com/", "http", "www.host.com", "/"); // Test url with "?". - ValidateUrl("http://www.host.com:80/?", "http", "www.host.com:80", "/"); - ValidateUrl("http://www.host.com/?", "http", "www.host.com", "/"); + ValidateUrl("http://www.host.com:80/?", "http", "www.host.com:80", "/?"); + ValidateUrl("http://www.host.com/?", "http", "www.host.com", "/?"); // Test url with "?" but without slash. - ValidateUrl("http://www.host.com:80?", "http", "www.host.com:80", "/"); - ValidateUrl("http://www.host.com?", "http", "www.host.com", "/"); + ValidateUrl("http://www.host.com:80?", "http", "www.host.com:80", "?"); + ValidateUrl("http://www.host.com?", "http", "www.host.com", "?"); // Test url with multi-character path ValidateUrl("http://www.host.com:80/path", "http", "www.host.com:80", "/path"); ValidateUrl("http://www.host.com/path", "http", "www.host.com", "/path"); // Test url with multi-character path and ? 
at the end - ValidateUrl("http://www.host.com:80/path?", "http", "www.host.com:80", "/path"); - ValidateUrl("http://www.host.com/path?", "http", "www.host.com", "/path"); + ValidateUrl("http://www.host.com:80/path?", "http", "www.host.com:80", "/path?"); + ValidateUrl("http://www.host.com/path?", "http", "www.host.com", "/path?"); // Test https scheme ValidateUrl("https://www.host.com", "https", "www.host.com", "/"); @@ -803,6 +803,11 @@ TEST(Url, ParsingTest) { "/path?query=param&query2=param2"); ValidateUrl("http://www.host.com/path?query=param&query2=param2", "http", "www.host.com", "/path?query=param&query2=param2"); + // Test url with multi-character path, more than one query parameter and fragment + ValidateUrl("http://www.host.com:80/path?query=param&query2=param2#fragment", "http", + "www.host.com:80", "/path?query=param&query2=param2#fragment"); + ValidateUrl("http://www.host.com/path?query=param&query2=param2#fragment", "http", "www.host.com", + "/path?query=param&query2=param2#fragment"); } } // namespace Http From fcbfd5dd59cfd066879f8cd00b40ec2ca6ffebfe Mon Sep 17 00:00:00 2001 From: Daniel Mangum <31777345+HashedDan@users.noreply.github.com> Date: Wed, 10 Apr 2019 18:37:31 -0500 Subject: [PATCH 097/165] docs: make deprecated.md point to deprecated log (#6544) Because previous releases of Envoy point to DEPRECATED.md in error messages, it is necessary to link from it to the new location of the deprecated log in the Envoy developer docs Risk Level: Low Testing: N/A Docs Changes: DEPRECATED.md added with pointer to Deprecated Log in Envoy docs Note: This PR is in response to concerns raised by @lizan in #6454 Signed-off-by: HashedDan --- DEPRECATED.md | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 DEPRECATED.md diff --git a/DEPRECATED.md b/DEPRECATED.md new file mode 100644 index 0000000000000..ab705e0ac558a --- /dev/null +++ b/DEPRECATED.md @@ -0,0 +1,3 @@ +# DEPRECATED + +The [deprecated 
log](https://www.envoyproxy.io/docs/envoy/latest/intro/deprecated) can be found in the official Envoy developer documentation. \ No newline at end of file From 553e9aeccf69983d4ac41f3dec72a8bc7b312ca9 Mon Sep 17 00:00:00 2001 From: Lizan Zhou Date: Wed, 10 Apr 2019 21:28:05 -0700 Subject: [PATCH 098/165] build: use clang-8 (#6534) Use Clang-8 in CI. Risk Level: Low Testing: CI Signed-off-by: Lizan Zhou --- .circleci/config.yml | 2 +- bazel/README.md | 2 +- bazel/cc_wrapper.py | 5 ++ bazel/genrule_repository.bzl | 12 ++-- ci/README.md | 6 +- ci/build_setup.sh | 6 +- ci/run_clang_tidy.sh | 6 +- source/common/common/posix/thread_impl.cc | 13 +++-- source/common/common/win32/thread_impl.cc | 14 ++--- source/common/event/file_event_impl.cc | 43 +++++++------- .../common/filesystem/inotify/watcher_impl.cc | 14 ++--- .../common/filesystem/kqueue/watcher_impl.cc | 17 +++--- source/common/http/conn_manager_impl.cc | 34 +++++------ source/common/http/utility.cc | 17 +++--- source/common/network/dns_impl.cc | 12 ++-- source/common/router/router.cc | 32 +++++----- source/common/upstream/subset_lb.cc | 58 +++++++++---------- .../common/tap/extension_config_base.cc | 4 +- .../filters/http/health_check/health_check.cc | 15 ++--- .../filters/http/jwt_authn/verifier.cc | 30 +++++----- .../filters/http/ratelimit/ratelimit.cc | 6 +- .../listener/proxy_protocol/proxy_protocol.cc | 14 ++--- .../network/common/redis/codec_impl.cc | 4 +- .../transport_sockets/tls/context_impl.cc | 14 ++--- source/server/hot_restart_impl.cc | 14 ++--- source/server/server.h | 3 +- test/common/secret/sds_api_test.cc | 30 +++++----- .../http/jwt_authn/authenticator_test.cc | 8 +-- .../transport_sockets/tls/ssl_socket_test.cc | 17 +++--- test/integration/http_integration.cc | 14 ++--- test/integration/integration_test.h | 13 +++-- test/server/options_impl_test.cc | 6 +- tools/check_format.py | 6 +- tools/check_format_test_helper.py | 2 +- 34 files changed, 256 insertions(+), 237 deletions(-) diff --git 
a/.circleci/config.yml b/.circleci/config.yml index 2992cee69641c..a8f3fb63bbe9b 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -4,7 +4,7 @@ executors: ubuntu-build: description: "A regular build executor based on ubuntu image" docker: - - image: envoyproxy/envoy-build:698009170e362f9ca0594f2b1927fbbee199bf98 + - image: envoyproxy/envoy-build:cfc514546bc0284536893cca5fa43d7128edcd35 resource_class: xlarge working_directory: /source diff --git a/bazel/README.md b/bazel/README.md index 6429e7174bdc4..5c719e201b5d0 100644 --- a/bazel/README.md +++ b/bazel/README.md @@ -33,7 +33,7 @@ for how to update or override dependencies. sudo apt-get install \ libtool \ cmake \ - clang-format-7 \ + clang-format-8 \ automake \ autoconf \ make \ diff --git a/bazel/cc_wrapper.py b/bazel/cc_wrapper.py index ba55379abeb0a..d31847904d2f3 100755 --- a/bazel/cc_wrapper.py +++ b/bazel/cc_wrapper.py @@ -71,6 +71,11 @@ def main(): else: argv += sys.argv[1:] + # Bazel will add -fuse-ld=gold in some cases, gcc/clang will take the last -fuse-ld argument, + # so whenever we see lld once, add it to the end. + if "-fuse-ld=lld" in argv: + argv.append("-fuse-ld=lld") + # Add compiler-specific options if "clang" in compiler: # This ensures that STL symbols are included. diff --git a/bazel/genrule_repository.bzl b/bazel/genrule_repository.bzl index cdddf14922de8..0689c39c88b0b 100644 --- a/bazel/genrule_repository.bzl +++ b/bazel/genrule_repository.bzl @@ -105,9 +105,11 @@ def _genrule_environment(ctx): # running. 
# # https://stackoverflow.com/questions/37603238/fsanitize-not-using-gold-linker-in-gcc-6-1 - force_ld_gold = [] - if "gcc" in c_compiler or "g++" in c_compiler: - force_ld_gold = ["-fuse-ld=gold"] + force_ld = [] + if "clang" in c_compiler: + force_ld = ["-fuse-ld=lld"] + elif "gcc" in c_compiler or "g++" in c_compiler: + force_ld = ["-fuse-ld=gold"] cc_flags = [] ld_flags = [] @@ -117,11 +119,11 @@ def _genrule_environment(ctx): if ctx.var.get("ENVOY_CONFIG_ASAN"): cc_flags += asan_flags ld_flags += asan_flags - ld_flags += force_ld_gold + ld_flags += force_ld if ctx.var.get("ENVOY_CONFIG_TSAN"): cc_flags += tsan_flags ld_flags += tsan_flags - ld_flags += force_ld_gold + ld_flags += force_ld lines.append("export CFLAGS=%r" % (" ".join(cc_flags),)) lines.append("export LDFLAGS=%r" % (" ".join(ld_flags),)) diff --git a/ci/README.md b/ci/README.md index a5428889a98e1..1676ab83ace6f 100644 --- a/ci/README.md +++ b/ci/README.md @@ -25,7 +25,7 @@ Currently there are three build images: * `envoyproxy/envoy-build` — alias to `envoyproxy/envoy-build-ubuntu`. * `envoyproxy/envoy-build-ubuntu` — based on Ubuntu 16.04 (Xenial) which uses the GCC 5.4 compiler. -We also install and use the clang-7 compiler for some sanitizing runs. +We also install and use the clang-8 compiler for some sanitizing runs. # Building and running tests as a developer @@ -91,8 +91,8 @@ The `./ci/run_envoy_docker.sh './ci/do_ci.sh '` targets are: * `bazel.tsan` — build and run tests under `-c dbg --config=clang-tsan` with clang. * `bazel.compile_time_options` — build Envoy and test with various compile-time options toggled to their non-default state, to ensure they still build. * `bazel.clang_tidy` — build and run clang-tidy over all source files. -* `check_format`— run `clang-format-7` and `buildifier` on entire source tree. -* `fix_format`— run and enforce `clang-format-7` and `buildifier` on entire source tree. +* `check_format`— run `clang-format` and `buildifier` on entire source tree. 
+* `fix_format`— run and enforce `clang-format` and `buildifier` on entire source tree. * `check_spelling`— run `misspell` on entire project. * `fix_spelling`— run and enforce `misspell` on entire project. * `check_spelling_pedantic`— run `aspell` on C++ and proto comments. diff --git a/ci/build_setup.sh b/ci/build_setup.sh index fa685a7eb498b..025fea240cc6f 100755 --- a/ci/build_setup.sh +++ b/ci/build_setup.sh @@ -17,10 +17,10 @@ function setup_gcc_toolchain() { } function setup_clang_toolchain() { - export PATH=/usr/lib/llvm-7/bin:$PATH + export PATH=/usr/lib/llvm-8/bin:$PATH export CC=clang export CXX=clang++ - export ASAN_SYMBOLIZER_PATH=/usr/lib/llvm-7/bin/llvm-symbolizer + export ASAN_SYMBOLIZER_PATH=/usr/lib/llvm-8/bin/llvm-symbolizer echo "$CC/$CXX toolchain configured" } @@ -64,7 +64,7 @@ if [[ -f "/etc/redhat-release" ]] then export BAZEL_BUILD_EXTRA_OPTIONS="--copt=-DENVOY_IGNORE_GLIBCXX_USE_CXX11_ABI_ERROR=1 --action_env=PATH ${BAZEL_BUILD_EXTRA_OPTIONS}" else - export BAZEL_BUILD_EXTRA_OPTIONS="--action_env=PATH=/bin:/usr/bin:/usr/lib/llvm-7/bin --linkopt=-fuse-ld=lld ${BAZEL_BUILD_EXTRA_OPTIONS}" + export BAZEL_BUILD_EXTRA_OPTIONS="--action_env=PATH=/bin:/usr/bin:/usr/lib/llvm-8/bin --linkopt=-fuse-ld=lld ${BAZEL_BUILD_EXTRA_OPTIONS}" fi # Not sandboxing, since non-privileged Docker can't do nested namespaces. diff --git a/ci/run_clang_tidy.sh b/ci/run_clang_tidy.sh index 27c8212b87ef0..8adbbd5089d83 100755 --- a/ci/run_clang_tidy.sh +++ b/ci/run_clang_tidy.sh @@ -37,13 +37,13 @@ function filter_excludes() { if [[ "${RUN_FULL_CLANG_TIDY}" == 1 ]]; then echo "Running full clang-tidy..." - run-clang-tidy-7 + run-clang-tidy-8 elif [[ -z "${CIRCLE_PR_NUMBER}" && "$CIRCLE_BRANCH" == "master" ]]; then echo "On master branch, running clang-tidy-diff against previous commit..." 
- git diff HEAD^ | filter_excludes | clang-tidy-diff-7.py -p 1 + git diff HEAD^ | filter_excludes | clang-tidy-diff-8.py -p 1 else echo "Running clang-tidy-diff against master branch..." git fetch https://github.com/envoyproxy/envoy.git master git diff $(git merge-base HEAD FETCH_HEAD)..HEAD | filter_excludes | \ - clang-tidy-diff-7.py -p 1 + clang-tidy-diff-8.py -p 1 fi diff --git a/source/common/common/posix/thread_impl.cc b/source/common/common/posix/thread_impl.cc index 6421e32f2212f..897a55d62040b 100644 --- a/source/common/common/posix/thread_impl.cc +++ b/source/common/common/posix/thread_impl.cc @@ -33,12 +33,13 @@ bool ThreadIdImplPosix::isCurrentThreadId() const { return id_ == getCurrentThre ThreadImplPosix::ThreadImplPosix(std::function thread_routine) : thread_routine_(thread_routine) { RELEASE_ASSERT(Logger::Registry::initialized(), ""); - const int rc = pthread_create(&thread_handle_, nullptr, - [](void* arg) -> void* { - static_cast(arg)->thread_routine_(); - return nullptr; - }, - this); + const int rc = pthread_create( + &thread_handle_, nullptr, + [](void* arg) -> void* { + static_cast(arg)->thread_routine_(); + return nullptr; + }, + this); RELEASE_ASSERT(rc == 0, ""); } diff --git a/source/common/common/win32/thread_impl.cc b/source/common/common/win32/thread_impl.cc index 28bd8b189d211..bee7b9f2f9799 100644 --- a/source/common/common/win32/thread_impl.cc +++ b/source/common/common/win32/thread_impl.cc @@ -15,13 +15,13 @@ bool ThreadIdImplWin32::isCurrentThreadId() const { return id_ == ::GetCurrentTh ThreadImplWin32::ThreadImplWin32(std::function thread_routine) : thread_routine_(thread_routine) { RELEASE_ASSERT(Logger::Registry::initialized(), ""); - thread_handle_ = reinterpret_cast( - ::_beginthreadex(nullptr, 0, - [](void* arg) -> unsigned int { - static_cast(arg)->thread_routine_(); - return 0; - }, - this, 0, nullptr)); + thread_handle_ = reinterpret_cast(::_beginthreadex( + nullptr, 0, + [](void* arg) -> unsigned int { + 
static_cast(arg)->thread_routine_(); + return 0; + }, + this, 0, nullptr)); RELEASE_ASSERT(thread_handle_ != 0, ""); } diff --git a/source/common/event/file_event_impl.cc b/source/common/event/file_event_impl.cc index d478b3cd0cf5a..feee927132ee0 100644 --- a/source/common/event/file_event_impl.cc +++ b/source/common/event/file_event_impl.cc @@ -40,30 +40,31 @@ void FileEventImpl::activate(uint32_t events) { } void FileEventImpl::assignEvents(uint32_t events) { - event_assign(&raw_event_, base_, fd_, - EV_PERSIST | (trigger_ == FileTriggerType::Level ? 0 : EV_ET) | - (events & FileReadyType::Read ? EV_READ : 0) | - (events & FileReadyType::Write ? EV_WRITE : 0) | - (events & FileReadyType::Closed ? EV_CLOSED : 0), - [](evutil_socket_t, short what, void* arg) -> void { - FileEventImpl* event = static_cast(arg); - uint32_t events = 0; - if (what & EV_READ) { - events |= FileReadyType::Read; - } + event_assign( + &raw_event_, base_, fd_, + EV_PERSIST | (trigger_ == FileTriggerType::Level ? 0 : EV_ET) | + (events & FileReadyType::Read ? EV_READ : 0) | + (events & FileReadyType::Write ? EV_WRITE : 0) | + (events & FileReadyType::Closed ? 
EV_CLOSED : 0), + [](evutil_socket_t, short what, void* arg) -> void { + FileEventImpl* event = static_cast(arg); + uint32_t events = 0; + if (what & EV_READ) { + events |= FileReadyType::Read; + } - if (what & EV_WRITE) { - events |= FileReadyType::Write; - } + if (what & EV_WRITE) { + events |= FileReadyType::Write; + } - if (what & EV_CLOSED) { - events |= FileReadyType::Closed; - } + if (what & EV_CLOSED) { + events |= FileReadyType::Closed; + } - ASSERT(events); - event->cb_(events); - }, - this); + ASSERT(events); + event->cb_(events); + }, + this); } void FileEventImpl::setEnabled(uint32_t events) { diff --git a/source/common/filesystem/inotify/watcher_impl.cc b/source/common/filesystem/inotify/watcher_impl.cc index 0bfac293b6dd8..a8956d348d9a7 100644 --- a/source/common/filesystem/inotify/watcher_impl.cc +++ b/source/common/filesystem/inotify/watcher_impl.cc @@ -17,13 +17,13 @@ namespace Filesystem { WatcherImpl::WatcherImpl(Event::Dispatcher& dispatcher) : inotify_fd_(inotify_init1(IN_NONBLOCK)), - inotify_event_(dispatcher.createFileEvent(inotify_fd_, - [this](uint32_t events) -> void { - ASSERT(events == Event::FileReadyType::Read); - onInotifyEvent(); - }, - Event::FileTriggerType::Edge, - Event::FileReadyType::Read)) { + inotify_event_(dispatcher.createFileEvent( + inotify_fd_, + [this](uint32_t events) -> void { + ASSERT(events == Event::FileReadyType::Read); + onInotifyEvent(); + }, + Event::FileTriggerType::Edge, Event::FileReadyType::Read)) { RELEASE_ASSERT(inotify_fd_ >= 0, ""); } diff --git a/source/common/filesystem/kqueue/watcher_impl.cc b/source/common/filesystem/kqueue/watcher_impl.cc index f5a030ba9b412..4b2e8c102b609 100644 --- a/source/common/filesystem/kqueue/watcher_impl.cc +++ b/source/common/filesystem/kqueue/watcher_impl.cc @@ -17,15 +17,14 @@ namespace Envoy { namespace Filesystem { WatcherImpl::WatcherImpl(Event::Dispatcher& dispatcher) - : queue_(kqueue()), - kqueue_event_(dispatcher.createFileEvent(queue_, - [this](uint32_t 
events) -> void { - if (events & Event::FileReadyType::Read) { - onKqueueEvent(); - } - }, - Event::FileTriggerType::Edge, - Event::FileReadyType::Read)) {} + : queue_(kqueue()), kqueue_event_(dispatcher.createFileEvent( + queue_, + [this](uint32_t events) -> void { + if (events & Event::FileReadyType::Read) { + onKqueueEvent(); + } + }, + Event::FileTriggerType::Edge, Event::FileReadyType::Read)) {} WatcherImpl::~WatcherImpl() { close(queue_); diff --git a/source/common/http/conn_manager_impl.cc b/source/common/http/conn_manager_impl.cc index 1fe100755b71d..329758d835e1f 100644 --- a/source/common/http/conn_manager_impl.cc +++ b/source/common/http/conn_manager_impl.cc @@ -1118,23 +1118,23 @@ void ConnectionManagerImpl::ActiveStream::sendLocalReply( if (!state_.created_filter_chain_) { createFilterChain(); } - Utility::sendLocalReply(is_grpc_request, - [this, modify_headers](HeaderMapPtr&& headers, bool end_stream) -> void { - if (modify_headers != nullptr) { - modify_headers(*headers); - } - response_headers_ = std::move(headers); - // TODO: Start encoding from the last decoder filter that saw the - // request instead. - encodeHeaders(nullptr, *response_headers_, end_stream); - }, - [this](Buffer::Instance& data, bool end_stream) -> void { - // TODO: Start encoding from the last decoder filter that saw the - // request instead. - encodeData(nullptr, data, end_stream, - FilterIterationStartState::CanStartFromCurrent); - }, - state_.destroyed_, code, body, grpc_status, is_head_request); + Utility::sendLocalReply( + is_grpc_request, + [this, modify_headers](HeaderMapPtr&& headers, bool end_stream) -> void { + if (modify_headers != nullptr) { + modify_headers(*headers); + } + response_headers_ = std::move(headers); + // TODO: Start encoding from the last decoder filter that saw the + // request instead. 
+ encodeHeaders(nullptr, *response_headers_, end_stream); + }, + [this](Buffer::Instance& data, bool end_stream) -> void { + // TODO: Start encoding from the last decoder filter that saw the + // request instead. + encodeData(nullptr, data, end_stream, FilterIterationStartState::CanStartFromCurrent); + }, + state_.destroyed_, code, body, grpc_status, is_head_request); } void ConnectionManagerImpl::ActiveStream::encode100ContinueHeaders( diff --git a/source/common/http/utility.cc b/source/common/http/utility.cc index e004d1466b85b..92c787125c630 100644 --- a/source/common/http/utility.cc +++ b/source/common/http/utility.cc @@ -287,14 +287,15 @@ void Utility::sendLocalReply(bool is_grpc, StreamDecoderFilterCallbacks& callbac const bool& is_reset, Code response_code, absl::string_view body_text, const absl::optional grpc_status, bool is_head_request) { - sendLocalReply(is_grpc, - [&](HeaderMapPtr&& headers, bool end_stream) -> void { - callbacks.encodeHeaders(std::move(headers), end_stream); - }, - [&](Buffer::Instance& data, bool end_stream) -> void { - callbacks.encodeData(data, end_stream); - }, - is_reset, response_code, body_text, grpc_status, is_head_request); + sendLocalReply( + is_grpc, + [&](HeaderMapPtr&& headers, bool end_stream) -> void { + callbacks.encodeHeaders(std::move(headers), end_stream); + }, + [&](Buffer::Instance& data, bool end_stream) -> void { + callbacks.encodeData(data, end_stream); + }, + is_reset, response_code, body_text, grpc_status, is_head_request); } void Utility::sendLocalReply( diff --git a/source/common/network/dns_impl.cc b/source/common/network/dns_impl.cc index 1ece7d020fcab..885f56816b80d 100644 --- a/source/common/network/dns_impl.cc +++ b/source/common/network/dns_impl.cc @@ -223,12 +223,12 @@ ActiveDnsQuery* DnsResolverImpl::resolve(const std::string& dns_name, } void DnsResolverImpl::PendingResolution::getHostByName(int family) { - ares_gethostbyname(channel_, dns_name_.c_str(), family, - [](void* arg, int status, int 
timeouts, hostent* hostent) { - static_cast(arg)->onAresHostCallback(status, timeouts, - hostent); - }, - this); + ares_gethostbyname( + channel_, dns_name_.c_str(), family, + [](void* arg, int status, int timeouts, hostent* hostent) { + static_cast(arg)->onAresHostCallback(status, timeouts, hostent); + }, + this); } } // namespace Network diff --git a/source/common/router/router.cc b/source/common/router/router.cc index e2bd72dedd598..e4b18f231de17 100644 --- a/source/common/router/router.cc +++ b/source/common/router/router.cc @@ -329,14 +329,14 @@ Http::FilterHeadersStatus Filter::decodeHeaders(Http::HeaderMap& headers, bool e if (cluster_->maintenanceMode()) { callbacks_->streamInfo().setResponseFlag(StreamInfo::ResponseFlag::UpstreamOverflow); chargeUpstreamCode(Http::Code::ServiceUnavailable, nullptr, true); - callbacks_->sendLocalReply(Http::Code::ServiceUnavailable, "maintenance mode", - [this](Http::HeaderMap& headers) { - if (!config_.suppress_envoy_headers_) { - headers.insertEnvoyOverloaded().value( - Http::Headers::get().EnvoyOverloadedValues.True); - } - }, - absl::nullopt); + callbacks_->sendLocalReply( + Http::Code::ServiceUnavailable, "maintenance mode", + [this](Http::HeaderMap& headers) { + if (!config_.suppress_envoy_headers_) { + headers.insertEnvoyOverloaded().value(Http::Headers::get().EnvoyOverloadedValues.True); + } + }, + absl::nullopt); cluster_->stats().upstream_rq_maintenance_mode_.inc(); return Http::FilterHeadersStatus::StopIteration; } @@ -594,14 +594,14 @@ void Filter::onUpstreamAbort(Http::Code code, StreamInfo::ResponseFlag response_ if (upstream_host != nullptr && !Http::CodeUtility::is5xx(enumToInt(code))) { upstream_host->stats().rq_error_.inc(); } - callbacks_->sendLocalReply(code, body, - [dropped, this](Http::HeaderMap& headers) { - if (dropped && !config_.suppress_envoy_headers_) { - headers.insertEnvoyOverloaded().value( - Http::Headers::get().EnvoyOverloadedValues.True); - } - }, - absl::nullopt); + 
callbacks_->sendLocalReply( + code, body, + [dropped, this](Http::HeaderMap& headers) { + if (dropped && !config_.suppress_envoy_headers_) { + headers.insertEnvoyOverloaded().value(Http::Headers::get().EnvoyOverloadedValues.True); + } + }, + absl::nullopt); } } diff --git a/source/common/upstream/subset_lb.cc b/source/common/upstream/subset_lb.cc index d35672d88b0a9..4c2fdc44ab9b4 100644 --- a/source/common/upstream/subset_lb.cc +++ b/source/common/upstream/subset_lb.cc @@ -281,35 +281,35 @@ void SubsetLoadBalancer::update(uint32_t priority, const HostVector& hosts_added const HostVector& hosts_removed) { updateFallbackSubset(priority, hosts_added, hosts_removed); - processSubsets(hosts_added, hosts_removed, - [&](LbSubsetEntryPtr entry) { - const bool active_before = entry->active(); - entry->priority_subset_->update(priority, hosts_added, hosts_removed); - - if (active_before && !entry->active()) { - stats_.lb_subsets_active_.dec(); - stats_.lb_subsets_removed_.inc(); - } else if (!active_before && entry->active()) { - stats_.lb_subsets_active_.inc(); - stats_.lb_subsets_created_.inc(); - } - }, - [&](LbSubsetEntryPtr entry, HostPredicate predicate, const SubsetMetadata& kvs, - bool adding_host) { - UNREFERENCED_PARAMETER(kvs); - if (adding_host) { - ENVOY_LOG(debug, "subset lb: creating load balancer for {}", - describeMetadata(kvs)); - - // Initialize new entry with hosts and update stats. (An uninitialized entry - // with only removed hosts is a degenerate case and we leave the entry - // uninitialized.) 
- entry->priority_subset_.reset(new PrioritySubsetImpl( - *this, predicate, locality_weight_aware_, scale_locality_weight_)); - stats_.lb_subsets_active_.inc(); - stats_.lb_subsets_created_.inc(); - } - }); + processSubsets( + hosts_added, hosts_removed, + [&](LbSubsetEntryPtr entry) { + const bool active_before = entry->active(); + entry->priority_subset_->update(priority, hosts_added, hosts_removed); + + if (active_before && !entry->active()) { + stats_.lb_subsets_active_.dec(); + stats_.lb_subsets_removed_.inc(); + } else if (!active_before && entry->active()) { + stats_.lb_subsets_active_.inc(); + stats_.lb_subsets_created_.inc(); + } + }, + [&](LbSubsetEntryPtr entry, HostPredicate predicate, const SubsetMetadata& kvs, + bool adding_host) { + UNREFERENCED_PARAMETER(kvs); + if (adding_host) { + ENVOY_LOG(debug, "subset lb: creating load balancer for {}", describeMetadata(kvs)); + + // Initialize new entry with hosts and update stats. (An uninitialized entry + // with only removed hosts is a degenerate case and we leave the entry + // uninitialized.) 
+ entry->priority_subset_.reset(new PrioritySubsetImpl( + *this, predicate, locality_weight_aware_, scale_locality_weight_)); + stats_.lb_subsets_active_.inc(); + stats_.lb_subsets_created_.inc(); + } + }); } bool SubsetLoadBalancer::hostMatches(const SubsetMetadata& kvs, const Host& host) { diff --git a/source/extensions/common/tap/extension_config_base.cc b/source/extensions/common/tap/extension_config_base.cc index 83c514a357c9d..e72ce4458a2b9 100644 --- a/source/extensions/common/tap/extension_config_base.cc +++ b/source/extensions/common/tap/extension_config_base.cc @@ -29,7 +29,9 @@ ExtensionConfigBase::ExtensionConfigBase( ENVOY_LOG(debug, "initializing tap extension with static config"); break; } - default: { NOT_REACHED_GCOVR_EXCL_LINE; } + default: { + NOT_REACHED_GCOVR_EXCL_LINE; + } } } diff --git a/source/extensions/filters/http/health_check/health_check.cc b/source/extensions/filters/http/health_check/health_check.cc index 1fddafffffe66..11721329b677c 100644 --- a/source/extensions/filters/http/health_check/health_check.cc +++ b/source/extensions/filters/http/health_check/health_check.cc @@ -148,13 +148,14 @@ void HealthCheckFilter::onComplete() { } } - callbacks_->sendLocalReply(final_status, "", - [degraded](auto& headers) { - if (degraded) { - headers.insertEnvoyDegraded(); - } - }, - absl::nullopt); + callbacks_->sendLocalReply( + final_status, "", + [degraded](auto& headers) { + if (degraded) { + headers.insertEnvoyDegraded(); + } + }, + absl::nullopt); } } // namespace HealthCheck diff --git a/source/extensions/filters/http/jwt_authn/verifier.cc b/source/extensions/filters/http/jwt_authn/verifier.cc index f9b4f3a59c2d7..efa9b8548f828 100644 --- a/source/extensions/filters/http/jwt_authn/verifier.cc +++ b/source/extensions/filters/http/jwt_authn/verifier.cc @@ -111,13 +111,14 @@ class ProviderVerifierImpl : public BaseVerifierImpl { auto& ctximpl = static_cast(*context); auto auth = auth_factory_.create(getAudienceChecker(), provider_name_, 
false); extractor_->sanitizePayloadHeaders(ctximpl.headers()); - auth->verify(ctximpl.headers(), extractor_->extract(ctximpl.headers()), - [&ctximpl](const std::string& name, const ProtobufWkt::Struct& payload) { - ctximpl.addPayload(name, payload); - }, - [this, context](const Status& status) { - onComplete(status, static_cast(*context)); - }); + auth->verify( + ctximpl.headers(), extractor_->extract(ctximpl.headers()), + [&ctximpl](const std::string& name, const ProtobufWkt::Struct& payload) { + ctximpl.addPayload(name, payload); + }, + [this, context](const Status& status) { + onComplete(status, static_cast(*context)); + }); if (!ctximpl.getCompletionState(this).is_completed_) { ctximpl.storeAuth(std::move(auth)); } else { @@ -160,13 +161,14 @@ class AllowFailedVerifierImpl : public BaseVerifierImpl { auto& ctximpl = static_cast(*context); auto auth = auth_factory_.create(nullptr, absl::nullopt, true); extractor_.sanitizePayloadHeaders(ctximpl.headers()); - auth->verify(ctximpl.headers(), extractor_.extract(ctximpl.headers()), - [&ctximpl](const std::string& name, const ProtobufWkt::Struct& payload) { - ctximpl.addPayload(name, payload); - }, - [this, context](const Status& status) { - onComplete(status, static_cast(*context)); - }); + auth->verify( + ctximpl.headers(), extractor_.extract(ctximpl.headers()), + [&ctximpl](const std::string& name, const ProtobufWkt::Struct& payload) { + ctximpl.addPayload(name, payload); + }, + [this, context](const Status& status) { + onComplete(status, static_cast(*context)); + }); if (!ctximpl.getCompletionState(this).is_completed_) { ctximpl.storeAuth(std::move(auth)); } else { diff --git a/source/extensions/filters/http/ratelimit/ratelimit.cc b/source/extensions/filters/http/ratelimit/ratelimit.cc index 244014f0fbe44..9fd61686f9c55 100644 --- a/source/extensions/filters/http/ratelimit/ratelimit.cc +++ b/source/extensions/filters/http/ratelimit/ratelimit.cc @@ -150,9 +150,9 @@ void 
Filter::complete(Filters::Common::RateLimit::LimitStatus status, if (status == Filters::Common::RateLimit::LimitStatus::OverLimit && config_->runtime().snapshot().featureEnabled("ratelimit.http_filter_enforcing", 100)) { state_ = State::Responded; - callbacks_->sendLocalReply(Http::Code::TooManyRequests, "", - [this](Http::HeaderMap& headers) { addHeaders(headers); }, - config_->rateLimitedGrpcStatus()); + callbacks_->sendLocalReply( + Http::Code::TooManyRequests, "", [this](Http::HeaderMap& headers) { addHeaders(headers); }, + config_->rateLimitedGrpcStatus()); callbacks_->streamInfo().setResponseFlag(StreamInfo::ResponseFlag::RateLimited); } else if (status == Filters::Common::RateLimit::LimitStatus::Error) { if (config_->failureModeAllow()) { diff --git a/source/extensions/filters/listener/proxy_protocol/proxy_protocol.cc b/source/extensions/filters/listener/proxy_protocol/proxy_protocol.cc index 26189a504bea8..9c6fb7b93997c 100644 --- a/source/extensions/filters/listener/proxy_protocol/proxy_protocol.cc +++ b/source/extensions/filters/listener/proxy_protocol/proxy_protocol.cc @@ -33,13 +33,13 @@ Network::FilterStatus Filter::onAccept(Network::ListenerFilterCallbacks& cb) { ENVOY_LOG(debug, "proxy_protocol: New connection accepted"); Network::ConnectionSocket& socket = cb.socket(); ASSERT(file_event_.get() == nullptr); - file_event_ = - cb.dispatcher().createFileEvent(socket.ioHandle().fd(), - [this](uint32_t events) { - ASSERT(events == Event::FileReadyType::Read); - onRead(); - }, - Event::FileTriggerType::Edge, Event::FileReadyType::Read); + file_event_ = cb.dispatcher().createFileEvent( + socket.ioHandle().fd(), + [this](uint32_t events) { + ASSERT(events == Event::FileReadyType::Read); + onRead(); + }, + Event::FileTriggerType::Edge, Event::FileReadyType::Read); cb_ = &cb; return Network::FilterStatus::StopIteration; } diff --git a/source/extensions/filters/network/common/redis/codec_impl.cc b/source/extensions/filters/network/common/redis/codec_impl.cc 
index 92faca71da2a9..4c80b43ff5123 100644 --- a/source/extensions/filters/network/common/redis/codec_impl.cc +++ b/source/extensions/filters/network/common/redis/codec_impl.cc @@ -248,7 +248,9 @@ void DecoderImpl::parseSlice(const Buffer::RawSlice& slice) { pending_value_stack_.front().value_->type(RespType::Integer); break; } - default: { throw ProtocolError("invalid value type"); } + default: { + throw ProtocolError("invalid value type"); + } } remaining--; diff --git a/source/extensions/transport_sockets/tls/context_impl.cc b/source/extensions/transport_sockets/tls/context_impl.cc index 61c14856a58a5..85a945ac78d25 100644 --- a/source/extensions/transport_sockets/tls/context_impl.cc +++ b/source/extensions/transport_sockets/tls/context_impl.cc @@ -767,13 +767,13 @@ ServerContextImpl::ServerContextImpl(Stats::Scope& scope, } if (!parsed_alpn_protocols_.empty()) { - SSL_CTX_set_alpn_select_cb(ctx.ssl_ctx_.get(), - [](SSL*, const unsigned char** out, unsigned char* outlen, - const unsigned char* in, unsigned int inlen, void* arg) -> int { - return static_cast(arg)->alpnSelectCallback( - out, outlen, in, inlen); - }, - this); + SSL_CTX_set_alpn_select_cb( + ctx.ssl_ctx_.get(), + [](SSL*, const unsigned char** out, unsigned char* outlen, const unsigned char* in, + unsigned int inlen, void* arg) -> int { + return static_cast(arg)->alpnSelectCallback(out, outlen, in, inlen); + }, + this); } if (!session_ticket_keys_.empty()) { diff --git a/source/server/hot_restart_impl.cc b/source/server/hot_restart_impl.cc index 52f4827dfcb9b..1fb34c4a879c4 100644 --- a/source/server/hot_restart_impl.cc +++ b/source/server/hot_restart_impl.cc @@ -246,13 +246,13 @@ void HotRestartImpl::getParentStats(GetParentStatsInfo& info) { } void HotRestartImpl::initialize(Event::Dispatcher& dispatcher, Server::Instance& server) { - socket_event_ = - dispatcher.createFileEvent(my_domain_socket_, - [this](uint32_t events) -> void { - ASSERT(events == Event::FileReadyType::Read); - onSocketEvent(); 
- }, - Event::FileTriggerType::Edge, Event::FileReadyType::Read); + socket_event_ = dispatcher.createFileEvent( + my_domain_socket_, + [this](uint32_t events) -> void { + ASSERT(events == Event::FileReadyType::Read); + onSocketEvent(); + }, + Event::FileTriggerType::Edge, Event::FileReadyType::Read); server_ = &server; } diff --git a/source/server/server.h b/source/server/server.h index 6de9514367b82..8adb9e5b3d360 100644 --- a/source/server/server.h +++ b/source/server/server.h @@ -207,7 +207,8 @@ class InstanceImpl : Logger::Loggable, uint64_t numConnections(); void startWorkers(); void terminate(); - void notifyCallbacksForStage(Stage stage, Event::PostCb completion_cb = [] {}); + void notifyCallbacksForStage( + Stage stage, Event::PostCb completion_cb = [] {}); // init_manager_ must come before any member that participates in initialization, and destructed // only after referencing members are gone, since initialization continuation can potentially diff --git a/test/common/secret/sds_api_test.cc b/test/common/secret/sds_api_test.cc index e324702f214b1..163ad7b549f52 100644 --- a/test/common/secret/sds_api_test.cc +++ b/test/common/secret/sds_api_test.cc @@ -53,9 +53,9 @@ TEST_F(SdsApiTest, BasicTest) { auto google_grpc = grpc_service->mutable_google_grpc(); google_grpc->set_target_uri("fake_address"); google_grpc->set_stat_prefix("test"); - TlsCertificateSdsApi sds_api(server.localInfo(), server.dispatcher(), server.random(), - server.stats(), server.clusterManager(), init_manager, config_source, - "abc.com", []() {}, *api_); + TlsCertificateSdsApi sds_api( + server.localInfo(), server.dispatcher(), server.random(), server.stats(), + server.clusterManager(), init_manager, config_source, "abc.com", []() {}, *api_); NiceMock* grpc_client{new NiceMock()}; NiceMock* factory{new NiceMock()}; @@ -76,9 +76,9 @@ TEST_F(SdsApiTest, DynamicTlsCertificateUpdateSuccess) { NiceMock server; NiceMock init_manager; envoy::api::v2::core::ConfigSource config_source; - 
TlsCertificateSdsApi sds_api(server.localInfo(), server.dispatcher(), server.random(), - server.stats(), server.clusterManager(), init_manager, config_source, - "abc.com", []() {}, *api_); + TlsCertificateSdsApi sds_api( + server.localInfo(), server.dispatcher(), server.random(), server.stats(), + server.clusterManager(), init_manager, config_source, "abc.com", []() {}, *api_); NiceMock secret_callback; auto handle = @@ -243,9 +243,9 @@ TEST_F(SdsApiTest, EmptyResource) { NiceMock server; NiceMock init_manager; envoy::api::v2::core::ConfigSource config_source; - TlsCertificateSdsApi sds_api(server.localInfo(), server.dispatcher(), server.random(), - server.stats(), server.clusterManager(), init_manager, config_source, - "abc.com", []() {}, *api_); + TlsCertificateSdsApi sds_api( + server.localInfo(), server.dispatcher(), server.random(), server.stats(), + server.clusterManager(), init_manager, config_source, "abc.com", []() {}, *api_); Protobuf::RepeatedPtrField secret_resources; @@ -258,9 +258,9 @@ TEST_F(SdsApiTest, SecretUpdateWrongSize) { NiceMock server; NiceMock init_manager; envoy::api::v2::core::ConfigSource config_source; - TlsCertificateSdsApi sds_api(server.localInfo(), server.dispatcher(), server.random(), - server.stats(), server.clusterManager(), init_manager, config_source, - "abc.com", []() {}, *api_); + TlsCertificateSdsApi sds_api( + server.localInfo(), server.dispatcher(), server.random(), server.stats(), + server.clusterManager(), init_manager, config_source, "abc.com", []() {}, *api_); std::string yaml = R"EOF( @@ -288,9 +288,9 @@ TEST_F(SdsApiTest, SecretUpdateWrongSecretName) { NiceMock server; NiceMock init_manager; envoy::api::v2::core::ConfigSource config_source; - TlsCertificateSdsApi sds_api(server.localInfo(), server.dispatcher(), server.random(), - server.stats(), server.clusterManager(), init_manager, config_source, - "abc.com", []() {}, *api_); + TlsCertificateSdsApi sds_api( + server.localInfo(), server.dispatcher(), 
server.random(), server.stats(), + server.clusterManager(), init_manager, config_source, "abc.com", []() {}, *api_); std::string yaml = R"EOF( diff --git a/test/extensions/filters/http/jwt_authn/authenticator_test.cc b/test/extensions/filters/http/jwt_authn/authenticator_test.cc index c562b67a62e8a..b47fe293852a0 100644 --- a/test/extensions/filters/http/jwt_authn/authenticator_test.cc +++ b/test/extensions/filters/http/jwt_authn/authenticator_test.cc @@ -42,10 +42,10 @@ class AuthenticatorTest : public testing::Test { filter_config_ = ::std::make_shared(proto_config_, "", mock_factory_ctx_); raw_fetcher_ = new MockJwksFetcher; fetcher_.reset(raw_fetcher_); - auth_ = Authenticator::create(check_audience, provider, !provider, - filter_config_->getCache().getJwksCache(), filter_config_->cm(), - [this](Upstream::ClusterManager&) { return std::move(fetcher_); }, - filter_config_->timeSource()); + auth_ = Authenticator::create( + check_audience, provider, !provider, filter_config_->getCache().getJwksCache(), + filter_config_->cm(), [this](Upstream::ClusterManager&) { return std::move(fetcher_); }, + filter_config_->timeSource()); jwks_ = Jwks::createFrom(PublicKey, Jwks::JWKS); EXPECT_TRUE(jwks_->getStatus() == Status::Ok); } diff --git a/test/extensions/transport_sockets/tls/ssl_socket_test.cc b/test/extensions/transport_sockets/tls/ssl_socket_test.cc index 3851df81a4921..9709e63a066aa 100644 --- a/test/extensions/transport_sockets/tls/ssl_socket_test.cc +++ b/test/extensions/transport_sockets/tls/ssl_socket_test.cc @@ -2226,14 +2226,15 @@ TEST_P(SslSocketTest, ClientAuthMultipleCAs) { // Verify that server sent list with 2 acceptable client certificate CA names. 
const SslSocket* ssl_socket = dynamic_cast(client_connection->ssl()); - SSL_set_cert_cb(ssl_socket->rawSslForTest(), - [](SSL* ssl, void*) -> int { - STACK_OF(X509_NAME)* list = SSL_get_client_CA_list(ssl); - EXPECT_NE(nullptr, list); - EXPECT_EQ(2U, sk_X509_NAME_num(list)); - return 1; - }, - nullptr); + SSL_set_cert_cb( + ssl_socket->rawSslForTest(), + [](SSL* ssl, void*) -> int { + STACK_OF(X509_NAME)* list = SSL_get_client_CA_list(ssl); + EXPECT_NE(nullptr, list); + EXPECT_EQ(2U, sk_X509_NAME_num(list)); + return 1; + }, + nullptr); client_connection->connect(); diff --git a/test/integration/http_integration.cc b/test/integration/http_integration.cc index d2d6e4d33fc78..26a36e623d161 100644 --- a/test/integration/http_integration.cc +++ b/test/integration/http_integration.cc @@ -206,13 +206,13 @@ HttpIntegrationTest::makeHttpConnection(Network::ClientConnectionPtr&& conn) { HttpIntegrationTest::HttpIntegrationTest(Http::CodecClient::Type downstream_protocol, Network::Address::IpVersion version, const std::string& config) - : HttpIntegrationTest::HttpIntegrationTest(downstream_protocol, - [version](int) { - return Network::Utility::parseInternetAddress( - Network::Test::getAnyAddressString(version), - 0); - }, - version, config) {} + : HttpIntegrationTest::HttpIntegrationTest( + downstream_protocol, + [version](int) { + return Network::Utility::parseInternetAddress( + Network::Test::getAnyAddressString(version), 0); + }, + version, config) {} HttpIntegrationTest::HttpIntegrationTest(Http::CodecClient::Type downstream_protocol, const InstanceConstSharedPtrFn& upstream_address_fn, diff --git a/test/integration/integration_test.h b/test/integration/integration_test.h index a3b86ead3a858..2af85a268ec9c 100644 --- a/test/integration/integration_test.h +++ b/test/integration/integration_test.h @@ -16,11 +16,12 @@ class UpstreamEndpointIntegrationTest : public testing::TestWithParam(argv.size(), argv.data(), - [](uint64_t, uint64_t, bool) { return "1"; }, - 
spdlog::level::warn); + return std::make_unique( + argv.size(), argv.data(), [](uint64_t, uint64_t, bool) { return "1"; }, + spdlog::level::warn); } }; diff --git a/tools/check_format.py b/tools/check_format.py index 4be8bd4e6999b..07b960aac2f83 100755 --- a/tools/check_format.py +++ b/tools/check_format.py @@ -46,7 +46,7 @@ # Files in these paths can use Protobuf::util::JsonStringToMessage JSON_STRING_TO_MESSAGE_WHITELIST = ('./source/common/protobuf/utility.cc') -CLANG_FORMAT_PATH = os.getenv("CLANG_FORMAT", "clang-format-7") +CLANG_FORMAT_PATH = os.getenv("CLANG_FORMAT", "clang-format-8") BUILDIFIER_PATH = os.getenv("BUILDIFIER_BIN", "$GOPATH/bin/buildifier") ENVOY_BUILD_FIXER_PATH = os.path.join( os.path.dirname(os.path.abspath(sys.argv[0])), "envoy_build_fixer.py") @@ -120,8 +120,8 @@ def checkTools(): "installed, but the binary name is different or it's not available in " "PATH, please use CLANG_FORMAT environment variable to specify the path. " "Examples:\n" - " export CLANG_FORMAT=clang-format-7.0.0\n" - " export CLANG_FORMAT=/opt/bin/clang-format-7\n" + " export CLANG_FORMAT=clang-format-8.0.0\n" + " export CLANG_FORMAT=/opt/bin/clang-format-8\n" " export CLANG_FORMAT=/usr/local/opt/llvm@7/bin/clang-format".format(CLANG_FORMAT_PATH)) buildifier_abs_path = lookPath(BUILDIFIER_PATH) diff --git a/tools/check_format_test_helper.py b/tools/check_format_test_helper.py index a348a861e4e66..395c0de3e7da9 100755 --- a/tools/check_format_test_helper.py +++ b/tools/check_format_test_helper.py @@ -136,7 +136,7 @@ def checkToolNotFoundError(): # Temporarily change PATH to test the error about lack of external tools. oldPath = os.environ["PATH"] os.environ["PATH"] = "/sbin:/usr/sbin" - clang_format = os.getenv("CLANG_FORMAT", "clang-format-7") + clang_format = os.getenv("CLANG_FORMAT", "clang-format-8") errors = checkFileExpectingError("no_namespace_envoy.cc", "Command %s not found." 
% clang_format) os.environ["PATH"] = oldPath return errors From b81ff993d954c2539d0c60d736d1322f13682dfe Mon Sep 17 00:00:00 2001 From: danzh Date: Thu, 11 Apr 2019 10:25:12 -0400 Subject: [PATCH 099/165] quiche: implement quic_port_utils (#6488) Implement QuicPickUnusedPortOrDie() and QuicRecyclePort() backed by Envoy::Network::Test::findOrCheckFreePort(). Added include_prefix to envoy_cc_test_library argument list and pass it down. So that quiche test only impl's can be included by api header files with relative #include path. Risk Level: low, not in use Testing: added new tests in test/extensions/quic_listeners/quiche/platform/quic_platform_test.cc Part of #2557 Signed-off-by: Dan Zhang --- bazel/envoy_build_system.bzl | 8 +++- bazel/external/quiche.BUILD | 14 ++++++ source/common/network/address_impl.cc | 21 +++++---- .../quic_listeners/quiche/platform/BUILD | 8 ++++ .../quiche/platform/quic_port_utils_impl.h | 11 +++++ .../quic_listeners/quiche/platform/BUILD | 13 ++++++ .../quiche/platform/quic_platform_test.cc | 41 +++++++++++++++++ .../platform/quic_port_utils_test_impl.cc | 44 +++++++++++++++++++ .../platform/quic_port_utils_test_impl.h | 16 +++++++ 9 files changed, 163 insertions(+), 13 deletions(-) create mode 100644 source/extensions/quic_listeners/quiche/platform/quic_port_utils_impl.h create mode 100644 test/extensions/quic_listeners/quiche/platform/quic_port_utils_test_impl.cc create mode 100644 test/extensions/quic_listeners/quiche/platform/quic_port_utils_test_impl.h diff --git a/bazel/envoy_build_system.bzl b/bazel/envoy_build_system.bzl index 2b75b1e018fae..d7de0270b6c48 100644 --- a/bazel/envoy_build_system.bzl +++ b/bazel/envoy_build_system.bzl @@ -487,7 +487,8 @@ def envoy_cc_test_infrastructure_library( external_deps = [], deps = [], repository = "", - tags = []): + tags = [], + include_prefix = None): native.cc_library( name = name, srcs = srcs, @@ -499,6 +500,7 @@ def envoy_cc_test_infrastructure_library( 
envoy_external_dep_path("googletest"), ], tags = tags, + include_prefix = include_prefix, alwayslink = 1, linkstatic = 1, visibility = ["//visibility:public"], @@ -514,7 +516,8 @@ def envoy_cc_test_library( external_deps = [], deps = [], repository = "", - tags = []): + tags = [], + include_prefix = None): deps = deps + [ repository + "//test/test_common:printers_includes", ] @@ -527,6 +530,7 @@ def envoy_cc_test_library( deps, repository, tags, + include_prefix, ) # Envoy test binaries should be specified with this function. diff --git a/bazel/external/quiche.BUILD b/bazel/external/quiche.BUILD index 59b4d894ab894..69cae5ea7c61f 100644 --- a/bazel/external/quiche.BUILD +++ b/bazel/external/quiche.BUILD @@ -137,6 +137,20 @@ cc_library( deps = ["@envoy//source/extensions/quic_listeners/quiche/platform:quic_platform_export_impl_lib"], ) +cc_library( + name = "quic_platform_port_utils", + testonly = 1, + hdrs = envoy_select_quiche( + ["quiche/quic/platform/api/quic_port_utils.h"], + "@envoy", + ), + visibility = ["//visibility:public"], + deps = envoy_select_quiche( + ["@envoy//source/extensions/quic_listeners/quiche/platform:quic_platform_port_utils_impl_lib"], + "@envoy", + ), +) + cc_library( name = "quic_platform_base", hdrs = [ diff --git a/source/common/network/address_impl.cc b/source/common/network/address_impl.cc index 7298f304f6908..0b6536074d3eb 100644 --- a/source/common/network/address_impl.cc +++ b/source/common/network/address_impl.cc @@ -219,9 +219,9 @@ bool Ipv4Instance::operator==(const Instance& rhs) const { } Api::SysCallIntResult Ipv4Instance::bind(int fd) const { - const int rc = ::bind(fd, reinterpret_cast(&ip_.ipv4_.address_), - sizeof(ip_.ipv4_.address_)); - return {rc, errno}; + auto& os_syscalls = Api::OsSysCallsSingleton::get(); + return os_syscalls.bind(fd, reinterpret_cast(&ip_.ipv4_.address_), + sizeof(ip_.ipv4_.address_)); } Api::SysCallIntResult Ipv4Instance::connect(int fd) const { @@ -310,9 +310,9 @@ bool 
Ipv6Instance::operator==(const Instance& rhs) const { } Api::SysCallIntResult Ipv6Instance::bind(int fd) const { - const int rc = ::bind(fd, reinterpret_cast(&ip_.ipv6_.address_), - sizeof(ip_.ipv6_.address_)); - return {rc, errno}; + auto& os_syscalls = Api::OsSysCallsSingleton::get(); + return os_syscalls.bind(fd, reinterpret_cast(&ip_.ipv6_.address_), + sizeof(ip_.ipv6_.address_)); } Api::SysCallIntResult Ipv6Instance::connect(int fd) const { @@ -370,17 +370,16 @@ PipeInstance::PipeInstance(const std::string& pipe_path) : InstanceBase(Type::Pi bool PipeInstance::operator==(const Instance& rhs) const { return asString() == rhs.asString(); } Api::SysCallIntResult PipeInstance::bind(int fd) const { + auto& os_syscalls = Api::OsSysCallsSingleton::get(); if (abstract_namespace_) { - const int rc = ::bind(fd, reinterpret_cast(&address_), - offsetof(struct sockaddr_un, sun_path) + address_length_); - return {rc, errno}; + return os_syscalls.bind(fd, reinterpret_cast(&address_), + offsetof(struct sockaddr_un, sun_path) + address_length_); } // Try to unlink an existing filesystem object at the requested path. Ignore // errors -- it's fine if the path doesn't exist, and if it exists but can't // be unlinked then `::bind()` will generate a reasonable errno. 
unlink(address_.sun_path); - const int rc = ::bind(fd, reinterpret_cast(&address_), sizeof(address_)); - return {rc, errno}; + return os_syscalls.bind(fd, reinterpret_cast(&address_), sizeof(address_)); } Api::SysCallIntResult PipeInstance::connect(int fd) const { diff --git a/source/extensions/quic_listeners/quiche/platform/BUILD b/source/extensions/quic_listeners/quiche/platform/BUILD index f9aa6755c62d3..4244e69f7c91b 100644 --- a/source/extensions/quic_listeners/quiche/platform/BUILD +++ b/source/extensions/quic_listeners/quiche/platform/BUILD @@ -3,6 +3,7 @@ licenses(["notice"]) # Apache 2 load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", + "envoy_cc_test_library", "envoy_package", "envoy_select_quiche", ) @@ -65,6 +66,13 @@ envoy_cc_library( visibility = ["//visibility:public"], ) +envoy_cc_test_library( + name = "quic_platform_port_utils_impl_lib", + hdrs = ["quic_port_utils_impl.h"], + include_prefix = "extensions/quic_listeners/quiche/platform", + deps = ["//test/extensions/quic_listeners/quiche/platform:quic_platform_port_utils_test_impl_lib"], +) + envoy_cc_library( name = "quic_platform_logging_impl_lib", srcs = ["quic_logging_impl.cc"], diff --git a/source/extensions/quic_listeners/quiche/platform/quic_port_utils_impl.h b/source/extensions/quic_listeners/quiche/platform/quic_port_utils_impl.h new file mode 100644 index 0000000000000..449f0cb7524cd --- /dev/null +++ b/source/extensions/quic_listeners/quiche/platform/quic_port_utils_impl.h @@ -0,0 +1,11 @@ +#pragma once + +// NOLINT(namespace-envoy) + +// This file is part of the QUICHE platform implementation, and is not to be +// consumed or referenced directly by other Envoy code. It serves purely as a +// porting layer for QUICHE. + +// Link in actually implementation under //test. This is necessary because test +// only feature should stay under //test to for maintenance purpose. 
+#include "test/extensions/quic_listeners/quiche/platform/quic_port_utils_test_impl.h" diff --git a/test/extensions/quic_listeners/quiche/platform/BUILD b/test/extensions/quic_listeners/quiche/platform/BUILD index 16d008648f526..1518271bc8d2a 100644 --- a/test/extensions/quic_listeners/quiche/platform/BUILD +++ b/test/extensions/quic_listeners/quiche/platform/BUILD @@ -32,8 +32,11 @@ envoy_cc_test( external_deps = ["quiche_quic_platform"], deps = [ "//test/extensions/transport_sockets/tls:ssl_test_utils", + "//test/mocks/api:api_mocks", "//test/test_common:logging_lib", + "//test/test_common:threadsafe_singleton_injector_lib", "//test/test_common:utility_lib", + "@com_googlesource_quiche//:quic_platform_port_utils", "@com_googlesource_quiche//:quic_platform_sleep", ], ) @@ -47,3 +50,13 @@ envoy_cc_test( "//test/test_common:utility_lib", ], ) + +envoy_cc_test_library( + name = "quic_platform_port_utils_test_impl_lib", + srcs = ["quic_port_utils_test_impl.cc"], + hdrs = ["quic_port_utils_test_impl.h"], + deps = [ + "//source/common/network:utility_lib", + "//test/test_common:environment_lib", + ], +) diff --git a/test/extensions/quic_listeners/quiche/platform/quic_platform_test.cc b/test/extensions/quic_listeners/quiche/platform/quic_platform_test.cc index 8ec7107bf27da..294e58e708ba0 100644 --- a/test/extensions/quic_listeners/quiche/platform/quic_platform_test.cc +++ b/test/extensions/quic_listeners/quiche/platform/quic_platform_test.cc @@ -4,12 +4,19 @@ // consumed or referenced directly by other Envoy code. It serves purely as a // porting layer for QUICHE. 
+#include + #include #include +#include "common/network/utility.h" + #include "test/extensions/transport_sockets/tls/ssl_test_utility.h" +#include "test/mocks/api/mocks.h" #include "test/test_common/environment.h" #include "test/test_common/logging.h" +#include "test/test_common/network_utility.h" +#include "test/test_common/threadsafe_singleton_injector.h" #include "test/test_common/utility.h" #include "gmock/gmock.h" @@ -30,6 +37,7 @@ #include "quiche/quic/platform/api/quic_map_util.h" #include "quiche/quic/platform/api/quic_mock_log.h" #include "quiche/quic/platform/api/quic_mutex.h" +#include "quiche/quic/platform/api/quic_port_utils.h" #include "quiche/quic/platform/api/quic_ptr_util.h" #include "quiche/quic/platform/api/quic_server_stats.h" #include "quiche/quic/platform/api/quic_sleep.h" @@ -39,6 +47,7 @@ #include "quiche/quic/platform/api/quic_thread.h" #include "quiche/quic/platform/api/quic_uint128.h" +using testing::_; using testing::HasSubstr; // Basic tests to validate functioning of the QUICHE quic platform @@ -47,6 +56,8 @@ using testing::HasSubstr; // minimal, and serve primarily to verify the APIs compile and link without // issue. +using testing::Return; + namespace quic { namespace { @@ -531,5 +542,35 @@ TEST_F(FileUtilsTest, ReadFileContents) { EXPECT_EQ(data, output); } +TEST_F(QuicPlatformTest, PickUnsedPort) { + int port = QuicPickUnusedPortOrDie(); + std::vector supported_versions = + Envoy::TestEnvironment::getIpVersionsForTest(); + for (auto ip_version : supported_versions) { + Envoy::Network::Address::InstanceConstSharedPtr addr = + Envoy::Network::Test::getCanonicalLoopbackAddress(ip_version); + Envoy::Network::Address::InstanceConstSharedPtr addr_with_port = + Envoy::Network::Utility::getAddressWithPort(*addr, port); + Envoy::Network::IoHandlePtr io_handle = + addr_with_port->socket(Envoy::Network::Address::SocketType::Datagram); + // binding of given port should success. 
+ EXPECT_EQ(0, addr_with_port->bind(io_handle->fd()).rc_); + } +} + +TEST_F(QuicPlatformTest, FailToPickUnsedPort) { + Envoy::Api::MockOsSysCalls os_sys_calls; + Envoy::TestThreadsafeSingletonInjector os_calls(&os_sys_calls); + // Actually create sockets. + EXPECT_CALL(os_sys_calls, socket(_, _, _)).WillRepeatedly([](int domain, int type, int protocol) { + int fd = ::socket(domain, type, protocol); + return Envoy::Api::SysCallIntResult{fd, errno}; + }); + // Fail bind call's to mimic port exhaustion. + EXPECT_CALL(os_sys_calls, bind(_, _, _)) + .WillRepeatedly(Return(Envoy::Api::SysCallIntResult{-1, EADDRINUSE})); + EXPECT_DEATH_LOG_TO_STDERR(QuicPickUnusedPortOrDie(), "Failed to pick a port for test."); +} + } // namespace } // namespace quic diff --git a/test/extensions/quic_listeners/quiche/platform/quic_port_utils_test_impl.cc b/test/extensions/quic_listeners/quiche/platform/quic_port_utils_test_impl.cc new file mode 100644 index 0000000000000..e8ae583bc494d --- /dev/null +++ b/test/extensions/quic_listeners/quiche/platform/quic_port_utils_test_impl.cc @@ -0,0 +1,44 @@ +// NOLINT(namespace-envoy) + +// This file is part of the QUICHE platform implementation, and is not to be +// consumed or referenced directly by other Envoy code. It serves purely as a +// porting layer for QUICHE. + +#include "test/extensions/quic_listeners/quiche/platform/quic_port_utils_test_impl.h" + +#include "envoy/network/address.h" + +#include "common/common/assert.h" +#include "common/network/utility.h" + +#include "test/test_common/environment.h" +#include "test/test_common/network_utility.h" + +namespace quic { + +int QuicPickUnusedPortOrDieImpl() { + std::vector supported_versions = + Envoy::TestEnvironment::getIpVersionsForTest(); + ASSERT(!supported_versions.empty()); + // Checking availability under corresponding supported version if test + // supports v4 only or v6 only. 
+ // If it supports both v4 and v6, checking availability under v6 with IPV6_V6ONLY + // set to false is sufficient because such socket can be used on v4-mapped + // v6 address. + const Envoy::Network::Address::IpVersion ip_version = + supported_versions.size() == 1 ? supported_versions[0] + : Envoy::Network::Address::IpVersion::v6; + auto addr_port = Envoy::Network::Utility::parseInternetAddressAndPort( + fmt::format("{}:{}", Envoy::Network::Test::getAnyAddressUrlString(ip_version), /*port*/ 0), + /*v6only*/ false); + ASSERT(addr_port != nullptr); + addr_port = Envoy::Network::Test::findOrCheckFreePort( + addr_port, Envoy::Network::Address::SocketType::Datagram); + if (addr_port != nullptr && addr_port->ip() != nullptr) { + // Find a port. + return addr_port->ip()->port(); + } + RELEASE_ASSERT(false, "Failed to pick a port for test."); +} + +} // namespace quic diff --git a/test/extensions/quic_listeners/quiche/platform/quic_port_utils_test_impl.h b/test/extensions/quic_listeners/quiche/platform/quic_port_utils_test_impl.h new file mode 100644 index 0000000000000..405266ca0c4eb --- /dev/null +++ b/test/extensions/quic_listeners/quiche/platform/quic_port_utils_test_impl.h @@ -0,0 +1,16 @@ +#pragma once + +// NOLINT(namespace-envoy) + +// This file is part of the QUICHE platform implementation, and is not to be +// consumed or referenced directly by other Envoy code. It serves purely as a +// porting layer for QUICHE. + +namespace quic { + +int QuicPickUnusedPortOrDieImpl(); +inline void QuicRecyclePortImpl(int) { + // No-op with current port picking implementation. 
+} + +} // namespace quic From d0feea094d208c1b2e6f1ff524c8e94517e662e8 Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Thu, 11 Apr 2019 10:32:05 -0400 Subject: [PATCH 100/165] test: adding acess log utils for the integration test (#6547) Risk Level: n/a (test only) Testing: added a sample test Docs Changes: n/a Release Notes: n/a Signed-off-by: Alyssa Wilk --- test/config/BUILD | 1 + test/config/utility.cc | 21 +++++++++++++++++++++ test/config/utility.h | 4 ++++ test/integration/BUILD | 2 +- test/integration/http_integration.cc | 18 ++++++++++++++++++ test/integration/http_integration.h | 6 ++++++ test/integration/integration_test.cc | 3 +++ test/test_common/environment.cc | 9 ++++++--- test/test_common/environment.h | 5 ++++- test/test_common/utility.h | 10 ++++++++++ 10 files changed, 74 insertions(+), 5 deletions(-) diff --git a/test/config/BUILD b/test/config/BUILD index 226469a1fdad3..17d8b7107e884 100644 --- a/test/config/BUILD +++ b/test/config/BUILD @@ -30,6 +30,7 @@ envoy_cc_test_library( "//test/test_common:network_utility_lib", "//test/test_common:utility_lib", "@envoy_api//envoy/api/v2:eds_cc", + "@envoy_api//envoy/config/accesslog/v2:file_cc", "@envoy_api//envoy/config/bootstrap/v2:bootstrap_cc", "@envoy_api//envoy/config/transport_socket/tap/v2alpha:tap_cc", ], diff --git a/test/config/utility.cc b/test/config/utility.cc index 971244540ee8f..a27970824b5b9 100644 --- a/test/config/utility.cc +++ b/test/config/utility.cc @@ -1,5 +1,6 @@ #include "test/config/utility.h" +#include "envoy/config/accesslog/v2/file.pb.h" #include "envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.pb.h" #include "envoy/config/transport_socket/tap/v2alpha/tap.pb.h" #include "envoy/http/codec.h" @@ -59,6 +60,12 @@ const std::string ConfigHelper::HTTP_PROXY_CONFIG = BASE_CONFIG + R"EOF( http_filters: name: envoy.router codec_type: HTTP1 + access_log: + name: envoy.file_access_log + filter: + not_health_check_filter: {} + config: + path: /dev/null 
route_config: virtual_hosts: name: integration @@ -486,6 +493,20 @@ void ConfigHelper::addSslConfig(const ServerSslOptions& options) { initializeTls(options, *filter_chain->mutable_tls_context()->mutable_common_tls_context()); } +bool ConfigHelper::setAccessLog(const std::string& filename) { + if (getFilterFromListener("envoy.http_connection_manager") == nullptr) { + return false; + } + // Replace /dev/null with a real path for the file access log. + envoy::config::filter::network::http_connection_manager::v2::HttpConnectionManager hcm_config; + loadHttpConnectionManager(hcm_config); + envoy::config::accesslog::v2::FileAccessLog access_log_config; + access_log_config.set_path(filename); + MessageUtil::jsonConvert(access_log_config, *hcm_config.mutable_access_log(0)->mutable_config()); + storeHttpConnectionManager(hcm_config); + return true; +} + void ConfigHelper::initializeTls(const ServerSslOptions& options, envoy::api::v2::auth::CommonTlsContext& common_tls_context) { common_tls_context.add_alpn_protocols("h2"); diff --git a/test/config/utility.h b/test/config/utility.h index e1c00041894e8..e93d5d579f9c1 100644 --- a/test/config/utility.h +++ b/test/config/utility.h @@ -128,6 +128,10 @@ class ConfigHelper { void addSslConfig(const ServerSslOptions& options); void addSslConfig() { addSslConfig({}); } + // Set the HTTP access log for the first HCM (if present) to a given file. The default is + // /dev/null. + bool setAccessLog(const std::string& filename); + // Renames the first listener to the name specified. 
void renameListener(const std::string& name); diff --git a/test/integration/BUILD b/test/integration/BUILD index 6c547da012143..8b11b96571885 100644 --- a/test/integration/BUILD +++ b/test/integration/BUILD @@ -346,6 +346,7 @@ envoy_cc_test_library( "//source/common/thread_local:thread_local_lib", "//source/common/upstream:upstream_includes", "//source/common/upstream:upstream_lib", + "//source/extensions/access_loggers/file:config", "//source/extensions/transport_sockets/raw_buffer:config", "//source/extensions/transport_sockets/tap:config", "//source/extensions/transport_sockets/tls:ssl_socket_lib", @@ -380,7 +381,6 @@ envoy_cc_test( ":http_integration_lib", "//source/common/http:header_map_lib", "//source/common/http:headers_lib", - "//source/extensions/access_loggers/file:config", "//source/extensions/filters/http/cors:config", "//source/extensions/filters/http/dynamo:config", "//source/extensions/filters/http/grpc_http1_bridge:config", diff --git a/test/integration/http_integration.cc b/test/integration/http_integration.cc index 26a36e623d161..4c9bb7b550946 100644 --- a/test/integration/http_integration.cc +++ b/test/integration/http_integration.cc @@ -226,12 +226,30 @@ HttpIntegrationTest::HttpIntegrationTest(Http::CodecClient::Type downstream_prot config_helper_.setClientCodec(typeToCodecType(downstream_protocol_)); } +void HttpIntegrationTest::useAccessLog() { + access_log_name_ = TestEnvironment::temporaryPath(TestUtility::uniqueFilename()); + ASSERT_TRUE(config_helper_.setAccessLog(access_log_name_)); +} + HttpIntegrationTest::~HttpIntegrationTest() { cleanupUpstreamAndDownstream(); test_server_.reset(); fake_upstreams_.clear(); } +std::string HttpIntegrationTest::waitForAccessLog(const std::string& filename) { + // Wait a max of 1s for logs to flush to disk. 
+ for (int i = 0; i < 1000; ++i) { + std::string contents = TestEnvironment::readFileToStringForTest(filename, false); + if (contents.length() > 0) { + return contents; + } + usleep(1000); + } + RELEASE_ASSERT(0, "Timed out waiting for access log"); + return ""; +} + void HttpIntegrationTest::setDownstreamProtocol(Http::CodecClient::Type downstream_protocol) { downstream_protocol_ = downstream_protocol; config_helper_.setClientCodec(typeToCodecType(downstream_protocol_)); diff --git a/test/integration/http_integration.h b/test/integration/http_integration.h index 3fb328ac2be56..023083ca8141e 100644 --- a/test/integration/http_integration.h +++ b/test/integration/http_integration.h @@ -96,7 +96,12 @@ class HttpIntegrationTest : public BaseIntegrationTest { const std::string& config = ConfigHelper::HTTP_PROXY_CONFIG); virtual ~HttpIntegrationTest(); + // Waits for the first access log entry. + std::string waitForAccessLog(const std::string& filename); + protected: + void useAccessLog(); + IntegrationCodecClientPtr makeHttpConnection(uint32_t port); // Makes a http connection object without checking its connected state. IntegrationCodecClientPtr makeRawHttpConnection(Network::ClientConnectionPtr&& conn); @@ -199,5 +204,6 @@ class HttpIntegrationTest : public BaseIntegrationTest { // The codec type for the client-to-Envoy connection Http::CodecClient::Type downstream_protocol_{Http::CodecClient::Type::HTTP1}; uint32_t max_request_headers_kb_{Http::DEFAULT_MAX_REQUEST_HEADERS_KB}; + std::string access_log_name_; }; } // namespace Envoy diff --git a/test/integration/integration_test.cc b/test/integration/integration_test.cc index c6bfa5b5fe7f0..dd0316991b66c 100644 --- a/test/integration/integration_test.cc +++ b/test/integration/integration_test.cc @@ -291,6 +291,7 @@ TEST_P(IntegrationTest, Http10DisabledWithUpgrade) { // Turn HTTP/1.0 support on and verify 09 style requests work. 
TEST_P(IntegrationTest, Http09Enabled) { + useAccessLog(); autonomous_upstream_ = true; config_helper_.addConfigModifier(&setAllowHttp10WithDefaultHost); initialize(); @@ -304,6 +305,8 @@ TEST_P(IntegrationTest, Http09Enabled) { reinterpret_cast(fake_upstreams_.front().get())->lastRequestHeaders(); ASSERT_TRUE(upstream_headers != nullptr); EXPECT_EQ(upstream_headers->Host()->value(), "default.com"); + + EXPECT_THAT(waitForAccessLog(access_log_name_), HasSubstr("HTTP/1.0")); } // Turn HTTP/1.0 support on and verify the request is proxied and the default host is sent upstream. diff --git a/test/test_common/environment.cc b/test/test_common/environment.cc index d56bcc2a7ef66..136b35a1f222b 100644 --- a/test/test_common/environment.cc +++ b/test/test_common/environment.cc @@ -226,11 +226,14 @@ std::string TestEnvironment::temporaryFileSubstitute(const std::string& path, return temporaryFileSubstitute(path, ParamMap(), port_map, version); } -std::string TestEnvironment::readFileToStringForTest(const std::string& filename) { +std::string TestEnvironment::readFileToStringForTest(const std::string& filename, + bool require_existence) { std::ifstream file(filename); if (file.fail()) { - std::cerr << "failed to open: " << filename << std::endl; - RELEASE_ASSERT(false, ""); + if (!require_existence) { + return ""; + } + RELEASE_ASSERT(false, absl::StrCat("failed to open: ", filename)); } std::stringstream file_string_stream; diff --git a/test/test_common/environment.h b/test/test_common/environment.h index 3fe069f5f09c5..f0d8a39f2a5eb 100644 --- a/test/test_common/environment.h +++ b/test/test_common/environment.h @@ -172,9 +172,12 @@ class TestEnvironment { * Dumps the contents of the file into the string. * * @param filename: the fully qualified name of the file to use + * @param require_existence if true, RELEASE_ASSERT if the file does not exist. + * If false, an empty string will be returned if the file is not present. * @return string the contents of the file. 
*/ - static std::string readFileToStringForTest(const std::string& filename); + static std::string readFileToStringForTest(const std::string& filename, + bool require_existence = true); /** * Create a path on the filesystem (mkdir -p ... equivalent). diff --git a/test/test_common/utility.h b/test/test_common/utility.h index 2ed49861ffd79..6d83be806879d 100644 --- a/test/test_common/utility.h +++ b/test/test_common/utility.h @@ -182,6 +182,16 @@ class TestUtility { */ static std::vector listFiles(const std::string& path, bool recursive); + /** + * Return a unique temporary filename for use in tests. + * + * @return a filename based on the process id and current time. + */ + + static std::string uniqueFilename() { + return absl::StrCat(getpid(), "_", std::chrono::system_clock::now().time_since_epoch().count()); + } + /** * Compare two protos of the same type for equality. * From ffb5587ce69f03d4c3d00c7e7ae9db6e6d9306d1 Mon Sep 17 00:00:00 2001 From: Derek Argueta Date: Thu, 11 Apr 2019 08:25:10 -0700 Subject: [PATCH 101/165] zookeeper: minor no-op cleanup (#6501) remove un-necessary imports & bazel dependencies remove un-necessary namespace prefix in zookeeper_config.cc minor formatting nit in zookeeper_filter.cc add missing stdlib imports (IWYU) Signed-off-by: Derek Argueta --- .../filters/network/zookeeper_proxy/BUILD | 19 ++++++------ .../{zookeeper_config.cc => config.cc} | 7 ++--- .../{zookeeper_config.h => config.h} | 2 +- .../{zookeeper_decoder.cc => decoder.cc} | 4 ++- .../{zookeeper_decoder.h => decoder.h} | 4 ++- .../{zookeeper_filter.cc => filter.cc} | 5 +++- .../{zookeeper_filter.h => filter.h} | 29 +++++++++++-------- .../{zookeeper_utils.cc => utils.cc} | 4 ++- .../{zookeeper_utils.h => utils.h} | 2 ++ .../filters/network/zookeeper_proxy/BUILD | 4 +-- ...ookeeper_filter_test.cc => filter_test.cc} | 4 +-- 11 files changed, 50 insertions(+), 34 deletions(-) rename source/extensions/filters/network/zookeeper_proxy/{zookeeper_config.cc => config.cc} (84%) 
rename source/extensions/filters/network/zookeeper_proxy/{zookeeper_config.h => config.h} (93%) rename source/extensions/filters/network/zookeeper_proxy/{zookeeper_decoder.cc => decoder.cc} (99%) rename source/extensions/filters/network/zookeeper_proxy/{zookeeper_decoder.h => decoder.h} (98%) rename source/extensions/filters/network/zookeeper_proxy/{zookeeper_filter.cc => filter.cc} (98%) rename source/extensions/filters/network/zookeeper_proxy/{zookeeper_filter.h => filter.h} (88%) rename source/extensions/filters/network/zookeeper_proxy/{zookeeper_utils.cc => utils.cc} (95%) rename source/extensions/filters/network/zookeeper_proxy/{zookeeper_utils.h => utils.h} (98%) rename test/extensions/filters/network/zookeeper_proxy/{zookeeper_filter_test.cc => filter_test.cc} (99%) diff --git a/source/extensions/filters/network/zookeeper_proxy/BUILD b/source/extensions/filters/network/zookeeper_proxy/BUILD index 4fae6bda72674..26d144167c519 100644 --- a/source/extensions/filters/network/zookeeper_proxy/BUILD +++ b/source/extensions/filters/network/zookeeper_proxy/BUILD @@ -14,21 +14,22 @@ envoy_package() envoy_cc_library( name = "proxy_lib", srcs = [ - "zookeeper_decoder.cc", - "zookeeper_filter.cc", - "zookeeper_utils.cc", + "decoder.cc", + "filter.cc", + "utils.cc", ], hdrs = [ - "zookeeper_decoder.h", - "zookeeper_filter.h", - "zookeeper_utils.h", + "decoder.h", + "filter.h", + "utils.h", ], deps = [ "//include/envoy/network:filter_interface", "//include/envoy/server:filter_config_interface", "//include/envoy/stats:stats_interface", "//include/envoy/stats:stats_macros", - "//source/common/config:filter_json_lib", + "//source/common/buffer:buffer_lib", + "//source/common/common:enum_to_int", "//source/common/network:filter_lib", "//source/extensions/filters/network:well_known_names", ], @@ -36,8 +37,8 @@ envoy_cc_library( envoy_cc_library( name = "config", - srcs = ["zookeeper_config.cc"], - hdrs = ["zookeeper_config.h"], + srcs = ["config.cc"], + hdrs = ["config.h"], 
deps = [ ":proxy_lib", "//source/extensions/filters/network:well_known_names", diff --git a/source/extensions/filters/network/zookeeper_proxy/zookeeper_config.cc b/source/extensions/filters/network/zookeeper_proxy/config.cc similarity index 84% rename from source/extensions/filters/network/zookeeper_proxy/zookeeper_config.cc rename to source/extensions/filters/network/zookeeper_proxy/config.cc index 7a2bda7a7bcbd..b46bbde4fbf5b 100644 --- a/source/extensions/filters/network/zookeeper_proxy/zookeeper_config.cc +++ b/source/extensions/filters/network/zookeeper_proxy/config.cc @@ -1,4 +1,4 @@ -#include "extensions/filters/network/zookeeper_proxy/zookeeper_config.h" +#include "extensions/filters/network/zookeeper_proxy/config.h" #include @@ -8,7 +8,7 @@ #include "common/common/logger.h" -#include "extensions/filters/network/zookeeper_proxy/zookeeper_filter.h" +#include "extensions/filters/network/zookeeper_proxy/filter.h" namespace Envoy { namespace Extensions { @@ -18,8 +18,7 @@ namespace ZooKeeperProxy { /** * Config registration for the ZooKeeper proxy filter. @see NamedNetworkFilterConfigFactory. 
*/ -Network::FilterFactoryCb -NetworkFilters::ZooKeeperProxy::ZooKeeperConfigFactory::createFilterFactoryFromProtoTyped( +Network::FilterFactoryCb ZooKeeperConfigFactory::createFilterFactoryFromProtoTyped( const envoy::config::filter::network::zookeeper_proxy::v1alpha1::ZooKeeperProxy& proto_config, Server::Configuration::FactoryContext& context) { diff --git a/source/extensions/filters/network/zookeeper_proxy/zookeeper_config.h b/source/extensions/filters/network/zookeeper_proxy/config.h similarity index 93% rename from source/extensions/filters/network/zookeeper_proxy/zookeeper_config.h rename to source/extensions/filters/network/zookeeper_proxy/config.h index 2dc1f86ba332c..1d813a15ef3c2 100644 --- a/source/extensions/filters/network/zookeeper_proxy/zookeeper_config.h +++ b/source/extensions/filters/network/zookeeper_proxy/config.h @@ -5,7 +5,7 @@ #include "extensions/filters/network/common/factory_base.h" #include "extensions/filters/network/well_known_names.h" -#include "extensions/filters/network/zookeeper_proxy/zookeeper_filter.h" +#include "extensions/filters/network/zookeeper_proxy/filter.h" namespace Envoy { namespace Extensions { diff --git a/source/extensions/filters/network/zookeeper_proxy/zookeeper_decoder.cc b/source/extensions/filters/network/zookeeper_proxy/decoder.cc similarity index 99% rename from source/extensions/filters/network/zookeeper_proxy/zookeeper_decoder.cc rename to source/extensions/filters/network/zookeeper_proxy/decoder.cc index dddd22a0ef634..db2d5fb5b9195 100644 --- a/source/extensions/filters/network/zookeeper_proxy/zookeeper_decoder.cc +++ b/source/extensions/filters/network/zookeeper_proxy/decoder.cc @@ -1,4 +1,6 @@ -#include "extensions/filters/network/zookeeper_proxy/zookeeper_decoder.h" +#include "extensions/filters/network/zookeeper_proxy/decoder.h" + +#include namespace Envoy { namespace Extensions { diff --git a/source/extensions/filters/network/zookeeper_proxy/zookeeper_decoder.h 
b/source/extensions/filters/network/zookeeper_proxy/decoder.h similarity index 98% rename from source/extensions/filters/network/zookeeper_proxy/zookeeper_decoder.h rename to source/extensions/filters/network/zookeeper_proxy/decoder.h index 62144ef91006f..46efb96fe65e4 100644 --- a/source/extensions/filters/network/zookeeper_proxy/zookeeper_decoder.h +++ b/source/extensions/filters/network/zookeeper_proxy/decoder.h @@ -1,12 +1,14 @@ #pragma once + #include +#include #include "envoy/common/platform.h" #include "common/buffer/buffer_impl.h" #include "common/common/logger.h" -#include "extensions/filters/network/zookeeper_proxy/zookeeper_utils.h" +#include "extensions/filters/network/zookeeper_proxy/utils.h" namespace Envoy { namespace Extensions { diff --git a/source/extensions/filters/network/zookeeper_proxy/zookeeper_filter.cc b/source/extensions/filters/network/zookeeper_proxy/filter.cc similarity index 98% rename from source/extensions/filters/network/zookeeper_proxy/zookeeper_filter.cc rename to source/extensions/filters/network/zookeeper_proxy/filter.cc index ac78aad9c7b71..3a11262f298f0 100644 --- a/source/extensions/filters/network/zookeeper_proxy/zookeeper_filter.cc +++ b/source/extensions/filters/network/zookeeper_proxy/filter.cc @@ -1,4 +1,7 @@ -#include "extensions/filters/network/zookeeper_proxy/zookeeper_filter.h" +#include "extensions/filters/network/zookeeper_proxy/filter.h" + +#include +#include #include "common/buffer/buffer_impl.h" #include "common/common/assert.h" diff --git a/source/extensions/filters/network/zookeeper_proxy/zookeeper_filter.h b/source/extensions/filters/network/zookeeper_proxy/filter.h similarity index 88% rename from source/extensions/filters/network/zookeeper_proxy/zookeeper_filter.h rename to source/extensions/filters/network/zookeeper_proxy/filter.h index 20cdfec0a8f40..491a120329654 100644 --- a/source/extensions/filters/network/zookeeper_proxy/zookeeper_filter.h +++ 
b/source/extensions/filters/network/zookeeper_proxy/filter.h @@ -1,5 +1,9 @@ #pragma once +#include +#include +#include + #include "envoy/access_log/access_log.h" #include "envoy/network/connection.h" #include "envoy/network/filter.h" @@ -9,7 +13,7 @@ #include "common/common/logger.h" -#include "extensions/filters/network/zookeeper_proxy/zookeeper_decoder.h" +#include "extensions/filters/network/zookeeper_proxy/decoder.h" namespace Envoy { namespace Extensions { @@ -48,7 +52,7 @@ namespace ZooKeeperProxy { COUNTER(setwatches_rq) \ COUNTER(checkwatches_rq) \ COUNTER(removewatches_rq) \ - COUNTER(check_rq) \ + COUNTER(check_rq) // clang-format on /** @@ -63,7 +67,8 @@ struct ZooKeeperProxyStats { */ class ZooKeeperFilterConfig { public: - ZooKeeperFilterConfig(const std::string &stat_prefix, uint32_t max_packet_bytes, Stats::Scope& scope); + ZooKeeperFilterConfig(const std::string& stat_prefix, uint32_t max_packet_bytes, + Stats::Scope& scope); const ZooKeeperProxyStats& stats() { return stats_; } uint32_t maxPacketBytes() const { return max_packet_bytes_; } @@ -74,10 +79,8 @@ class ZooKeeperFilterConfig { ZooKeeperProxyStats stats_; private: - ZooKeeperProxyStats generateStats(const std::string& prefix, - Stats::Scope& scope) { - return ZooKeeperProxyStats{ - ALL_ZOOKEEPER_PROXY_STATS(POOL_COUNTER_PREFIX(scope, prefix))}; + ZooKeeperProxyStats generateStats(const std::string& prefix, Stats::Scope& scope) { + return ZooKeeperProxyStats{ALL_ZOOKEEPER_PROXY_STATS(POOL_COUNTER_PREFIX(scope, prefix))}; } }; @@ -86,7 +89,9 @@ using ZooKeeperFilterConfigSharedPtr = std::shared_ptr; /** * Implementation of ZooKeeper proxy filter. 
*/ -class ZooKeeperFilter : public Network::Filter, DecoderCallbacks, Logger::Loggable { +class ZooKeeperFilter : public Network::Filter, + DecoderCallbacks, + Logger::Loggable { public: explicit ZooKeeperFilter(ZooKeeperFilterConfigSharedPtr config); @@ -135,7 +140,7 @@ class ZooKeeperFilter : public Network::Filter, DecoderCallbacks, Logger::Loggab std::unique_ptr decoder_; }; -} // namespace ZooKeeperProxy -} // namespace NetworkFilters -} // namespace Extensions -} // namespace Envoy +} // namespace ZooKeeperProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/network/zookeeper_proxy/zookeeper_utils.cc b/source/extensions/filters/network/zookeeper_proxy/utils.cc similarity index 95% rename from source/extensions/filters/network/zookeeper_proxy/zookeeper_utils.cc rename to source/extensions/filters/network/zookeeper_proxy/utils.cc index 1a4ad1c7af4d2..ec2d524ee5842 100644 --- a/source/extensions/filters/network/zookeeper_proxy/zookeeper_utils.cc +++ b/source/extensions/filters/network/zookeeper_proxy/utils.cc @@ -1,4 +1,6 @@ -#include "extensions/filters/network/zookeeper_proxy/zookeeper_utils.h" +#include "extensions/filters/network/zookeeper_proxy/utils.h" + +#include namespace Envoy { namespace Extensions { diff --git a/source/extensions/filters/network/zookeeper_proxy/zookeeper_utils.h b/source/extensions/filters/network/zookeeper_proxy/utils.h similarity index 98% rename from source/extensions/filters/network/zookeeper_proxy/zookeeper_utils.h rename to source/extensions/filters/network/zookeeper_proxy/utils.h index 559ef0f63093d..ad210a8150f4c 100644 --- a/source/extensions/filters/network/zookeeper_proxy/zookeeper_utils.h +++ b/source/extensions/filters/network/zookeeper_proxy/utils.h @@ -1,5 +1,7 @@ #pragma once + #include +#include #include "envoy/common/platform.h" diff --git a/test/extensions/filters/network/zookeeper_proxy/BUILD 
b/test/extensions/filters/network/zookeeper_proxy/BUILD index 81af4151cf112..bafa67b9d7761 100644 --- a/test/extensions/filters/network/zookeeper_proxy/BUILD +++ b/test/extensions/filters/network/zookeeper_proxy/BUILD @@ -15,9 +15,9 @@ load( envoy_package() envoy_extension_cc_test( - name = "zookeeper_filter_test", + name = "filter_test", srcs = [ - "zookeeper_filter_test.cc", + "filter_test.cc", ], extension_name = "envoy.filters.network.zookeeper_proxy", deps = [ diff --git a/test/extensions/filters/network/zookeeper_proxy/zookeeper_filter_test.cc b/test/extensions/filters/network/zookeeper_proxy/filter_test.cc similarity index 99% rename from test/extensions/filters/network/zookeeper_proxy/zookeeper_filter_test.cc rename to test/extensions/filters/network/zookeeper_proxy/filter_test.cc index aeffd31e11af8..fc2e852ceffe0 100644 --- a/test/extensions/filters/network/zookeeper_proxy/zookeeper_filter_test.cc +++ b/test/extensions/filters/network/zookeeper_proxy/filter_test.cc @@ -1,7 +1,7 @@ #include "common/buffer/buffer_impl.h" -#include "extensions/filters/network/zookeeper_proxy/zookeeper_decoder.h" -#include "extensions/filters/network/zookeeper_proxy/zookeeper_filter.h" +#include "extensions/filters/network/zookeeper_proxy/decoder.h" +#include "extensions/filters/network/zookeeper_proxy/filter.h" #include "test/mocks/network/mocks.h" From dcd7f4c57912a4d1045bdda490a5ca920845ae9d Mon Sep 17 00:00:00 2001 From: Gabriel Sagula Date: Thu, 11 Apr 2019 09:54:28 -0700 Subject: [PATCH 102/165] ext_authz: removed unnecessary assert from onSuccess gRPC client (#6505) Signed-off-by: Gabriel --- .../common/ext_authz/ext_authz_grpc_impl.cc | 2 -- .../ext_authz/ext_authz_grpc_impl_test.cc | 24 +++++++++++++++++++ 2 files changed, 24 insertions(+), 2 deletions(-) diff --git a/source/extensions/filters/common/ext_authz/ext_authz_grpc_impl.cc b/source/extensions/filters/common/ext_authz/ext_authz_grpc_impl.cc index 236673d7e89af..bf1b242256d08 100644 --- 
a/source/extensions/filters/common/ext_authz/ext_authz_grpc_impl.cc +++ b/source/extensions/filters/common/ext_authz/ext_authz_grpc_impl.cc @@ -43,9 +43,7 @@ void GrpcClientImpl::check(RequestCallbacks& callbacks, void GrpcClientImpl::onSuccess(std::unique_ptr&& response, Tracing::Span& span) { - ASSERT(response->status().code() != Grpc::Status::GrpcStatus::Unknown); ResponsePtr authz_response = std::make_unique(Response{}); - if (response->status().code() == Grpc::Status::GrpcStatus::Ok) { span.setTag(Constants::get().TraceStatus, Constants::get().TraceOk); authz_response->status = CheckStatus::OK; diff --git a/test/extensions/filters/common/ext_authz/ext_authz_grpc_impl_test.cc b/test/extensions/filters/common/ext_authz/ext_authz_grpc_impl_test.cc index cbed03e797281..1094ac87f0f0e 100644 --- a/test/extensions/filters/common/ext_authz/ext_authz_grpc_impl_test.cc +++ b/test/extensions/filters/common/ext_authz/ext_authz_grpc_impl_test.cc @@ -135,6 +135,30 @@ TEST_P(ExtAuthzGrpcClientTest, AuthorizationDenied) { client_->onSuccess(std::move(check_response), span_); } +// Test the client when a gRPC status code unknown is received from the authorization server. 
+TEST_P(ExtAuthzGrpcClientTest, AuthorizationDeniedGrpcUnknownStatus) { + initialize(GetParam()); + + auto check_response = std::make_unique(); + auto status = check_response->mutable_status(); + status->set_code(Grpc::Status::GrpcStatus::Unknown); + auto authz_response = Response{}; + authz_response.status = CheckStatus::Denied; + + envoy::service::auth::v2::CheckRequest request; + expectCallSend(request); + client_->check(request_callbacks_, request, Tracing::NullSpan::instance()); + + Http::HeaderMapImpl headers; + client_->onCreateInitialMetadata(headers); + EXPECT_EQ(nullptr, headers.RequestId()); + EXPECT_CALL(span_, setTag("ext_authz_status", "ext_authz_unauthorized")); + EXPECT_CALL(request_callbacks_, onComplete_(WhenDynamicCastTo( + AuthzResponseNoAttributes(authz_response)))); + + client_->onSuccess(std::move(check_response), span_); +} + // Test the client when a denied response with additional HTTP attributes is received. TEST_P(ExtAuthzGrpcClientTest, AuthorizationDeniedWithAllAttributes) { initialize(GetParam()); From 6f01652f2496f015d6ca2d16d06d60fe8fb96270 Mon Sep 17 00:00:00 2001 From: Elisha Ziskind Date: Thu, 11 Apr 2019 14:31:44 -0400 Subject: [PATCH 103/165] test: relax some test expectations about exception strings (#6558) Signed-off-by: Elisha Ziskind --- test/common/router/config_impl_test.cc | 8 ++------ test/extensions/filters/http/ratelimit/config_test.cc | 2 +- 2 files changed, 3 insertions(+), 7 deletions(-) diff --git a/test/common/router/config_impl_test.cc b/test/common/router/config_impl_test.cc index 86d5bd0fa422b..4215294deaf50 100644 --- a/test/common/router/config_impl_test.cc +++ b/test/common/router/config_impl_test.cc @@ -4152,13 +4152,9 @@ TEST_F(RoutePropertyTest, TestBadCorsConfig) { enabled: 0 )EOF"; - EXPECT_THROW_WITH_MESSAGE( + EXPECT_THROW_WITH_REGEX( TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), - EnvoyException, - "Unable to parse JSON as proto " - 
"(INVALID_ARGUMENT:(virtual_hosts[0].routes[0].route.cors.enabled.value): invalid value 0 " - "for type TYPE_BOOL): " + - Json::Factory::loadFromYamlString(yaml)->asJsonString()); + EnvoyException, "Unable to parse JSON as proto .*: invalid value 0 for type TYPE_BOOL"); } TEST_F(RouteMatcherTest, Decorator) { diff --git a/test/extensions/filters/http/ratelimit/config_test.cc b/test/extensions/filters/http/ratelimit/config_test.cc index 4a1c2a9211b1b..6e0520b1960e7 100644 --- a/test/extensions/filters/http/ratelimit/config_test.cc +++ b/test/extensions/filters/http/ratelimit/config_test.cc @@ -74,7 +74,7 @@ TEST(RateLimitFilterConfigTest, BadRateLimitFilterConfig) { envoy::config::filter::http::rate_limit::v2::RateLimit proto_config{}; EXPECT_THROW_WITH_REGEX(MessageUtil::loadFromYamlAndValidate(yaml, proto_config), EnvoyException, - "INVALID_ARGUMENT:route_key: Cannot find field"); + "route_key: Cannot find field"); } } // namespace From d68eb4c0ae810334f85464a01a173ef4e47a7e35 Mon Sep 17 00:00:00 2001 From: Derek Argueta Date: Thu, 11 Apr 2019 11:32:44 -0700 Subject: [PATCH 104/165] tools/examples: remove v1_to_bootstrap and v1 example configs (#6556) Signed-off-by: Derek Argueta --- configs/BUILD | 9 ---- configs/configgen.sh | 2 +- configs/google_com_proxy.json | 48 ------------------- configs/google_com_proxy.yaml | 31 ------------ .../configuration/overview/v2_overview.rst | 11 ----- test/config_test/example_configs_test.cc | 4 +- tools/BUILD | 14 ------ tools/cppcheck_wrapper.sh | 0 tools/envoy_collect/envoy_collect.py | 2 +- tools/v1_to_bootstrap.cc | 44 ----------------- 10 files changed, 4 insertions(+), 161 deletions(-) delete mode 100644 configs/google_com_proxy.json delete mode 100644 configs/google_com_proxy.yaml create mode 100644 tools/cppcheck_wrapper.sh delete mode 100644 tools/v1_to_bootstrap.cc diff --git a/configs/BUILD b/configs/BUILD index 7596ba2b41df1..9846609607e9e 100644 --- a/configs/BUILD +++ b/configs/BUILD @@ -29,19 +29,10 @@ 
filegroup( }), ) -genrule( - name = "v1_upgraded_configs", - srcs = ["google_com_proxy.yaml"], - outs = ["google_com_proxy.v2.upgraded.json"], - cmd = "$(location //tools:v1_to_bootstrap) $(location google_com_proxy.yaml) > $@", - tools = ["//tools:v1_to_bootstrap"], -) - genrule( name = "example_configs", srcs = [ ":configs", - ":v1_upgraded_configs", "//examples:configs", "//test/config/integration/certs", ], diff --git a/configs/configgen.sh b/configs/configgen.sh index 2ecf6b77ba06d..2e82ebff3dd98 100755 --- a/configs/configgen.sh +++ b/configs/configgen.sh @@ -25,4 +25,4 @@ for FILE in $*; do done # tar is having issues with -C for some reason so just cd into OUT_DIR. -(cd "$OUT_DIR"; tar -hcvf example_configs.tar *.json *.yaml certs/*.pem) +(cd "$OUT_DIR"; tar -hcvf example_configs.tar *.yaml certs/*.pem) diff --git a/configs/google_com_proxy.json b/configs/google_com_proxy.json deleted file mode 100644 index 6e131e1e1e543..0000000000000 --- a/configs/google_com_proxy.json +++ /dev/null @@ -1,48 +0,0 @@ -{ - "listeners": [{ - "address": "tcp://127.0.0.1:10000", - "filters": [{ - "name": "http_connection_manager", - "config": { - "codec_type": "auto", - "stat_prefix": "ingress_http", - "route_config": { - "virtual_hosts": [{ - "name": "local_service", - "domains": [ - "*" - ], - "routes": [{ - "timeout_ms": 0, - "prefix": "/", - "host_rewrite": "www.google.com", - "cluster": "service_google" - }] - }] - }, - "filters": [{ - "name": "router", - "config": {} - }] - } - }] - }], - "admin": { - "access_log_path": "/tmp/admin_access.log", - "address": "tcp://127.0.0.1:9901" - }, - "cluster_manager": { - "clusters": [{ - "name": "service_google", - "connect_timeout_ms": 250, - "type": "logical_dns", - "lb_type": "round_robin", - "hosts": [{ - "url": "tcp://google.com:443" - }], - "ssl_context": { - "sni": "www.google.com" - } - }] - } -} diff --git a/configs/google_com_proxy.yaml b/configs/google_com_proxy.yaml deleted file mode 100644 index 
8683e9e4c9254..0000000000000 --- a/configs/google_com_proxy.yaml +++ /dev/null @@ -1,31 +0,0 @@ -listeners: -- address: tcp://127.0.0.1:10000 - filters: - - name: http_connection_manager - config: - codec_type: auto - stat_prefix: ingress_http - route_config: - virtual_hosts: - - name: local_service - domains: ["*"] - routes: - - prefix: "/" - timeout_ms: 0 - host_rewrite: www.google.com - cluster: service_google - filters: - - { name: router, config: {} } - -admin: - access_log_path: /tmp/admin_access.log - address: tcp://127.0.0.1:9901 - -cluster_manager: - clusters: - - name: service_google - connect_timeout_ms: 250 - type: logical_dns - lb_type: round_robin - hosts: [{ url: tcp://google.com:443 }] - ssl_context: { sni: www.google.com } diff --git a/docs/root/configuration/overview/v2_overview.rst b/docs/root/configuration/overview/v2_overview.rst index c296684d1099a..6066f0d359331 100644 --- a/docs/root/configuration/overview/v2_overview.rst +++ b/docs/root/configuration/overview/v2_overview.rst @@ -332,17 +332,6 @@ The management server could respond to EDS requests with: address: 127.0.0.2 port_value: 1234 -Upgrading from v1 configuration -------------------------------- - -While new v2 bootstrap JSON/YAML can be written, it might be expedient to upgrade an existing -v1 JSON/YAML configuration to v2. To do this (in an Envoy source tree), -you can run: - -.. code-block:: console - - bazel run //tools:v1_to_bootstrap - .. _config_overview_v2_management_server: Management server diff --git a/test/config_test/example_configs_test.cc b/test/config_test/example_configs_test.cc index ca85b6f1ecad2..6da6d5e55f0af 100644 --- a/test/config_test/example_configs_test.cc +++ b/test/config_test/example_configs_test.cc @@ -17,9 +17,9 @@ TEST(ExampleConfigsTest, All) { #ifdef __APPLE__ // freebind/freebind.yaml is not supported on macOS and disabled via Bazel. 
- EXPECT_EQ(21UL, ConfigTest::run(directory)); + EXPECT_EQ(20UL, ConfigTest::run(directory)); #else - EXPECT_EQ(22UL, ConfigTest::run(directory)); + EXPECT_EQ(21UL, ConfigTest::run(directory)); #endif ConfigTest::testMerge(); diff --git a/tools/BUILD b/tools/BUILD index ab8452c075a78..7d2786840b774 100644 --- a/tools/BUILD +++ b/tools/BUILD @@ -36,17 +36,3 @@ envoy_cc_binary( "@envoy_api//envoy/config/bootstrap/v2:bootstrap_cc", ] + envoy_cc_platform_dep("//source/exe:platform_impl_lib"), ) - -envoy_cc_binary( - name = "v1_to_bootstrap", - srcs = ["v1_to_bootstrap.cc"], - deps = [ - "//source/common/api:api_lib", - "//source/common/config:bootstrap_json_lib", - "//source/common/json:json_loader_lib", - "//source/common/protobuf:utility_lib", - "//source/common/stats:isolated_store_lib", - "//source/common/stats:stats_options_lib", - "@envoy_api//envoy/config/bootstrap/v2:bootstrap_cc", - ] + envoy_cc_platform_dep("//source/exe:platform_impl_lib"), -) diff --git a/tools/cppcheck_wrapper.sh b/tools/cppcheck_wrapper.sh new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tools/envoy_collect/envoy_collect.py b/tools/envoy_collect/envoy_collect.py index 78841d93a8972..c22a526a37b78 100755 --- a/tools/envoy_collect/envoy_collect.py +++ b/tools/envoy_collect/envoy_collect.py @@ -4,7 +4,7 @@ Example use: ./tools/envoy_collect.py --output-path=./envoy.tar -c - ./configs/google_com_proxy.json --service-node foo + ./configs/google_com_proxy.v2.yaml --service-node foo tar -tvf ./envoy.tar -rw------- htuch/eng 0 2017-08-13 21:13 access_0.log diff --git a/tools/v1_to_bootstrap.cc b/tools/v1_to_bootstrap.cc deleted file mode 100644 index a4d261f5a7009..0000000000000 --- a/tools/v1_to_bootstrap.cc +++ /dev/null @@ -1,44 +0,0 @@ -/** - * Utility to convert v1 JSON configuration file to v2 bootstrap JSON (on stdout). 
- * - * Usage: - * - * v1_to_bootstrap - */ -#include - -#include "envoy/config/bootstrap/v2/bootstrap.pb.h" -#include "envoy/config/bootstrap/v2/bootstrap.pb.validate.h" - -#include "common/api/api_impl.h" -#include "common/config/bootstrap_json.h" -#include "common/event/real_time_system.h" -#include "common/json/json_loader.h" -#include "common/protobuf/utility.h" -#include "common/stats/isolated_store_impl.h" -#include "common/stats/stats_options_impl.h" - -#include "exe/platform_impl.h" - -// NOLINT(namespace-envoy) -int main(int argc, char** argv) { - if (argc != 2) { - std::cerr << "Usage: " << argv[0] << " " << std::endl; - return EXIT_FAILURE; - } - - Envoy::PlatformImpl platform_impl_; - Envoy::Stats::IsolatedStoreImpl stats_store; - Envoy::Event::RealTimeSystem time_system; // NO_CHECK_FORMAT(real_time) - Envoy::Api::Impl api(platform_impl_.threadFactory(), stats_store, time_system, - platform_impl_.fileSystem()); - - envoy::config::bootstrap::v2::Bootstrap bootstrap; - auto config_json = Envoy::Json::Factory::loadFromFile(argv[1], api); - Envoy::Stats::StatsOptionsImpl stats_options; - Envoy::Config::BootstrapJson::translateBootstrap(*config_json, bootstrap, stats_options); - Envoy::MessageUtil::validate(bootstrap); - std::cout << Envoy::MessageUtil::getJsonStringFromMessage(bootstrap, true); - - return EXIT_SUCCESS; -} From eb9dd5cd3fccdcdd9bce4955b110ce875c5699ed Mon Sep 17 00:00:00 2001 From: Derek Argueta Date: Thu, 11 Apr 2019 12:08:44 -0700 Subject: [PATCH 105/165] remove empty file (#6562) Signed-off-by: Derek Argueta --- tools/cppcheck_wrapper.sh | 0 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100644 tools/cppcheck_wrapper.sh diff --git a/tools/cppcheck_wrapper.sh b/tools/cppcheck_wrapper.sh deleted file mode 100644 index e69de29bb2d1d..0000000000000 From e612fc9a29ac501928a0c827e156a4633877394e Mon Sep 17 00:00:00 2001 From: Kyle Larose Date: Thu, 11 Apr 2019 15:10:47 -0400 Subject: [PATCH 106/165] upstream: add connection pool 
circuit breaker stats (#6516) Users will want to know how often connection pool overflows are happening. Count them. Signed-off-by: Kyle Larose --- .../cluster_manager/cluster_stats.rst | 1 + .../intro/arch_overview/circuit_breaking.rst | 4 +++ docs/root/intro/version_history.rst | 1 + include/envoy/upstream/upstream.h | 1 + source/common/upstream/conn_pool_map_impl.h | 3 +- .../upstream/conn_pool_map_impl_test.cc | 34 +++++++++++++++++++ test/integration/integration_test.cc | 2 ++ 7 files changed, 44 insertions(+), 2 deletions(-) diff --git a/docs/root/configuration/cluster_manager/cluster_stats.rst b/docs/root/configuration/cluster_manager/cluster_stats.rst index 370a9e1402d17..f881e8963ccdd 100644 --- a/docs/root/configuration/cluster_manager/cluster_stats.rst +++ b/docs/root/configuration/cluster_manager/cluster_stats.rst @@ -56,6 +56,7 @@ Every cluster has a statistics tree rooted at *cluster..* with the followi upstream_cx_rx_bytes_buffered, Gauge, Received connection bytes currently buffered upstream_cx_tx_bytes_total, Counter, Total sent connection bytes upstream_cx_tx_bytes_buffered, Gauge, Send connection bytes currently buffered + upstream_cx_pool_overflow, Counter, Total times that the cluster's connection pool circuit breaker overflowed upstream_cx_protocol_error, Counter, Total connection protocol errors upstream_cx_max_requests, Counter, Total connections closed due to maximum requests upstream_cx_none_healthy, Counter, Total times connection not established due to no healthy hosts diff --git a/docs/root/intro/arch_overview/circuit_breaking.rst b/docs/root/intro/arch_overview/circuit_breaking.rst index 152284363fb51..57dc097dba90d 100644 --- a/docs/root/intro/arch_overview/circuit_breaking.rst +++ b/docs/root/intro/arch_overview/circuit_breaking.rst @@ -49,6 +49,10 @@ configure and code each application independently. Envoy supports various types clean up; connection pools do not. 
Note that in order for a connection pool to function it needs at least one upstream connection, so this value should likely be no greater than :ref:`Cluster maximum connections `. + If this circuit breaker overflows the + :ref:`upstream_cx_pool_overflow ` counter for the cluster + will increment. + Each circuit breaking limit is :ref:`configurable ` and tracked on a per upstream cluster and per priority basis. This allows different components of diff --git a/docs/root/intro/version_history.rst b/docs/root/intro/version_history.rst index 98a5bbc05314c..86d397da05ad8 100644 --- a/docs/root/intro/version_history.rst +++ b/docs/root/intro/version_history.rst @@ -3,6 +3,7 @@ Version history 1.11.0 (Pending) ================ +* upstream: added :ref:`upstream_cx_pool_overflow ` for the connection pool circuit breaker. 1.10.0 (Apr 5, 2019) ==================== diff --git a/include/envoy/upstream/upstream.h b/include/envoy/upstream/upstream.h index 3fdb15cdeee17..214096d79390e 100644 --- a/include/envoy/upstream/upstream.h +++ b/include/envoy/upstream/upstream.h @@ -480,6 +480,7 @@ class PrioritySet { COUNTER (upstream_cx_protocol_error) \ COUNTER (upstream_cx_max_requests) \ COUNTER (upstream_cx_none_healthy) \ + COUNTER (upstream_cx_pool_overflow) \ COUNTER (upstream_rq_total) \ GAUGE (upstream_rq_active) \ COUNTER (upstream_rq_completed) \ diff --git a/source/common/upstream/conn_pool_map_impl.h b/source/common/upstream/conn_pool_map_impl.h index 156751942535f..19fa80ce4baa0 100644 --- a/source/common/upstream/conn_pool_map_impl.h +++ b/source/common/upstream/conn_pool_map_impl.h @@ -35,8 +35,7 @@ ConnPoolMap::getPool(KEY_TYPE key, const PoolFactory& facto if (!connPoolResource.canCreate()) { // We're full. Try to free up a pool. If we can't, bail out. if (!freeOnePool()) { - // TODO(klarose): Add some explicit counters for failure cases here, similar to the other - // circuit breakers. 
+ host_->cluster().stats().upstream_cx_pool_overflow_.inc(); return absl::nullopt; } diff --git a/test/common/upstream/conn_pool_map_impl_test.cc b/test/common/upstream/conn_pool_map_impl_test.cc index b8183d2eae29b..83839f4e76b66 100644 --- a/test/common/upstream/conn_pool_map_impl_test.cc +++ b/test/common/upstream/conn_pool_map_impl_test.cc @@ -225,6 +225,28 @@ TEST_F(ConnPoolMapImplTest, GetPoolHittingLimitFails) { EXPECT_EQ(test_map->size(), 1); } +TEST_F(ConnPoolMapImplTest, GetPoolHittingLimitIncrementsFailureCounter) { + TestMapPtr test_map = makeTestMapWithLimit(1); + + test_map->getPool(1, getBasicFactory()); + ON_CALL(*mock_pools_[0], hasActiveConnections()).WillByDefault(Return(true)); + test_map->getPool(2, getNeverCalledFactory()); + + EXPECT_EQ(host_->cluster_.stats_.upstream_cx_pool_overflow_.value(), 1); +} + +TEST_F(ConnPoolMapImplTest, GetPoolHittingLimitIncrementsFailureMultiple) { + TestMapPtr test_map = makeTestMapWithLimit(1); + + test_map->getPool(1, getBasicFactory()); + ON_CALL(*mock_pools_[0], hasActiveConnections()).WillByDefault(Return(true)); + test_map->getPool(2, getNeverCalledFactory()); + test_map->getPool(2, getNeverCalledFactory()); + test_map->getPool(2, getNeverCalledFactory()); + + EXPECT_EQ(host_->cluster_.stats_.upstream_cx_pool_overflow_.value(), 3); +} + TEST_F(ConnPoolMapImplTest, GetPoolHittingLimitGreaterThan1Fails) { TestMapPtr test_map = makeTestMapWithLimit(2); @@ -250,6 +272,18 @@ TEST_F(ConnPoolMapImplTest, GetPoolLimitHitThenOneFreesUpNextCallSucceeds) { EXPECT_EQ(test_map->size(), 1); } +TEST_F(ConnPoolMapImplTest, GetPoolLimitHitFollowedBySuccessDoesNotClearFailure) { + TestMapPtr test_map = makeTestMapWithLimit(1); + + test_map->getPool(1, getActivePoolFactory()); + test_map->getPool(2, getNeverCalledFactory()); + + ON_CALL(*mock_pools_[0], hasActiveConnections()).WillByDefault(Return(false)); + + test_map->getPool(2, getBasicFactory()); + EXPECT_EQ(host_->cluster_.stats_.upstream_cx_pool_overflow_.value(), 1); 
+} + // Test that only the pool which are idle are actually cleared TEST_F(ConnPoolMapImplTest, GetOnePoolIdleOnlyClearsThatOne) { TestMapPtr test_map = makeTestMapWithLimit(2); diff --git a/test/integration/integration_test.cc b/test/integration/integration_test.cc index dd0316991b66c..dd7406282aba9 100644 --- a/test/integration/integration_test.cc +++ b/test/integration/integration_test.cc @@ -778,6 +778,8 @@ TEST_P(IntegrationTest, NoConnectionPoolsFree) { EXPECT_STREQ("503", response->headers().Status()->value().c_str()); test_server_->waitForCounterGe("cluster.cluster_0.upstream_rq_503", 1); + + EXPECT_EQ(test_server_->counter("cluster.cluster_0.upstream_cx_pool_overflow")->value(), 1); } INSTANTIATE_TEST_SUITE_P(IpVersions, UpstreamEndpointIntegrationTest, From 2135e1a8f3db99f814419f6249027bfa8d3be489 Mon Sep 17 00:00:00 2001 From: James Synge Date: Thu, 11 Apr 2019 15:51:34 -0400 Subject: [PATCH 107/165] docs: update websocket.rst (#6546) Link to RFC 8441 rather than the earlier working group draft. 
Fixes #6528 Signed-off-by: James Synge --- docs/root/intro/arch_overview/websocket.rst | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/root/intro/arch_overview/websocket.rst b/docs/root/intro/arch_overview/websocket.rst index dab57656eb276..e854eb53bb271 100644 --- a/docs/root/intro/arch_overview/websocket.rst +++ b/docs/root/intro/arch_overview/websocket.rst @@ -32,15 +32,15 @@ laid out below, but custom filter chains can only be configured on a per-HttpCon | F | F | F | +-----------------------+-------------------------+-------------------+ -Note that the statistics for upgrades are all bundled together so websocket +Note that the statistics for upgrades are all bundled together so WebSocket :ref:`statistics ` are tracked by stats such as downstream_cx_upgrades_total and downstream_cx_upgrades_active Handling H2 hops ^^^^^^^^^^^^^^^^ -Envoy currently has an alpha implementation of tunneling websockets over H2 streams for deployments -that prefer a uniform H2 mesh throughout, for example, for a deployment of the form: +Envoy supports tunneling WebSockets over H2 streams for deployments that prefer a uniform +H2 mesh throughout; this enables, for example, a deployment of the form: [Client] ---- HTTP/1.1 ---- [Front Envoy] ---- HTTP/2 ---- [Sidecar Envoy ---- H1 ---- App] @@ -48,7 +48,7 @@ In this case, if a client is for example using WebSocket, we want the Websocket upstream server functionally intact, which means it needs to traverse the HTTP/2 hop. This is accomplished via -`extended CONNECT `_ support. The +`extended CONNECT `_ support. The WebSocket request will be transformed into an HTTP/2 CONNECT stream, with :protocol header indicating the original upgrade, traverse the HTTP/2 hop, and be downgraded back into an HTTP/1 WebSocket Upgrade. 
This same Upgrade-CONNECT-Upgrade transformation will be performed on any From a9b49bf734b279148e5f3f71392bc8bd445888a9 Mon Sep 17 00:00:00 2001 From: James Synge Date: Thu, 11 Apr 2019 22:38:34 -0400 Subject: [PATCH 108/165] test: Add EXPECT_THAT_THROWS_MESSAGE, allowing arbitrary string matchers (#6561) This was inspired by the same problem that PR#6448 is fixing, namely an overly strict string match. Elisha wanted a more focused fix, so I'm offering this for the next such case, enabling a test in test/common/router/config_impl_test.cc such as: EXPECT_THAT_THROWS_MESSAGE( TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), EnvoyException, AllOf(HasSubstr("Unable to parse"), HasSubstr("virtual_hosts[0].routes[0].route.cors.enabled.value"), HasSubstr("invalid value 0 for type TYPE_BOOL"))); Signed-off-by: James Synge --- test/test_common/utility.h | 40 +++++++++++++++++++++++++------------- 1 file changed, 26 insertions(+), 14 deletions(-) diff --git a/test/test_common/utility.h b/test/test_common/utility.h index 6d83be806879d..063e6c89ae9ed 100644 --- a/test/test_common/utility.h +++ b/test/test_common/utility.h @@ -37,29 +37,41 @@ using testing::AssertionSuccess; using testing::Invoke; namespace Envoy { -#define EXPECT_THROW_WITH_MESSAGE(statement, expected_exception, message) \ + +/* + Macro to use for validating that a statement throws the specified type of exception, and that + the exception's what() method returns a string which is matched by the specified matcher. + This allows for expectations such as: + + EXPECT_THAT_THROWS_MESSAGE( + bad_function_call(), + EnvoyException, + AllOf(StartsWith("expected prefix"), HasSubstr("some substring"))); +*/ +#define EXPECT_THAT_THROWS_MESSAGE(statement, expected_exception, matcher) \ try { \ statement; \ ADD_FAILURE() << "Exception should take place. 
It did not."; \ } catch (expected_exception & e) { \ - EXPECT_EQ(message, std::string(e.what())); \ + EXPECT_THAT(std::string(e.what()), matcher); \ } +// Expect that the statement throws the specified type of exception with exactly the specified +// message. +#define EXPECT_THROW_WITH_MESSAGE(statement, expected_exception, message) \ + EXPECT_THAT_THROWS_MESSAGE(statement, expected_exception, ::testing::Eq(message)) + +// Expect that the statement throws the specified type of exception with a message containing a +// substring matching the specified regular expression (i.e. the regex doesn't have to match +// the entire message). #define EXPECT_THROW_WITH_REGEX(statement, expected_exception, regex_str) \ - try { \ - statement; \ - ADD_FAILURE() << "Exception should take place. It did not."; \ - } catch (expected_exception & e) { \ - EXPECT_THAT(e.what(), ::testing::ContainsRegex(regex_str)); \ - } + EXPECT_THAT_THROWS_MESSAGE(statement, expected_exception, ::testing::ContainsRegex(regex_str)) +// Expect that the statement throws the specified type of exception with a message that does not +// contain any substring matching the specified regular expression. #define EXPECT_THROW_WITHOUT_REGEX(statement, expected_exception, regex_str) \ - try { \ - statement; \ - ADD_FAILURE() << "Exception should take place. 
It did not."; \ - } catch (expected_exception & e) { \ - EXPECT_THAT(e.what(), ::testing::Not(::testing::ContainsRegex(regex_str))); \ - } + EXPECT_THAT_THROWS_MESSAGE(statement, expected_exception, \ + ::testing::Not(::testing::ContainsRegex(regex_str))) #define VERBOSE_EXPECT_NO_THROW(statement) \ try { \ From db7f12427ffedebba5891e724f2ef39d3405c81e Mon Sep 17 00:00:00 2001 From: "leilei.gll" Date: Fri, 12 Apr 2019 14:04:43 +0800 Subject: [PATCH 109/165] dubbo_proxy: Refactor the DubboProxy filter (#6410) Description: Refactor the DubboProxy filter Risk Level: low Testing: unit test Docs Changes: inline Release Notes: add routing capabilities for the Dubbo protocol Signed-off-by: leilei.gll --- api/docs/BUILD | 2 + .../filter/network/dubbo_proxy/v2alpha1/BUILD | 4 +- .../dubbo_proxy/v2alpha1/dubbo_proxy.proto | 6 +- .../network/dubbo_proxy/v2alpha1/route.proto | 66 +- docs/build.sh | 3 + .../root/api-v2/config/filter/dubbo/dubbo.rst | 8 + docs/root/api-v2/config/filter/filter.rst | 1 + docs/root/configuration/configuration.rst | 1 + .../dubbo_filters/dubbo_filters.rst | 11 + .../dubbo_filters/router_filter.rst | 11 + .../network_filters/dubbo_proxy_filter.rst | 82 ++ .../network_filters/network_filters.rst | 1 + docs/root/intro/version_history.rst | 1 + .../filters/network/dubbo_proxy/BUILD | 85 +- .../network/dubbo_proxy/active_message.cc | 421 ++++++ .../network/dubbo_proxy/active_message.h | 170 +++ .../network/dubbo_proxy/app_exception.cc | 3 + .../network/dubbo_proxy/app_exception.h | 4 + .../network/dubbo_proxy/buffer_helper.h | 56 - .../filters/network/dubbo_proxy/config.cc | 125 +- .../filters/network/dubbo_proxy/config.h | 44 + .../network/dubbo_proxy/conn_manager.cc | 208 +++ .../network/dubbo_proxy/conn_manager.h | 107 ++ .../filters/network/dubbo_proxy/decoder.cc | 190 ++- .../filters/network/dubbo_proxy/decoder.h | 119 +- .../network/dubbo_proxy/deserializer.h | 14 +- .../network/dubbo_proxy/deserializer_impl.cc | 10 +- 
.../network/dubbo_proxy/deserializer_impl.h | 15 - .../dubbo_proxy/dubbo_protocol_impl.cc | 74 +- .../network/dubbo_proxy/dubbo_protocol_impl.h | 64 - .../filters/network/dubbo_proxy/filter.cc | 238 --- .../filters/network/dubbo_proxy/filter.h | 108 -- .../network/dubbo_proxy/heartbeat_response.cc | 27 + .../network/dubbo_proxy/heartbeat_response.h | 26 + .../dubbo_proxy/hessian_deserializer_impl.cc | 16 +- .../dubbo_proxy/hessian_deserializer_impl.h | 4 +- .../filters/network/dubbo_proxy/protocol.h | 39 +- .../network/dubbo_proxy/router/router_impl.cc | 11 +- .../network/dubbo_proxy/router/router_impl.h | 1 + .../filters/network/dubbo_proxy/stats.h | 11 +- .../filters/network/dubbo_proxy/BUILD | 45 +- .../network/dubbo_proxy/config_test.cc | 126 +- .../network/dubbo_proxy/conn_manager_test.cc | 1274 +++++++++++++++++ .../network/dubbo_proxy/decoder_test.cc | 255 ++++ .../dubbo_proxy/dubbo_protocol_impl_test.cc | 54 +- .../network/dubbo_proxy/filter_test.cc | 560 -------- .../hessian_deserializer_impl_test.cc | 15 +- .../filters/network/dubbo_proxy/mocks.cc | 20 +- .../filters/network/dubbo_proxy/mocks.h | 20 +- .../network/dubbo_proxy/router_test.cc | 1 - 50 files changed, 3413 insertions(+), 1344 deletions(-) create mode 100644 docs/root/api-v2/config/filter/dubbo/dubbo.rst create mode 100644 docs/root/configuration/dubbo_filters/dubbo_filters.rst create mode 100644 docs/root/configuration/dubbo_filters/router_filter.rst create mode 100644 docs/root/configuration/network_filters/dubbo_proxy_filter.rst create mode 100644 source/extensions/filters/network/dubbo_proxy/active_message.cc create mode 100644 source/extensions/filters/network/dubbo_proxy/active_message.h create mode 100644 source/extensions/filters/network/dubbo_proxy/conn_manager.cc create mode 100644 source/extensions/filters/network/dubbo_proxy/conn_manager.h delete mode 100644 source/extensions/filters/network/dubbo_proxy/filter.cc delete mode 100644 
source/extensions/filters/network/dubbo_proxy/filter.h create mode 100644 source/extensions/filters/network/dubbo_proxy/heartbeat_response.cc create mode 100644 source/extensions/filters/network/dubbo_proxy/heartbeat_response.h create mode 100644 test/extensions/filters/network/dubbo_proxy/conn_manager_test.cc create mode 100644 test/extensions/filters/network/dubbo_proxy/decoder_test.cc delete mode 100644 test/extensions/filters/network/dubbo_proxy/filter_test.cc diff --git a/api/docs/BUILD b/api/docs/BUILD index ead494196ab73..73e9473e152f9 100644 --- a/api/docs/BUILD +++ b/api/docs/BUILD @@ -35,6 +35,7 @@ proto_library( "//envoy/config/bootstrap/v2:bootstrap", "//envoy/config/common/tap/v2alpha:common", "//envoy/config/filter/accesslog/v2:accesslog", + "//envoy/config/filter/dubbo/router/v2alpha1:router", "//envoy/config/filter/http/buffer/v2:buffer", "//envoy/config/filter/http/ext_authz/v2:ext_authz", "//envoy/config/filter/http/fault/v2:fault", @@ -52,6 +53,7 @@ proto_library( "//envoy/config/filter/http/transcoder/v2:transcoder", "//envoy/config/filter/listener/original_src/v2alpha1:original_src", "//envoy/config/filter/network/client_ssl_auth/v2:client_ssl_auth", + "//envoy/config/filter/network/dubbo_proxy/v2alpha1:dubbo_proxy", "//envoy/config/filter/network/ext_authz/v2:ext_authz", "//envoy/config/filter/network/http_connection_manager/v2:http_connection_manager", "//envoy/config/filter/network/mongo_proxy/v2:mongo_proxy", diff --git a/api/envoy/config/filter/network/dubbo_proxy/v2alpha1/BUILD b/api/envoy/config/filter/network/dubbo_proxy/v2alpha1/BUILD index a2ae87ffcfae4..e3e83a7046847 100644 --- a/api/envoy/config/filter/network/dubbo_proxy/v2alpha1/BUILD +++ b/api/envoy/config/filter/network/dubbo_proxy/v2alpha1/BUILD @@ -1,8 +1,8 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library") +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal") licenses(["notice"]) # Apache 2 -api_proto_library( 
+api_proto_library_internal( name = "dubbo_proxy", srcs = [ "dubbo_proxy.proto", diff --git a/api/envoy/config/filter/network/dubbo_proxy/v2alpha1/dubbo_proxy.proto b/api/envoy/config/filter/network/dubbo_proxy/v2alpha1/dubbo_proxy.proto index e639830794741..5b0995ba0022d 100644 --- a/api/envoy/config/filter/network/dubbo_proxy/v2alpha1/dubbo_proxy.proto +++ b/api/envoy/config/filter/network/dubbo_proxy/v2alpha1/dubbo_proxy.proto @@ -15,7 +15,9 @@ import "validate/validate.proto"; import "gogoproto/gogo.proto"; // [#protodoc-title: Dubbo Proxy] -// Dubbo Proxy filter configuration. +// Dubbo Proxy :ref:`configuration overview `. + +// [#comment:next free field: 6] message DubboProxy { // The human readable prefix to use when emitting statistics. string stat_prefix = 1 [(validate.rules).string.min_bytes = 1]; @@ -36,10 +38,12 @@ message DubboProxy { repeated DubboFilter dubbo_filters = 5; } +// Dubbo Protocol types supported by Envoy. enum ProtocolType { Dubbo = 0; // the default protocol. } +// Dubbo Serialization types supported by Envoy. enum SerializationType { Hessian2 = 0; // the default serialization protocol. } diff --git a/api/envoy/config/filter/network/dubbo_proxy/v2alpha1/route.proto b/api/envoy/config/filter/network/dubbo_proxy/v2alpha1/route.proto index bc5f682554946..84b6d3fc5c174 100644 --- a/api/envoy/config/filter/network/dubbo_proxy/v2alpha1/route.proto +++ b/api/envoy/config/filter/network/dubbo_proxy/v2alpha1/route.proto @@ -18,8 +18,10 @@ import "gogoproto/gogo.proto"; option (gogoproto.stable_marshaler_all) = true; -// [#protodoc-title: Dubbo route configuration] +// [#protodoc-title: Dubbo Proxy Route Configuration] +// Dubbo Proxy :ref:`configuration overview `. +// [#comment:next free field: 6] message RouteConfiguration { // The name of the route configuration. Reserved for future use in asynchronous route discovery. 
string name = 1; @@ -38,6 +40,7 @@ message RouteConfiguration { repeated Route routes = 5 [(gogoproto.nullable) = false]; } +// [#comment:next free field: 3] message Route { // Route matching parameters. RouteMatch match = 1 [(validate.rules).message.required = true, (gogoproto.nullable) = false]; @@ -46,6 +49,35 @@ message Route { RouteAction route = 2 [(validate.rules).message.required = true, (gogoproto.nullable) = false]; } +// [#comment:next free field: 3] +message RouteMatch { + // Method level routing matching. + MethodMatch method = 1; + + // Specifies a set of headers that the route should match on. The router will check the request’s + // headers against all the specified headers in the route config. A match will happen if all the + // headers in the route are present in the request with the same values (or based on presence if + // the value field is not in the config). + repeated envoy.api.v2.route.HeaderMatcher headers = 2; +} + +// [#comment:next free field: 3] +message RouteAction { + oneof cluster_specifier { + option (validate.required) = true; + + // Indicates the upstream cluster to which the request should be routed. + string cluster = 1; + + // Multiple upstream clusters can be specified for a given route. The + // request is routed to one of the upstream clusters based on weights + // assigned to each cluster. + // Currently ClusterWeight only supports the name and weight fields. + envoy.api.v2.route.WeightedCluster weighted_clusters = 2; + } +} + +// [#comment:next free field: 5] message MethodMatch { // The name of the method. envoy.type.matcher.StringMatcher name = 1; @@ -66,8 +98,7 @@ message MethodMatch { // Examples: // // * For range [-10,0), route will match for header value -1, but not for 0, - // "somestring", 10.9, - // "-1somestring" + // "somestring", 10.9, "-1somestring" envoy.type.Int64Range range_match = 4; } } @@ -77,32 +108,3 @@ message MethodMatch { // The value is the parameter matching type. 
map params_match = 2; } - -message RouteMatch { - // Method level routing matching. - MethodMatch method = 1; - - // Specifies a set of headers that the route should match on. The router will check the request’s - // headers against all the specified headers in the route config. A match will happen if all the - // headers in the route are present in the request with the same values (or based on presence if - // the value field is not in the config). - repeated envoy.api.v2.route.HeaderMatcher headers = 2; -} - -// [#comment:next free field: 2] -message RouteAction { - oneof cluster_specifier { - option (validate.required) = true; - - // Indicates the upstream cluster to which the request should be routed. - string cluster = 1; - - // Multiple upstream clusters can be specified for a given route. The - // request is routed to one of the upstream clusters based on weights - // assigned to each cluster. - // - // .. note:: - // Currently ClusterWeight only supports the name and weight fields. 
- envoy.api.v2.route.WeightedCluster weighted_clusters = 2; - } -} diff --git a/docs/build.sh b/docs/build.sh index 2985c0e51a787..6d6a88c2bb7a5 100755 --- a/docs/build.sh +++ b/docs/build.sh @@ -105,6 +105,9 @@ PROTO_RST=" /envoy/config/filter/http/tap/v2alpha/tap/envoy/config/filter/http/tap/v2alpha/tap.proto.rst /envoy/config/filter/http/transcoder/v2/transcoder/envoy/config/filter/http/transcoder/v2/transcoder.proto.rst /envoy/config/filter/listener/original_src/v2alpha1/original_src/envoy/config/filter/listener/original_src/v2alpha1/original_src.proto.rst + /envoy/config/filter/network/dubbo_proxy/v2alpha1/dubbo_proxy/envoy/config/filter/network/dubbo_proxy/v2alpha1/dubbo_proxy.proto.rst + /envoy/config/filter/network/dubbo_proxy/v2alpha1/dubbo_proxy/envoy/config/filter/network/dubbo_proxy/v2alpha1/route.proto.rst + /envoy/config/filter/dubbo/router/v2alpha1/router/envoy/config/filter/dubbo/router/v2alpha1/router.proto.rst /envoy/config/filter/network/client_ssl_auth/v2/client_ssl_auth/envoy/config/filter/network/client_ssl_auth/v2/client_ssl_auth.proto.rst /envoy/config/filter/network/ext_authz/v2/ext_authz/envoy/config/filter/network/ext_authz/v2/ext_authz.proto.rst /envoy/config/filter/network/http_connection_manager/v2/http_connection_manager/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto.rst diff --git a/docs/root/api-v2/config/filter/dubbo/dubbo.rst b/docs/root/api-v2/config/filter/dubbo/dubbo.rst new file mode 100644 index 0000000000000..d90e49b707dae --- /dev/null +++ b/docs/root/api-v2/config/filter/dubbo/dubbo.rst @@ -0,0 +1,8 @@ +Dubbo filters +============== + +.. 
toctree:: + :glob: + :maxdepth: 2 + + */v2alpha1/* diff --git a/docs/root/api-v2/config/filter/filter.rst b/docs/root/api-v2/config/filter/filter.rst index 88385094a2f44..6ddd5e15abf30 100644 --- a/docs/root/api-v2/config/filter/filter.rst +++ b/docs/root/api-v2/config/filter/filter.rst @@ -11,3 +11,4 @@ Filters accesslog/v2/accesslog.proto fault/v2/fault.proto listener/listener + dubbo/dubbo diff --git a/docs/root/configuration/configuration.rst b/docs/root/configuration/configuration.rst index fca889b67559a..3effeaa8e8554 100644 --- a/docs/root/configuration/configuration.rst +++ b/docs/root/configuration/configuration.rst @@ -14,6 +14,7 @@ Configuration reference http_conn_man/http_conn_man http_filters/http_filters thrift_filters/thrift_filters + dubbo_filters/dubbo_filters cluster_manager/cluster_manager health_checkers/health_checkers access_log diff --git a/docs/root/configuration/dubbo_filters/dubbo_filters.rst b/docs/root/configuration/dubbo_filters/dubbo_filters.rst new file mode 100644 index 0000000000000..2577324dd3554 --- /dev/null +++ b/docs/root/configuration/dubbo_filters/dubbo_filters.rst @@ -0,0 +1,11 @@ +.. _config_dubbo_filters: + +Dubbo filters +=============== + +Envoy has the following builtin Dubbo filters. + +.. toctree:: + :maxdepth: 2 + + router_filter diff --git a/docs/root/configuration/dubbo_filters/router_filter.rst b/docs/root/configuration/dubbo_filters/router_filter.rst new file mode 100644 index 0000000000000..f4393238d9836 --- /dev/null +++ b/docs/root/configuration/dubbo_filters/router_filter.rst @@ -0,0 +1,11 @@ +.. _config_dubbo_filters_router: + +Router +====== + +The router filter implements Dubbo forwarding. It will be used in almost all Dubbo proxying +scenarios. The filter's main job is to follow the instructions specified in the configured +:ref:`route table `. + +* :ref:`v2 API reference ` +* This filter should be configured with the name *envoy.router*. 
diff --git a/docs/root/configuration/network_filters/dubbo_proxy_filter.rst b/docs/root/configuration/network_filters/dubbo_proxy_filter.rst new file mode 100644 index 0000000000000..503dd6970a9b6 --- /dev/null +++ b/docs/root/configuration/network_filters/dubbo_proxy_filter.rst @@ -0,0 +1,82 @@ +.. _config_network_filters_dubbo_proxy: + +Dubbo proxy +============ + +The dubbo proxy filter decodes the RPC protocol between dubbo clients +and servers. The decoded RPC information is converted to metadata. +The metadata includes the basic request ID, request type, serialization type, +and the required service name, method name, parameter name, +and parameter value for routing. + +* :ref:`v2 API reference ` +* This filter should be configured with the name *envoy.filters.network.dubbo_proxy*. + +.. _config_network_filters_dubbo_proxy_stats: + +Statistics +---------- + +Every configured dubbo proxy filter has statistics rooted at *dubbo..* with the +following statistics: + +.. csv-table:: + :header: Name, Type, Description + :widths: 1, 1, 2 + + request, Counter, Total requests + request_twoway, Counter, Total twoway requests + request_oneway, Counter, Total oneway requests + request_event, Counter, Total event requests + request_decoding_error, Counter, Total decoding error requests + request_decoding_success, Counter, Total decoding success requests + request_active, Gauge, Total active requests + response, Counter, Total responses + response_success, Counter, Total success responses + response_error, Counter, Total responses that protocol parse error + response_error_caused_connection_close, Counter, Total responses that caused by the downstream connection close + response_business_exception, Counter, Total responses that the protocol contains exception information returned by the business layer + response_decoding_error, Counter, Total decoding error responses + response_decoding_success, Counter, Total decoding success responses + response_error, Counter, Total
responses that protocol parse error + local_response_success, Counter, Total local responses + local_response_error, Counter, Total local responses that encoding error + local_response_business_exception, Counter, Total local responses that the protocol contains business exception + cx_destroy_local_with_active_rq, Counter, Connections destroyed locally with an active query + cx_destroy_remote_with_active_rq, Counter, Connections destroyed remotely with an active query + + +Implement custom filter based on the dubbo proxy filter +-------------------------------------------------------- + +If you want to implement a custom filter based on the dubbo protocol, +the dubbo proxy filter, like HTTP, also provides a very convenient way to extend it. +The first step is to implement the DecoderFilter interface and give the filter a name, such as testFilter; +the second step is to add your configuration; for the configuration method, refer to the following sample + +.. code-block:: yaml + + filter_chains: + - filters: + - name: envoy.filters.network.dubbo_proxy + config: + stat_prefix: dubbo_incoming_stats + protocol_type: Dubbo + serialization_type: Hessian2 + route_config: + name: local_route + interface: org.apache.dubbo.demo.DemoService + routes: + - match: + method: + name: + exact: sayHello + route: + cluster: user_service_dubbo_server + dubbo_filters: + - name: envoy.filters.dubbo.testFilter + config: + "@type": type.googleapis.com/google.protobuf.Struct + value: + name: test_service + - name: envoy.filters.dubbo.router \ No newline at end of file diff --git a/docs/root/configuration/network_filters/network_filters.rst b/docs/root/configuration/network_filters/network_filters.rst index 91693bc40ab05..f43f474ac6547 100644 --- a/docs/root/configuration/network_filters/network_filters.rst +++ b/docs/root/configuration/network_filters/network_filters.rst @@ -10,6 +10,7 @@ filters. ..
toctree:: :maxdepth: 2 + dubbo_proxy_filter client_ssl_auth_filter echo_filter ext_authz_filter diff --git a/docs/root/intro/version_history.rst b/docs/root/intro/version_history.rst index 86d397da05ad8..ed65a13e15ca3 100644 --- a/docs/root/intro/version_history.rst +++ b/docs/root/intro/version_history.rst @@ -3,6 +3,7 @@ Version history 1.11.0 (Pending) ================ +* dubbo_proxy: support the :ref:`Dubbo proxy filter `. * upstream: added :ref:`upstream_cx_pool_overflow ` for the connection pool circuit breaker. 1.10.0 (Apr 5, 2019) diff --git a/source/extensions/filters/network/dubbo_proxy/BUILD b/source/extensions/filters/network/dubbo_proxy/BUILD index 250d8c15462bb..46159802a9475 100644 --- a/source/extensions/filters/network/dubbo_proxy/BUILD +++ b/source/extensions/filters/network/dubbo_proxy/BUILD @@ -62,6 +62,7 @@ envoy_cc_library( ], deps = [ ":message_lib", + ":metadata_lib", "//include/envoy/buffer:buffer_interface", "//source/common/common:assert_lib", "//source/common/config:utility_lib", @@ -88,42 +89,34 @@ envoy_cc_library( srcs = ["decoder.cc"], hdrs = ["decoder.h"], deps = [ + ":decoder_events_lib", ":dubbo_protocol_impl_lib", + ":heartbeat_response_lib", ":hessian_deserializer_impl_lib", "//source/common/buffer:buffer_lib", "//source/common/common:logger_lib", ], ) -envoy_cc_library( - name = "filter_lib", - srcs = ["filter.cc"], - hdrs = ["filter.h"], - deps = [ - ":decoder_lib", - ":stats_lib", - "//include/envoy/network:connection_interface", - "//include/envoy/network:filter_interface", - "//include/envoy/stats:stats_interface", - "//include/envoy/stats:stats_macros", - "//include/envoy/stats:timespan", - "//source/common/buffer:buffer_lib", - "//source/common/common:assert_lib", - "//source/common/common:logger_lib", - "//source/common/network:filter_lib", - "@envoy_api//envoy/config/filter/network/dubbo_proxy/v2alpha1:dubbo_proxy_cc", - ], -) - envoy_cc_library( name = "config", srcs = ["config.cc"], hdrs = ["config.h"], deps = [ - 
":filter_lib", + ":conn_manager_lib", "//include/envoy/registry", + "//include/envoy/stats:stats_interface", + "//include/envoy/stats:stats_macros", + "//source/common/common:utility_lib", + "//source/common/config:filter_json_lib", + "//source/common/config:utility_lib", "//source/extensions/filters/network:well_known_names", "//source/extensions/filters/network/common:factory_base_lib", + "//source/extensions/filters/network/dubbo_proxy/filters:factory_base_lib", + "//source/extensions/filters/network/dubbo_proxy/filters:well_known_names", + "//source/extensions/filters/network/dubbo_proxy/router:config", + "//source/extensions/filters/network/dubbo_proxy/router:route_matcher", + "//source/extensions/filters/network/dubbo_proxy/router:router_lib", "@envoy_api//envoy/config/filter/network/dubbo_proxy/v2alpha1:dubbo_proxy_cc", ], ) @@ -176,3 +169,53 @@ envoy_cc_library( "//source/extensions/filters/network/dubbo_proxy/filters:filter_interface", ], ) + +envoy_cc_library( + name = "heartbeat_response_lib", + srcs = ["heartbeat_response.cc"], + hdrs = ["heartbeat_response.h"], + deps = [ + ":deserializer_interface", + ":metadata_lib", + ":protocol_interface", + "//include/envoy/buffer:buffer_interface", + "//source/extensions/filters/network/dubbo_proxy/filters:filter_interface", + ], +) + +envoy_cc_library( + name = "conn_manager_lib", + srcs = [ + "active_message.cc", + "conn_manager.cc", + ], + hdrs = [ + "active_message.h", + "conn_manager.h", + ], + deps = [ + ":app_exception_lib", + ":decoder_events_lib", + ":decoder_lib", + ":dubbo_protocol_impl_lib", + ":heartbeat_response_lib", + ":hessian_deserializer_impl_lib", + ":stats_lib", + "//include/envoy/event:deferred_deletable", + "//include/envoy/event:dispatcher_interface", + "//include/envoy/network:connection_interface", + "//include/envoy/network:filter_interface", + "//include/envoy/stats:stats_interface", + "//include/envoy/stats:timespan", + "//source/common/buffer:buffer_lib", + 
"//source/common/buffer:watermark_buffer_lib", + "//source/common/common:assert_lib", + "//source/common/common:linked_object", + "//source/common/common:logger_lib", + "//source/common/network:filter_lib", + "//source/common/stream_info:stream_info_lib", + "//source/extensions/filters/network/dubbo_proxy/filters:filter_interface", + "//source/extensions/filters/network/dubbo_proxy/router:router_interface", + "@envoy_api//envoy/config/filter/network/dubbo_proxy/v2alpha1:dubbo_proxy_cc", + ], +) diff --git a/source/extensions/filters/network/dubbo_proxy/active_message.cc b/source/extensions/filters/network/dubbo_proxy/active_message.cc new file mode 100644 index 0000000000000..179c6852b3703 --- /dev/null +++ b/source/extensions/filters/network/dubbo_proxy/active_message.cc @@ -0,0 +1,421 @@ +#include "extensions/filters/network/dubbo_proxy/active_message.h" + +#include "extensions/filters/network/dubbo_proxy/app_exception.h" +#include "extensions/filters/network/dubbo_proxy/conn_manager.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace DubboProxy { + +// class ResponseDecoder +ResponseDecoder::ResponseDecoder(Buffer::Instance& buffer, DubboFilterStats& stats, + Network::Connection& connection, Deserializer& deserializer, + Protocol& protocol) + : response_buffer_(buffer), stats_(stats), response_connection_(connection), + decoder_(std::make_unique(protocol, deserializer, *this)), complete_(false) {} + +bool ResponseDecoder::onData(Buffer::Instance& data) { + ENVOY_LOG(debug, "dubbo response: the received reply data length is {}", data.length()); + + bool underflow = false; + decoder_->onData(data, underflow); + ASSERT(complete_ || underflow); + return complete_; +} + +Network::FilterStatus ResponseDecoder::transportBegin() { + stats_.response_.inc(); + response_buffer_.drain(response_buffer_.length()); + ProtocolDataPassthroughConverter::initProtocolConverter(response_buffer_); + + return Network::FilterStatus::Continue; +} + 
+Network::FilterStatus ResponseDecoder::transportEnd() { + if (response_connection_.state() != Network::Connection::State::Open) { + throw DownstreamConnectionCloseException("Downstream has closed or closing"); + } + + response_connection_.write(response_buffer_, false); + ENVOY_LOG(debug, + "dubbo response: the upstream response message has been forwarded to the downstream"); + return Network::FilterStatus::Continue; +} + +Network::FilterStatus ResponseDecoder::messageBegin(MessageType, int64_t, SerializationType) { + return Network::FilterStatus::Continue; +} + +Network::FilterStatus ResponseDecoder::messageEnd(MessageMetadataSharedPtr metadata) { + ASSERT(metadata->message_type() == MessageType::Response || + metadata->message_type() == MessageType::Exception); + ASSERT(metadata->response_status().has_value()); + + stats_.response_decoding_success_.inc(); + if (metadata->message_type() == MessageType::Exception) { + stats_.response_business_exception_.inc(); + } + + metadata_ = metadata; + switch (metadata->response_status().value()) { + case ResponseStatus::Ok: + stats_.response_success_.inc(); + break; + default: + stats_.response_error_.inc(); + ENVOY_LOG(error, "dubbo response status: {}", + static_cast(metadata->response_status().value())); + break; + } + + complete_ = true; + ENVOY_LOG(debug, "dubbo response: complete processing of upstream response messages, id is {}", + metadata->request_id()); + + return Network::FilterStatus::Continue; +} + +DecoderEventHandler* ResponseDecoder::newDecoderEventHandler() { return this; } + +// class ActiveMessageDecoderFilter +ActiveMessageDecoderFilter::ActiveMessageDecoderFilter(ActiveMessage& parent, + DubboFilters::DecoderFilterSharedPtr filter) + : parent_(parent), handle_(filter) {} + +uint64_t ActiveMessageDecoderFilter::requestId() const { return parent_.requestId(); } + +uint64_t ActiveMessageDecoderFilter::streamId() const { return parent_.streamId(); } + +const Network::Connection* 
ActiveMessageDecoderFilter::connection() const { + return parent_.connection(); +} + +void ActiveMessageDecoderFilter::continueDecoding() { + const Network::FilterStatus status = parent_.applyDecoderFilters(this); + if (status == Network::FilterStatus::Continue) { + // All filters have been executed for the current decoder state. + if (parent_.pending_transport_end()) { + // If the filter stack was paused during messageEnd, handle end-of-request details. + parent_.finalizeRequest(); + } + parent_.continueDecoding(); + } +} + +Router::RouteConstSharedPtr ActiveMessageDecoderFilter::route() { return parent_.route(); } + +SerializationType ActiveMessageDecoderFilter::downstreamSerializationType() const { + return parent_.downstreamSerializationType(); +} + +ProtocolType ActiveMessageDecoderFilter::downstreamProtocolType() const { + return parent_.downstreamProtocolType(); +} + +void ActiveMessageDecoderFilter::sendLocalReply(const DubboFilters::DirectResponse& response, + bool end_stream) { + parent_.sendLocalReply(response, end_stream); +} + +void ActiveMessageDecoderFilter::startUpstreamResponse(Deserializer& deserializer, + Protocol& protocol) { + parent_.startUpstreamResponse(deserializer, protocol); +} + +DubboFilters::UpstreamResponseStatus +ActiveMessageDecoderFilter::upstreamData(Buffer::Instance& buffer) { + return parent_.upstreamData(buffer); +} + +void ActiveMessageDecoderFilter::resetDownstreamConnection() { + parent_.resetDownstreamConnection(); +} + +void ActiveMessageDecoderFilter::resetStream() { parent_.resetStream(); } + +StreamInfo::StreamInfo& ActiveMessageDecoderFilter::streamInfo() { return parent_.streamInfo(); } + +// class ActiveMessage +ActiveMessage::ActiveMessage(ConnectionManager& parent) + : parent_(parent), request_timer_(std::make_unique( + parent_.stats().request_time_ms_, parent.time_system())), + request_id_(-1), stream_id_(parent.random_generator().random()), + stream_info_(parent.time_system()), pending_transport_end_(false), + 
local_response_sent_(false) { + parent_.stats().request_active_.inc(); + stream_info_.setDownstreamLocalAddress(parent_.connection().localAddress()); + stream_info_.setDownstreamRemoteAddress(parent_.connection().remoteAddress()); +} + +ActiveMessage::~ActiveMessage() { + parent_.stats().request_active_.dec(); + request_timer_->complete(); + for (auto& filter : decoder_filters_) { + filter->handler()->onDestroy(); + } + ENVOY_LOG(debug, "ActiveMessage::~ActiveMessage()"); +} + +Network::FilterStatus ActiveMessage::transportBegin() { + filter_action_ = [](DubboFilters::DecoderFilter* filter) -> Network::FilterStatus { + return filter->transportBegin(); + }; + + return this->applyDecoderFilters(nullptr); +} + +Network::FilterStatus ActiveMessage::transportEnd() { + filter_action_ = [](DubboFilters::DecoderFilter* filter) -> Network::FilterStatus { + return filter->transportEnd(); + }; + + Network::FilterStatus status = applyDecoderFilters(nullptr); + if (status == Network::FilterStatus::StopIteration) { + pending_transport_end_ = true; + return status; + } + + finalizeRequest(); + + ENVOY_LOG(debug, "dubbo request: complete processing of downstream request messages, id is {}", + request_id_); + + return status; +} + +Network::FilterStatus ActiveMessage::messageBegin(MessageType type, int64_t message_id, + SerializationType serialization_type) { + request_id_ = message_id; + filter_action_ = [type, message_id, serialization_type]( + DubboFilters::DecoderFilter* filter) -> Network::FilterStatus { + return filter->messageBegin(type, message_id, serialization_type); + }; + + return applyDecoderFilters(nullptr); +} + +Network::FilterStatus ActiveMessage::messageEnd(MessageMetadataSharedPtr metadata) { + ASSERT(metadata->message_type() == MessageType::Request || + metadata->message_type() == MessageType::Oneway); + + // Currently only hessian serialization is implemented. 
+ ASSERT(metadata->serialization_type() == SerializationType::Hessian); + + ENVOY_LOG(debug, "dubbo request: start processing downstream request messages, id is {}", + metadata->request_id()); + + parent_.stats().request_decoding_success_.inc(); + + metadata_ = metadata; + filter_action_ = [metadata](DubboFilters::DecoderFilter* filter) -> Network::FilterStatus { + return filter->messageEnd(metadata); + }; + + return applyDecoderFilters(nullptr); +} + +Network::FilterStatus ActiveMessage::transferHeaderTo(Buffer::Instance& header_buf, size_t size) { + filter_action_ = [&header_buf, + size](DubboFilters::DecoderFilter* filter) -> Network::FilterStatus { + return filter->transferHeaderTo(header_buf, size); + }; + + // If a local reply is generated, the filter callback is skipped and + // the buffer data needs to be actively released. + if (local_response_sent_) { + header_buf.drain(size); + } + + return applyDecoderFilters(nullptr); +} + +Network::FilterStatus ActiveMessage::transferBodyTo(Buffer::Instance& body_buf, size_t size) { + filter_action_ = [&body_buf, size](DubboFilters::DecoderFilter* filter) -> Network::FilterStatus { + return filter->transferBodyTo(body_buf, size); + }; + + // If a local reply is generated, the filter callback is skipped and + // the buffer data needs to be actively released. 
+ if (local_response_sent_) { + body_buf.drain(size); + } + + return applyDecoderFilters(nullptr); +} + +void ActiveMessage::finalizeRequest() { + pending_transport_end_ = false; + parent_.stats().request_.inc(); + bool is_one_way = false; + switch (metadata_->message_type()) { + case MessageType::Request: + parent_.stats().request_twoway_.inc(); + break; + case MessageType::Oneway: + parent_.stats().request_oneway_.inc(); + is_one_way = true; + break; + default: + break; + } + + if (local_response_sent_ || is_one_way) { + parent_.deferredMessage(*this); + } +} + +void ActiveMessage::createFilterChain() { + parent_.config().filterFactory().createFilterChain(*this); +} + +DubboProxy::Router::RouteConstSharedPtr ActiveMessage::route() { + if (cached_route_) { + return cached_route_.value(); + } + + if (metadata_ != nullptr) { + DubboProxy::Router::RouteConstSharedPtr route = + parent_.config().routerConfig().route(*metadata_, stream_id_); + cached_route_ = route; + return cached_route_.value(); + } + + return nullptr; +} + +Network::FilterStatus ActiveMessage::applyDecoderFilters(ActiveMessageDecoderFilter* filter) { + ASSERT(filter_action_ != nullptr); + + if (!local_response_sent_) { + std::list::iterator entry; + if (!filter) { + entry = decoder_filters_.begin(); + } else { + entry = std::next(filter->entry()); + } + + for (; entry != decoder_filters_.end(); entry++) { + const Network::FilterStatus status = filter_action_((*entry)->handler().get()); + if (local_response_sent_) { + break; + } + + if (status != Network::FilterStatus::Continue) { + return status; + } + } + } + + filter_action_ = nullptr; + + return Network::FilterStatus::Continue; +} + +void ActiveMessage::sendLocalReply(const DubboFilters::DirectResponse& response, bool end_stream) { + if (!metadata_) { + // If the sendLocalReply function is called before the messageEnd callback, + // metadata_ is nullptr, metadata object needs to be created in order to generate a local reply. 
+ metadata_ = std::make_shared(); + } + metadata_->setRequestId(request_id_); + parent_.sendLocalReply(*metadata_, response, end_stream); + + if (end_stream) { + return; + } + + local_response_sent_ = true; +} + +void ActiveMessage::startUpstreamResponse(Deserializer& deserializer, Protocol& protocol) { + ENVOY_LOG(debug, "dubbo response: start upstream"); + + ASSERT(response_decoder_ == nullptr); + + // Create a response message decoder. + response_decoder_ = std::make_unique( + response_buffer_, parent_.stats(), parent_.connection(), deserializer, protocol); +} + +DubboFilters::UpstreamResponseStatus ActiveMessage::upstreamData(Buffer::Instance& buffer) { + ASSERT(response_decoder_ != nullptr); + + try { + if (response_decoder_->onData(buffer)) { + if (requestId() != response_decoder_->requestId()) { + throw EnvoyException(fmt::format("dubbo response: request ID is not equal, {}:{}", + requestId(), response_decoder_->requestId())); + } + + // Completed upstream response. + parent_.deferredMessage(*this); + return DubboFilters::UpstreamResponseStatus::Complete; + } + return DubboFilters::UpstreamResponseStatus::MoreData; + } catch (const DownstreamConnectionCloseException& ex) { + ENVOY_CONN_LOG(error, "dubbo response: exception ({})", parent_.connection(), ex.what()); + onReset(); + parent_.stats().response_error_caused_connection_close_.inc(); + return DubboFilters::UpstreamResponseStatus::Reset; + } catch (const EnvoyException& ex) { + ENVOY_CONN_LOG(error, "dubbo response: exception ({})", parent_.connection(), ex.what()); + parent_.stats().response_decoding_error_.inc(); + + onError(ex.what()); + return DubboFilters::UpstreamResponseStatus::Reset; + } +} + +void ActiveMessage::resetDownstreamConnection() { + parent_.connection().close(Network::ConnectionCloseType::NoFlush); +} + +void ActiveMessage::resetStream() { parent_.deferredMessage(*this); } + +uint64_t ActiveMessage::requestId() const { + return metadata_ != nullptr ? 
metadata_->request_id() : 0; +} + +uint64_t ActiveMessage::streamId() const { return stream_id_; } + +void ActiveMessage::continueDecoding() { parent_.continueDecoding(); } + +SerializationType ActiveMessage::downstreamSerializationType() const { + return parent_.downstreamSerializationType(); +} + +ProtocolType ActiveMessage::downstreamProtocolType() const { + return parent_.downstreamProtocolType(); +} + +StreamInfo::StreamInfo& ActiveMessage::streamInfo() { return stream_info_; } + +const Network::Connection* ActiveMessage::connection() const { return &parent_.connection(); } + +void ActiveMessage::addDecoderFilter(DubboFilters::DecoderFilterSharedPtr filter) { + ActiveMessageDecoderFilterPtr wrapper = + std::make_unique(*this, filter); + filter->setDecoderFilterCallbacks(*wrapper); + wrapper->moveIntoListBack(std::move(wrapper), decoder_filters_); +} + +void ActiveMessage::onReset() { parent_.deferredMessage(*this); } + +void ActiveMessage::onError(const std::string& what) { + if (!metadata_) { + // It's possible that an error occurred before the decoder generated metadata, + // and a metadata object needs to be created in order to generate a local reply. 
+ metadata_ = std::make_shared(); + } + + ASSERT(metadata_); + ENVOY_LOG(error, "Bad response: {}", what); + sendLocalReply(AppException(ResponseStatus::BadResponse, what), false); + parent_.deferredMessage(*this); +} + +} // namespace DubboProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/network/dubbo_proxy/active_message.h b/source/extensions/filters/network/dubbo_proxy/active_message.h new file mode 100644 index 0000000000000..8ecda9ea74aa3 --- /dev/null +++ b/source/extensions/filters/network/dubbo_proxy/active_message.h @@ -0,0 +1,170 @@ +#pragma once + +#include "envoy/event/deferred_deletable.h" +#include "envoy/network/connection.h" +#include "envoy/network/filter.h" +#include "envoy/stats/timespan.h" + +#include "common/buffer/buffer_impl.h" +#include "common/common/linked_object.h" +#include "common/common/logger.h" +#include "common/stream_info/stream_info_impl.h" + +#include "extensions/filters/network/dubbo_proxy/decoder.h" +#include "extensions/filters/network/dubbo_proxy/decoder_event_handler.h" +#include "extensions/filters/network/dubbo_proxy/filters/filter.h" +#include "extensions/filters/network/dubbo_proxy/metadata.h" +#include "extensions/filters/network/dubbo_proxy/router/router.h" +#include "extensions/filters/network/dubbo_proxy/stats.h" + +#include "absl/types/optional.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace DubboProxy { + +class ConnectionManager; +class ActiveMessage; + +class ResponseDecoder : public DecoderCallbacks, + public DecoderEventHandler, + Logger::Loggable { +public: + ResponseDecoder(Buffer::Instance& buffer, DubboFilterStats& stats, + Network::Connection& connection, Deserializer& deserializer, Protocol& protocol); + ~ResponseDecoder() override = default; + + bool onData(Buffer::Instance& data); + + // DecoderEventHandler + Network::FilterStatus transportBegin() override; + Network::FilterStatus 
transportEnd() override; + Network::FilterStatus messageBegin(MessageType type, int64_t message_id, + SerializationType serialization_type) override; + Network::FilterStatus messageEnd(MessageMetadataSharedPtr metadata) override; + + // DecoderCallbacks + DecoderEventHandler* newDecoderEventHandler() override; + + uint64_t requestId() const { return metadata_ ? metadata_->request_id() : 0; } + +private: + Buffer::Instance& response_buffer_; + DubboFilterStats& stats_; + Network::Connection& response_connection_; + DecoderPtr decoder_; + MessageMetadataSharedPtr metadata_; + bool complete_ : 1; +}; + +typedef std::unique_ptr ResponseDecoderPtr; + +// Wraps a DecoderFilter and acts as the DecoderFilterCallbacks for the filter, enabling filter +// chain continuation. +class ActiveMessageDecoderFilter : public DubboFilters::DecoderFilterCallbacks, + public LinkedObject { +public: + ActiveMessageDecoderFilter(ActiveMessage& parent, DubboFilters::DecoderFilterSharedPtr filter); + ~ActiveMessageDecoderFilter() override = default; + + // DubboFilters::DecoderFilterCallbacks + uint64_t requestId() const override; + uint64_t streamId() const override; + const Network::Connection* connection() const override; + void continueDecoding() override; + DubboProxy::Router::RouteConstSharedPtr route() override; + SerializationType downstreamSerializationType() const override; + ProtocolType downstreamProtocolType() const override; + void sendLocalReply(const DubboFilters::DirectResponse& response, bool end_stream) override; + void startUpstreamResponse(Deserializer& deserializer, Protocol& protocol) override; + DubboFilters::UpstreamResponseStatus upstreamData(Buffer::Instance& buffer) override; + void resetDownstreamConnection() override; + StreamInfo::StreamInfo& streamInfo() override; + void resetStream() override; + + DubboFilters::DecoderFilterSharedPtr handler() { return handle_; } + +private: + ActiveMessage& parent_; + DubboFilters::DecoderFilterSharedPtr handle_; +}; + 
+typedef std::unique_ptr ActiveMessageDecoderFilterPtr; + +// ActiveMessage tracks downstream requests for which no response has been received. +class ActiveMessage : public LinkedObject, + public Event::DeferredDeletable, + public DecoderEventHandler, + public DubboFilters::DecoderFilterCallbacks, + public DubboFilters::FilterChainFactoryCallbacks, + Logger::Loggable { +public: + ActiveMessage(ConnectionManager& parent); + ~ActiveMessage() override; + + // Dubbo::FilterChainFactoryCallbacks + void addDecoderFilter(DubboFilters::DecoderFilterSharedPtr filter) override; + + // DecoderEventHandler + Network::FilterStatus transportBegin() override; + Network::FilterStatus transportEnd() override; + Network::FilterStatus messageBegin(MessageType type, int64_t message_id, + SerializationType serialization_type) override; + Network::FilterStatus messageEnd(MessageMetadataSharedPtr metadata) override; + Network::FilterStatus transferHeaderTo(Buffer::Instance& header_buf, size_t size) override; + Network::FilterStatus transferBodyTo(Buffer::Instance& body_buf, size_t size) override; + + // DubboFilters::DecoderFilterCallbacks + uint64_t requestId() const override; + uint64_t streamId() const override; + const Network::Connection* connection() const override; + void continueDecoding() override; + SerializationType downstreamSerializationType() const override; + ProtocolType downstreamProtocolType() const override; + StreamInfo::StreamInfo& streamInfo() override; + Router::RouteConstSharedPtr route() override; + void sendLocalReply(const DubboFilters::DirectResponse& response, bool end_stream) override; + void startUpstreamResponse(Deserializer& deserializer, Protocol& protocol) override; + DubboFilters::UpstreamResponseStatus upstreamData(Buffer::Instance& buffer) override; + void resetDownstreamConnection() override; + void resetStream() override; + + void createFilterChain(); + Network::FilterStatus applyDecoderFilters(ActiveMessageDecoderFilter* filter); + void 
finalizeRequest(); + void onReset(); + void onError(const std::string& what); + MessageMetadataSharedPtr metadata() const { return metadata_; } + bool pending_transport_end() const { return pending_transport_end_; } + +private: + ConnectionManager& parent_; + + MessageMetadataSharedPtr metadata_; + Stats::TimespanPtr request_timer_; + ResponseDecoderPtr response_decoder_; + + absl::optional cached_route_; + + std::list decoder_filters_; + std::function filter_action_; + + int32_t request_id_; + + // This value is used in the calculation of the weighted cluster. + uint64_t stream_id_; + StreamInfo::StreamInfoImpl stream_info_; + + Buffer::OwnedImpl response_buffer_; + + bool pending_transport_end_ : 1; + bool local_response_sent_ : 1; +}; + +typedef std::unique_ptr ActiveMessagePtr; + +} // namespace DubboProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/network/dubbo_proxy/app_exception.cc b/source/extensions/filters/network/dubbo_proxy/app_exception.cc index d3dd4bbc77133..8c35ce60c492a 100644 --- a/source/extensions/filters/network/dubbo_proxy/app_exception.cc +++ b/source/extensions/filters/network/dubbo_proxy/app_exception.cc @@ -38,6 +38,9 @@ AppException::ResponseType AppException::encode(MessageMetadata& metadata, return DirectResponse::ResponseType::Exception; } +DownstreamConnectionCloseException::DownstreamConnectionCloseException(const std::string& what) + : EnvoyException(what) {} + } // namespace DubboProxy } // namespace NetworkFilters } // namespace Extensions diff --git a/source/extensions/filters/network/dubbo_proxy/app_exception.h b/source/extensions/filters/network/dubbo_proxy/app_exception.h index 31eba8b55a620..ae68fb47d5935 100644 --- a/source/extensions/filters/network/dubbo_proxy/app_exception.h +++ b/source/extensions/filters/network/dubbo_proxy/app_exception.h @@ -26,6 +26,10 @@ struct AppException : public EnvoyException, const RpcResponseType response_type_; }; 
+struct DownstreamConnectionCloseException : public EnvoyException { + DownstreamConnectionCloseException(const std::string& what); +}; + } // namespace DubboProxy } // namespace NetworkFilters } // namespace Extensions diff --git a/source/extensions/filters/network/dubbo_proxy/buffer_helper.h b/source/extensions/filters/network/dubbo_proxy/buffer_helper.h index 3725621a9e12b..d3020c39c0f96 100644 --- a/source/extensions/filters/network/dubbo_proxy/buffer_helper.h +++ b/source/extensions/filters/network/dubbo_proxy/buffer_helper.h @@ -10,62 +10,6 @@ namespace Extensions { namespace NetworkFilters { namespace DubboProxy { -/** - * BufferWrapper provides a partial implementation of Buffer::Instance that is sufficient for - * BufferHelper to read protocol data without draining the buffer's contents. - */ -class BufferWrapper : public Buffer::Instance { -public: - BufferWrapper(Buffer::Instance& underlying) : underlying_(underlying) {} - - uint64_t position() { return position_; } - - // Buffer::Instance - void copyOut(size_t start, uint64_t size, void* data) const override { - ASSERT(position_ + start + size <= underlying_.length()); - underlying_.copyOut(start + position_, size, data); - } - void drain(uint64_t size) override { - ASSERT(position_ + size <= underlying_.length()); - position_ += size; - } - uint64_t length() const override { - ASSERT(underlying_.length() >= position_); - return underlying_.length() - position_; - } - void* linearize(uint32_t size) override { - ASSERT(position_ + size <= underlying_.length()); - uint8_t* p = static_cast(underlying_.linearize(position_ + size)); - return p + position_; - } - - std::string toString() const override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } - void add(const void*, uint64_t) override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } - void addBufferFragment(Buffer::BufferFragment&) override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } - void add(absl::string_view) override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } - void add(const 
Buffer::Instance&) override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } - void commit(Buffer::RawSlice*, uint64_t) override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } - void prepend(absl::string_view) override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } - void prepend(Instance&) override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } - uint64_t getRawSlices(Buffer::RawSlice*, uint64_t) const override { - NOT_IMPLEMENTED_GCOVR_EXCL_LINE; - } - void move(Buffer::Instance&) override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } - void move(Buffer::Instance&, uint64_t) override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } - Api::IoCallUint64Result read(Network::IoHandle&, uint64_t) override { - NOT_IMPLEMENTED_GCOVR_EXCL_LINE; - } - uint64_t reserve(uint64_t, Buffer::RawSlice*, uint64_t) override { - NOT_IMPLEMENTED_GCOVR_EXCL_LINE; - } - ssize_t search(const void*, uint64_t, size_t) const override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } - Api::IoCallUint64Result write(Network::IoHandle&) override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } - -private: - Buffer::Instance& underlying_; - uint64_t position_{0}; -}; - /** * BufferHelper provides buffer operations for reading bytes and numbers in the various encodings * used by protocols. 
diff --git a/source/extensions/filters/network/dubbo_proxy/config.cc b/source/extensions/filters/network/dubbo_proxy/config.cc index ae228be2804f4..0d83fc2852e70 100644 --- a/source/extensions/filters/network/dubbo_proxy/config.cc +++ b/source/extensions/filters/network/dubbo_proxy/config.cc @@ -2,7 +2,12 @@ #include "envoy/registry/registry.h" -#include "extensions/filters/network/dubbo_proxy/filter.h" +#include "common/config/utility.h" + +#include "extensions/filters/network/dubbo_proxy/conn_manager.h" +#include "extensions/filters/network/dubbo_proxy/filters/factory_base.h" +#include "extensions/filters/network/dubbo_proxy/filters/well_known_names.h" +#include "extensions/filters/network/dubbo_proxy/stats.h" namespace Envoy { namespace Extensions { @@ -12,18 +17,11 @@ namespace DubboProxy { Network::FilterFactoryCb DubboProxyFilterConfigFactory::createFilterFactoryFromProtoTyped( const envoy::config::filter::network::dubbo_proxy::v2alpha1::DubboProxy& proto_config, Server::Configuration::FactoryContext& context) { - ASSERT(!proto_config.stat_prefix().empty()); - - const std::string stat_prefix = fmt::format("dubbo.{}.", proto_config.stat_prefix()); + std::shared_ptr filter_config(std::make_shared(proto_config, context)); - Filter::ConfigProtocolType protocol_type = proto_config.protocol_type(); - Filter::ConfigSerializationType serialization_type = proto_config.serialization_type(); - - return [stat_prefix, protocol_type, serialization_type, - &context](Network::FilterManager& filter_manager) -> void { - filter_manager.addFilter(std::make_shared(stat_prefix, protocol_type, - serialization_type, context.scope(), - context.dispatcher().timeSource())); + return [filter_config, &context](Network::FilterManager& filter_manager) -> void { + filter_manager.addReadFilter(std::make_shared( + *filter_config, context.random(), context.dispatcher().timeSource())); }; } @@ -33,6 +31,109 @@ Network::FilterFactoryCb DubboProxyFilterConfigFactory::createFilterFactoryFromP 
REGISTER_FACTORY(DubboProxyFilterConfigFactory, Server::Configuration::NamedNetworkFilterConfigFactory); +class ProtocolTypeMapper { +public: + using ConfigProtocolType = envoy::config::filter::network::dubbo_proxy::v2alpha1::ProtocolType; + typedef absl::flat_hash_map ProtocolTypeMap; + + static ProtocolType lookupProtocolType(ConfigProtocolType config_type) { + const auto& iter = protocolTypeMap().find(config_type); + ASSERT(iter != protocolTypeMap().end()); + return iter->second; + } + +private: + static const ProtocolTypeMap& protocolTypeMap() { + CONSTRUCT_ON_FIRST_USE(ProtocolTypeMap, { + {ConfigProtocolType::Dubbo, ProtocolType::Dubbo}, + }); + } +}; + +class SerializationTypeMapper { +public: + using ConfigSerializationType = + envoy::config::filter::network::dubbo_proxy::v2alpha1::SerializationType; + typedef absl::flat_hash_map SerializationTypeMap; + + static SerializationType lookupSerializationType(ConfigSerializationType type) { + const auto& iter = serializationTypeMap().find(type); + ASSERT(iter != serializationTypeMap().end()); + return iter->second; + } + +private: + static const SerializationTypeMap& serializationTypeMap() { + CONSTRUCT_ON_FIRST_USE(SerializationTypeMap, + { + {ConfigSerializationType::Hessian2, SerializationType::Hessian}, + }); + } +}; + +// class ConfigImpl. 
+ConfigImpl::ConfigImpl(const DubboProxyConfig& config, + Server::Configuration::FactoryContext& context) + : context_(context), stats_prefix_(fmt::format("dubbo.{}.", config.stat_prefix())), + stats_(DubboFilterStats::generateStats(stats_prefix_, context_.scope())), + serialization_type_( + SerializationTypeMapper::lookupSerializationType(config.serialization_type())), + protocol_type_(ProtocolTypeMapper::lookupProtocolType(config.protocol_type())), + route_matcher_(std::make_unique(config.route_config())) { + if (config.dubbo_filters().empty()) { + ENVOY_LOG(debug, "using default router filter"); + + envoy::config::filter::network::dubbo_proxy::v2alpha1::DubboFilter router_config; + router_config.set_name(DubboFilters::DubboFilterNames::get().ROUTER); + registerFilter(router_config); + } else { + for (const auto& filter_config : config.dubbo_filters()) { + registerFilter(filter_config); + } + } +} + +void ConfigImpl::createFilterChain(DubboFilters::FilterChainFactoryCallbacks& callbacks) { + for (const DubboFilters::FilterFactoryCb& factory : filter_factories_) { + factory(callbacks); + } +} + +Router::RouteConstSharedPtr ConfigImpl::route(const MessageMetadata& metadata, + uint64_t random_value) const { + return route_matcher_->route(metadata, random_value); +} + +ProtocolPtr ConfigImpl::createProtocol() { + return NamedProtocolConfigFactory::getFactory(protocol_type_).createProtocol(); +} + +DeserializerPtr ConfigImpl::createDeserializer() { + return NamedDeserializerConfigFactory::getFactory(serialization_type_).createDeserializer(); +} + +void ConfigImpl::registerFilter(const DubboFilterConfig& proto_config) { + const ProtobufTypes::String& string_name = proto_config.name(); + + ENVOY_LOG(debug, " dubbo filter #{}", filter_factories_.size()); + ENVOY_LOG(debug, " name: {}", string_name); + + const Json::ObjectSharedPtr filter_config = + MessageUtil::getJsonObjectFromMessage(proto_config.config()); + ENVOY_LOG(debug, " config: {}", 
filter_config->asJsonString()); + + auto& factory = + Envoy::Config::Utility::getAndCheckFactory( + string_name); + ProtobufTypes::MessagePtr message = factory.createEmptyConfigProto(); + Envoy::Config::Utility::translateOpaqueConfig(proto_config.config(), + ProtobufWkt::Struct::default_instance(), *message); + DubboFilters::FilterFactoryCb callback = + factory.createFilterFactoryFromProto(*message, stats_prefix_, context_); + + filter_factories_.push_back(callback); +} + } // namespace DubboProxy } // namespace NetworkFilters } // namespace Extensions diff --git a/source/extensions/filters/network/dubbo_proxy/config.h b/source/extensions/filters/network/dubbo_proxy/config.h index 0963ce0019494..076298217bf87 100644 --- a/source/extensions/filters/network/dubbo_proxy/config.h +++ b/source/extensions/filters/network/dubbo_proxy/config.h @@ -1,9 +1,15 @@ #pragma once +#include + #include "envoy/config/filter/network/dubbo_proxy/v2alpha1/dubbo_proxy.pb.h" #include "envoy/config/filter/network/dubbo_proxy/v2alpha1/dubbo_proxy.pb.validate.h" #include "extensions/filters/network/common/factory_base.h" +#include "extensions/filters/network/dubbo_proxy/conn_manager.h" +#include "extensions/filters/network/dubbo_proxy/filters/filter.h" +#include "extensions/filters/network/dubbo_proxy/router/route_matcher.h" +#include "extensions/filters/network/dubbo_proxy/router/router_impl.h" #include "extensions/filters/network/well_known_names.h" namespace Envoy { @@ -26,6 +32,44 @@ class DubboProxyFilterConfigFactory Server::Configuration::FactoryContext& context) override; }; +class ConfigImpl : public Config, + public Router::Config, + public DubboFilters::FilterChainFactory, + Logger::Loggable { +public: + using DubboProxyConfig = envoy::config::filter::network::dubbo_proxy::v2alpha1::DubboProxy; + using DubboFilterConfig = envoy::config::filter::network::dubbo_proxy::v2alpha1::DubboFilter; + + ConfigImpl(const DubboProxyConfig& config, Server::Configuration::FactoryContext& 
context); + ~ConfigImpl() override = default; + + // DubboFilters::FilterChainFactory + void createFilterChain(DubboFilters::FilterChainFactoryCallbacks& callbacks) override; + + // Router::Config + Router::RouteConstSharedPtr route(const MessageMetadata& metadata, + uint64_t random_value) const override; + + // Config + DubboFilterStats& stats() override { return stats_; } + DubboFilters::FilterChainFactory& filterFactory() override { return *this; } + Router::Config& routerConfig() override { return *this; } + ProtocolPtr createProtocol() override; + DeserializerPtr createDeserializer() override; + +private: + void registerFilter(const DubboFilterConfig& proto_config); + + Server::Configuration::FactoryContext& context_; + const std::string stats_prefix_; + DubboFilterStats stats_; + const SerializationType serialization_type_; + const ProtocolType protocol_type_; + std::unique_ptr route_matcher_; + + std::list filter_factories_; +}; + } // namespace DubboProxy } // namespace NetworkFilters } // namespace Extensions diff --git a/source/extensions/filters/network/dubbo_proxy/conn_manager.cc b/source/extensions/filters/network/dubbo_proxy/conn_manager.cc new file mode 100644 index 0000000000000..94f935df3ca64 --- /dev/null +++ b/source/extensions/filters/network/dubbo_proxy/conn_manager.cc @@ -0,0 +1,208 @@ +#include "extensions/filters/network/dubbo_proxy/conn_manager.h" + +#include + +#include "envoy/common/exception.h" + +#include "common/common/fmt.h" + +#include "extensions/filters/network/dubbo_proxy/app_exception.h" +#include "extensions/filters/network/dubbo_proxy/dubbo_protocol_impl.h" +#include "extensions/filters/network/dubbo_proxy/heartbeat_response.h" +#include "extensions/filters/network/dubbo_proxy/hessian_deserializer_impl.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace DubboProxy { + +constexpr uint32_t BufferLimit = UINT32_MAX; + +ConnectionManager::ConnectionManager(Config& config, Runtime::RandomGenerator& 
random_generator, + TimeSource& time_system) + : config_(config), time_system_(time_system), stats_(config_.stats()), + random_generator_(random_generator), deserializer_(config.createDeserializer()), + protocol_(config.createProtocol()), + decoder_(std::make_unique(*protocol_.get(), *deserializer_.get(), *this)) {} + +Network::FilterStatus ConnectionManager::onData(Buffer::Instance& data, bool end_stream) { + ENVOY_LOG(trace, "dubbo: read {} bytes", data.length()); + request_buffer_.move(data); + dispatch(); + + if (end_stream) { + ENVOY_CONN_LOG(trace, "downstream half-closed", read_callbacks_->connection()); + + // Downstream has closed. Unless we're waiting for an upstream connection to complete a oneway + // request, close. The special case for oneway requests allows them to complete before the + // ConnectionManager is destroyed. + if (stopped_) { + ASSERT(!active_message_list_.empty()); + auto metadata = (*active_message_list_.begin())->metadata(); + if (metadata && metadata->message_type() == MessageType::Oneway) { + ENVOY_CONN_LOG(trace, "waiting for one-way completion", read_callbacks_->connection()); + half_closed_ = true; + return Network::FilterStatus::StopIteration; + } + } + + ENVOY_LOG(debug, "dubbo: end data processing"); + resetAllMessages(false); + read_callbacks_->connection().close(Network::ConnectionCloseType::FlushWrite); + } + + return Network::FilterStatus::StopIteration; +} + +Network::FilterStatus ConnectionManager::onNewConnection() { + return Network::FilterStatus::Continue; +} + +void ConnectionManager::initializeReadFilterCallbacks(Network::ReadFilterCallbacks& callbacks) { + read_callbacks_ = &callbacks; + read_callbacks_->connection().addConnectionCallbacks(*this); + read_callbacks_->connection().enableHalfClose(true); + read_callbacks_->connection().setBufferLimits(BufferLimit); +} + +void ConnectionManager::onEvent(Network::ConnectionEvent event) { + resetAllMessages(event == Network::ConnectionEvent::LocalClose); +} + +void 
ConnectionManager::onAboveWriteBufferHighWatermark() { + ENVOY_CONN_LOG(debug, "onAboveWriteBufferHighWatermark", read_callbacks_->connection()); + read_callbacks_->connection().readDisable(true); +} + +void ConnectionManager::onBelowWriteBufferLowWatermark() { + ENVOY_CONN_LOG(debug, "onBelowWriteBufferLowWatermark", read_callbacks_->connection()); + read_callbacks_->connection().readDisable(false); +} + +DecoderEventHandler* ConnectionManager::newDecoderEventHandler() { + ENVOY_LOG(debug, "dubbo: create the new docoder event handler"); + + ActiveMessagePtr new_message(std::make_unique(*this)); + new_message->createFilterChain(); + new_message->moveIntoList(std::move(new_message), active_message_list_); + return (*active_message_list_.begin()).get(); +} + +void ConnectionManager::onHeartbeat(MessageMetadataSharedPtr metadata) { + stats_.request_event_.inc(); + + if (read_callbacks_->connection().state() != Network::Connection::State::Open) { + ENVOY_LOG(warn, "dubbo: downstream connection is closed or closing"); + return; + } + + metadata->setResponseStatus(ResponseStatus::Ok); + metadata->setMessageType(MessageType::Response); + metadata->setEventFlag(true); + + HeartbeatResponse heartbeat; + Buffer::OwnedImpl response_buffer; + heartbeat.encode(*metadata, *protocol_, *deserializer_, response_buffer); + + read_callbacks_->connection().write(response_buffer, false); +} + +void ConnectionManager::dispatch() { + if (0 == request_buffer_.length()) { + ENVOY_LOG(warn, "dubbo: it's empty data"); + return; + } + + if (stopped_) { + ENVOY_CONN_LOG(debug, "dubbo: dubbo filter stopped", read_callbacks_->connection()); + return; + } + + try { + bool underflow = false; + while (!underflow) { + Network::FilterStatus status = decoder_->onData(request_buffer_, underflow); + if (status == Network::FilterStatus::StopIteration) { + stopped_ = true; + break; + } + } + return; + } catch (const EnvoyException& ex) { + ENVOY_CONN_LOG(error, "dubbo error: {}", 
read_callbacks_->connection(), ex.what()); + read_callbacks_->connection().close(Network::ConnectionCloseType::NoFlush); + stats_.request_decoding_error_.inc(); + } + resetAllMessages(true); +} + +void ConnectionManager::sendLocalReply(MessageMetadata& metadata, + const DubboFilters::DirectResponse& response, + bool end_stream) { + if (read_callbacks_->connection().state() != Network::Connection::State::Open) { + return; + } + + Buffer::OwnedImpl buffer; + const DubboFilters::DirectResponse::ResponseType result = + response.encode(metadata, *protocol_, *deserializer_, buffer); + read_callbacks_->connection().write(buffer, end_stream); + + if (end_stream) { + read_callbacks_->connection().close(Network::ConnectionCloseType::FlushWrite); + } + + switch (result) { + case DubboFilters::DirectResponse::ResponseType::SuccessReply: + stats_.local_response_success_.inc(); + break; + case DubboFilters::DirectResponse::ResponseType::ErrorReply: + stats_.local_response_error_.inc(); + break; + case DubboFilters::DirectResponse::ResponseType::Exception: + stats_.local_response_business_exception_.inc(); + break; + default: + NOT_REACHED_GCOVR_EXCL_LINE; + } +} + +void ConnectionManager::continueDecoding() { + ENVOY_CONN_LOG(debug, "dubbo filter continued", read_callbacks_->connection()); + stopped_ = false; + dispatch(); + + if (!stopped_ && half_closed_) { + // If we're half closed, but not stopped waiting for an upstream, + // reset any pending rpcs and close the connection. 
+ resetAllMessages(false); + read_callbacks_->connection().close(Network::ConnectionCloseType::FlushWrite); + } +} + +void ConnectionManager::deferredMessage(ActiveMessage& message) { + if (!message.inserted()) { + return; + } + read_callbacks_->connection().dispatcher().deferredDelete( + message.removeFromList(active_message_list_)); +} + +void ConnectionManager::resetAllMessages(bool local_reset) { + while (!active_message_list_.empty()) { + if (local_reset) { + ENVOY_CONN_LOG(debug, "local close with active request", read_callbacks_->connection()); + stats_.cx_destroy_local_with_active_rq_.inc(); + } else { + ENVOY_CONN_LOG(debug, "remote close with active request", read_callbacks_->connection()); + stats_.cx_destroy_remote_with_active_rq_.inc(); + } + + active_message_list_.front()->onReset(); + } +} + +} // namespace DubboProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/network/dubbo_proxy/conn_manager.h b/source/extensions/filters/network/dubbo_proxy/conn_manager.h new file mode 100644 index 0000000000000..c46417862b8c6 --- /dev/null +++ b/source/extensions/filters/network/dubbo_proxy/conn_manager.h @@ -0,0 +1,107 @@ +#pragma once + +#include "envoy/common/time.h" +#include "envoy/config/filter/network/dubbo_proxy/v2alpha1/dubbo_proxy.pb.h" +#include "envoy/config/filter/network/dubbo_proxy/v2alpha1/dubbo_proxy.pb.validate.h" +#include "envoy/network/connection.h" +#include "envoy/network/filter.h" +#include "envoy/stats/scope.h" +#include "envoy/stats/stats.h" +#include "envoy/stats/stats_macros.h" +#include "envoy/stats/timespan.h" + +#include "common/common/logger.h" + +#include "extensions/filters/network/dubbo_proxy/active_message.h" +#include "extensions/filters/network/dubbo_proxy/decoder.h" +#include "extensions/filters/network/dubbo_proxy/decoder_event_handler.h" +#include "extensions/filters/network/dubbo_proxy/deserializer.h" +#include 
"extensions/filters/network/dubbo_proxy/filters/filter.h" +#include "extensions/filters/network/dubbo_proxy/protocol.h" +#include "extensions/filters/network/dubbo_proxy/stats.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace DubboProxy { + +/** + * Config is a configuration interface for ConnectionManager. + */ +class Config { +public: + virtual ~Config() = default; + + virtual DubboFilters::FilterChainFactory& filterFactory() PURE; + virtual DubboFilterStats& stats() PURE; + virtual ProtocolPtr createProtocol() PURE; + virtual DeserializerPtr createDeserializer() PURE; + virtual Router::Config& routerConfig() PURE; +}; + +// class ActiveMessagePtr; +class ConnectionManager : public Network::ReadFilter, + public Network::ConnectionCallbacks, + public DecoderCallbacks, + Logger::Loggable { +public: + using ConfigProtocolType = envoy::config::filter::network::dubbo_proxy::v2alpha1::ProtocolType; + using ConfigSerializationType = + envoy::config::filter::network::dubbo_proxy::v2alpha1::SerializationType; + + ConnectionManager(Config& config, Runtime::RandomGenerator& random_generator, + TimeSource& time_system); + ~ConnectionManager() override = default; + + // Network::ReadFilter + Network::FilterStatus onData(Buffer::Instance& data, bool end_stream) override; + Network::FilterStatus onNewConnection() override; + void initializeReadFilterCallbacks(Network::ReadFilterCallbacks&) override; + + // Network::ConnectionCallbacks + void onEvent(Network::ConnectionEvent) override; + void onAboveWriteBufferHighWatermark() override; + void onBelowWriteBufferLowWatermark() override; + + // DecoderCallbacks + DecoderEventHandler* newDecoderEventHandler() override; + void onHeartbeat(MessageMetadataSharedPtr metadata) override; + + DubboFilterStats& stats() const { return stats_; } + Network::Connection& connection() const { return read_callbacks_->connection(); } + TimeSource& time_system() const { return time_system_; } + 
Runtime::RandomGenerator& random_generator() const { return random_generator_; } + Config& config() const { return config_; } + SerializationType downstreamSerializationType() const { return deserializer_->type(); } + ProtocolType downstreamProtocolType() const { return protocol_->type(); } + + void continueDecoding(); + void deferredMessage(ActiveMessage& message); + void sendLocalReply(MessageMetadata& metadata, const DubboFilters::DirectResponse& response, + bool end_stream); + +private: + void dispatch(); + void resetAllMessages(bool local_reset); + + Buffer::OwnedImpl request_buffer_; + std::list active_message_list_; + + bool stopped_{false}; + bool half_closed_{false}; + + Config& config_; + TimeSource& time_system_; + DubboFilterStats& stats_; + Runtime::RandomGenerator& random_generator_; + + DeserializerPtr deserializer_; + ProtocolPtr protocol_; + DecoderPtr decoder_; + Network::ReadFilterCallbacks* read_callbacks_{}; +}; + +} // namespace DubboProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/network/dubbo_proxy/decoder.cc b/source/extensions/filters/network/dubbo_proxy/decoder.cc index d5f99b97f1a4b..c5be03feabbed 100644 --- a/source/extensions/filters/network/dubbo_proxy/decoder.cc +++ b/source/extensions/filters/network/dubbo_proxy/decoder.cc @@ -2,47 +2,179 @@ #include "common/common/macros.h" +#include "extensions/filters/network/dubbo_proxy/heartbeat_response.h" + namespace Envoy { namespace Extensions { namespace NetworkFilters { namespace DubboProxy { -Decoder::Decoder(ProtocolPtr&& protocol, DeserializerPtr&& deserializer, - DecoderCallbacks& decoder_callbacks) - : deserializer_(std::move(deserializer)), protocol_(std::move(protocol)), - decoder_callbacks_(decoder_callbacks) {} - -void Decoder::onData(Buffer::Instance& data) { - ENVOY_LOG(debug, "dubbo: {} bytes available", data.length()); - while (true) { - if (!decode_ended_) { - if (!protocol_->decode(data, 
&context_)) { - ENVOY_LOG(debug, "dubbo: need more data for {} protocol", protocol_->name()); - return; - } - - decode_ended_ = true; - ENVOY_LOG(debug, "dubbo: {} protocol decode ended", protocol_->name()); +DecoderStateMachine::DecoderStatus +DecoderStateMachine::onTransportBegin(Buffer::Instance& buffer, Protocol::Context& context) { + if (!protocol_.decode(buffer, &context, metadata_)) { + ENVOY_LOG(debug, "dubbo decoder: need more data for {} protocol", protocol_.name()); + return DecoderStatus(ProtocolState::WaitForData); + } + + if (context.is_heartbeat_) { + ENVOY_LOG(debug, "dubbo decoder: this is the {} heartbeat message", protocol_.name()); + buffer.drain(context.header_size_); + decoder_callbacks_.onHeartbeat(metadata_); + return DecoderStatus(ProtocolState::Done, Network::FilterStatus::Continue); + } else { + handler_ = decoder_callbacks_.newDecoderEventHandler(); + } + return DecoderStatus(ProtocolState::OnTransferHeaderTo, handler_->transportBegin()); +} + +DecoderStateMachine::DecoderStatus DecoderStateMachine::onTransportEnd() { + ENVOY_LOG(debug, "dubbo decoder: complete protocol processing"); + return DecoderStatus(ProtocolState::Done, handler_->transportEnd()); +} + +DecoderStateMachine::DecoderStatus DecoderStateMachine::onTransferHeaderTo(Buffer::Instance& buffer, + size_t length) { + ENVOY_LOG(debug, "dubbo decoder: transfer protocol header, buffer size {}, header size {}", + buffer.length(), length); + return DecoderStatus(ProtocolState::OnMessageBegin, handler_->transferHeaderTo(buffer, length)); +} + +DecoderStateMachine::DecoderStatus DecoderStateMachine::onTransferBodyTo(Buffer::Instance& buffer, + int32_t length) { + ENVOY_LOG(debug, "dubbo decoder: transfer protocol body, buffer size {}, body size {}", + buffer.length(), length); + return DecoderStatus(ProtocolState::OnTransportEnd, handler_->transferBodyTo(buffer, length)); +} + +DecoderStateMachine::DecoderStatus DecoderStateMachine::onMessageBegin() { + ENVOY_LOG(debug, "dubbo 
decoder: start deserializing messages, deserializer name {}", + deserializer_.name()); + return DecoderStatus(ProtocolState::OnMessageEnd, + handler_->messageBegin(metadata_->message_type(), metadata_->request_id(), + metadata_->serialization_type())); +} + +DecoderStateMachine::DecoderStatus DecoderStateMachine::onMessageEnd(Buffer::Instance& buffer, + int32_t message_size) { + ENVOY_LOG(debug, "dubbo decoder: expected body size is {}", message_size); + + if (buffer.length() < static_cast(message_size)) { + ENVOY_LOG(debug, "dubbo decoder: need more data for {} deserialization, current size {}", + deserializer_.name(), buffer.length()); + return DecoderStatus(ProtocolState::WaitForData); + } + + switch (metadata_->message_type()) { + case MessageType::Oneway: + case MessageType::Request: + deserializer_.deserializeRpcInvocation(buffer, message_size, metadata_); + break; + case MessageType::Response: { + auto info = deserializer_.deserializeRpcResult(buffer, message_size); + if (info->hasException()) { + metadata_->setMessageType(MessageType::Exception); } + break; + } + default: + NOT_REACHED_GCOVR_EXCL_LINE; + } + + ENVOY_LOG(debug, "dubbo decoder: ends the deserialization of the message"); + return DecoderStatus(ProtocolState::OnTransferBodyTo, handler_->messageEnd(metadata_)); +} - ENVOY_LOG(debug, "dubbo: expected body size is {}", context_.body_size_); +DecoderStateMachine::DecoderStatus DecoderStateMachine::handleState(Buffer::Instance& buffer) { + switch (state_) { + case ProtocolState::OnTransportBegin: + return onTransportBegin(buffer, context_); + case ProtocolState::OnTransferHeaderTo: + return onTransferHeaderTo(buffer, context_.header_size_); + case ProtocolState::OnMessageBegin: + return onMessageBegin(); + case ProtocolState::OnMessageEnd: + return onMessageEnd(buffer, context_.body_size_); + case ProtocolState::OnTransferBodyTo: + return onTransferBodyTo(buffer, context_.body_size_); + case ProtocolState::OnTransportEnd: + return onTransportEnd(); 
+ default: + NOT_REACHED_GCOVR_EXCL_LINE; + } +} + +ProtocolState DecoderStateMachine::run(Buffer::Instance& buffer) { + while (state_ != ProtocolState::Done) { + ENVOY_LOG(trace, "dubbo decoder: state {}, {} bytes available", + ProtocolStateNameValues::name(state_), buffer.length()); - if (data.length() < context_.body_size_) { - ENVOY_LOG(debug, "dubbo: need more data for {} deserialization", deserializer_->name()); - return; + DecoderStatus s = handleState(buffer); + if (s.next_state_ == ProtocolState::WaitForData) { + return ProtocolState::WaitForData; } - if (context_.is_request_) { - decoder_callbacks_.onRpcInvocation( - deserializer_->deserializeRpcInvocation(data, context_.body_size_)); - ENVOY_LOG(debug, "dubbo: {} RpcInvocation deserialize ended", deserializer_->name()); - } else { - decoder_callbacks_.onRpcResult( - deserializer_->deserializeRpcResult(data, context_.body_size_)); - ENVOY_LOG(debug, "dubbo: {} RpcResult deserialize ended", deserializer_->name()); + state_ = s.next_state_; + + ASSERT(s.filter_status_.has_value()); + if (s.filter_status_.value() == Network::FilterStatus::StopIteration) { + return ProtocolState::StopIteration; } - decode_ended_ = false; } + + return state_; +} + +typedef std::unique_ptr DecoderStateMachinePtr; + +Decoder::Decoder(Protocol& protocol, Deserializer& deserializer, + DecoderCallbacks& decoder_callbacks) + : deserializer_(deserializer), protocol_(protocol), decoder_callbacks_(decoder_callbacks) {} + +Network::FilterStatus Decoder::onData(Buffer::Instance& data, bool& buffer_underflow) { + ENVOY_LOG(debug, "dubbo decoder: {} bytes available", data.length()); + buffer_underflow = false; + + if (!decode_started_) { + start(); + } + + ASSERT(state_machine_ != nullptr); + + ENVOY_LOG(debug, "dubbo decoder: protocol {}, state {}, {} bytes available", protocol_.name(), + ProtocolStateNameValues::name(state_machine_->currentState()), data.length()); + + ProtocolState rv = state_machine_->run(data); + switch (rv) { + case 
ProtocolState::WaitForData: + ENVOY_LOG(debug, "dubbo decoder: wait for data"); + buffer_underflow = true; + return Network::FilterStatus::Continue; + case ProtocolState::StopIteration: + ENVOY_LOG(debug, "dubbo decoder: wait for continuation"); + return Network::FilterStatus::StopIteration; + default: + break; + } + + ASSERT(rv == ProtocolState::Done); + + complete(); + buffer_underflow = (data.length() == 0); + ENVOY_LOG(debug, "dubbo decoder: data length {}", data.length()); + return Network::FilterStatus::Continue; +} + +void Decoder::start() { + metadata_ = std::make_shared(); + state_machine_ = std::make_unique(protocol_, deserializer_, metadata_, + decoder_callbacks_); + decode_started_ = true; +} + +void Decoder::complete() { + metadata_.reset(); + state_machine_.reset(); + decode_started_ = false; } } // namespace DubboProxy diff --git a/source/extensions/filters/network/dubbo_proxy/decoder.h b/source/extensions/filters/network/dubbo_proxy/decoder.h index b311877e71198..71bde4016c665 100644 --- a/source/extensions/filters/network/dubbo_proxy/decoder.h +++ b/source/extensions/filters/network/dubbo_proxy/decoder.h @@ -5,6 +5,7 @@ #include "common/buffer/buffer_impl.h" #include "common/common/logger.h" +#include "extensions/filters/network/dubbo_proxy/decoder_event_handler.h" #include "extensions/filters/network/dubbo_proxy/deserializer.h" #include "extensions/filters/network/dubbo_proxy/protocol.h" @@ -13,20 +14,108 @@ namespace Extensions { namespace NetworkFilters { namespace DubboProxy { -class DecoderCallbacks { +#define ALL_PROTOCOL_STATES(FUNCTION) \ + FUNCTION(StopIteration) \ + FUNCTION(WaitForData) \ + FUNCTION(OnTransportBegin) \ + FUNCTION(OnTransportEnd) \ + FUNCTION(OnMessageBegin) \ + FUNCTION(OnMessageEnd) \ + FUNCTION(OnTransferHeaderTo) \ + FUNCTION(OnTransferBodyTo) \ + FUNCTION(Done) + +/** + * ProtocolState represents a set of states used in a state machine to decode Dubbo requests + * and responses. 
+ */ +enum class ProtocolState { ALL_PROTOCOL_STATES(GENERATE_ENUM) }; + +class ProtocolStateNameValues { +public: + static const std::string& name(ProtocolState state) { + size_t i = static_cast(state); + ASSERT(i < names().size()); + return names()[i]; + } + +private: + static const std::vector& names() { + CONSTRUCT_ON_FIRST_USE(std::vector, {ALL_PROTOCOL_STATES(GENERATE_STRING)}); + } +}; + +class DecoderStateMachine : public Logger::Loggable { public: - virtual ~DecoderCallbacks() {} - virtual void onRpcInvocation(RpcInvocationPtr&& invo) PURE; - virtual void onRpcResult(RpcResultPtr&& res) PURE; + DecoderStateMachine(Protocol& protocol, Deserializer& deserializer, + MessageMetadataSharedPtr& metadata, DecoderCallbacks& decoder_callbacks) + : protocol_(protocol), deserializer_(deserializer), metadata_(metadata), + decoder_callbacks_(decoder_callbacks), state_(ProtocolState::OnTransportBegin) {} + + /** + * Consumes as much data from the configured Buffer as possible and executes the decoding state + * machine. Returns ProtocolState::WaitForData if more data is required to complete processing of + * a message. Returns ProtocolState::Done when the end of a message is successfully processed. + * Once the Done state is reached, further invocations of run return immediately with Done. + * + * @param buffer a buffer containing the remaining data to be processed + * @return ProtocolState returns with ProtocolState::WaitForData or ProtocolState::Done + * @throw Envoy Exception if thrown by the underlying Protocol + */ + ProtocolState run(Buffer::Instance& buffer); + + /** + * @return the current ProtocolState + */ + ProtocolState currentState() const { return state_; } + + /** + * Set the current state. Used for testing only. 
+ */ + void setCurrentState(ProtocolState state) { state_ = state; } + +private: + struct DecoderStatus { + DecoderStatus() = default; + DecoderStatus(ProtocolState next_state) : next_state_(next_state), filter_status_{} {}; + DecoderStatus(ProtocolState next_state, Network::FilterStatus filter_status) + : next_state_(next_state), filter_status_(filter_status){}; + + ProtocolState next_state_; + absl::optional filter_status_; + }; + + // These functions map directly to the matching ProtocolState values. Each returns the next state + // or ProtocolState::WaitForData if more data is required. + DecoderStatus onTransportBegin(Buffer::Instance& buffer, Protocol::Context& context); + DecoderStatus onTransportEnd(); + DecoderStatus onTransferHeaderTo(Buffer::Instance& buffer, size_t length); + DecoderStatus onTransferBodyTo(Buffer::Instance& buffer, int32_t length); + DecoderStatus onMessageBegin(); + DecoderStatus onMessageEnd(Buffer::Instance& buffer, int32_t message_size); + + // handleState delegates to the appropriate method based on state_. + DecoderStatus handleState(Buffer::Instance& buffer); + + Protocol& protocol_; + Deserializer& deserializer_; + MessageMetadataSharedPtr metadata_; + DecoderCallbacks& decoder_callbacks_; + + ProtocolState state_; + Protocol::Context context_; + + DecoderEventHandler* handler_; }; +typedef std::unique_ptr DecoderStateMachinePtr; + /** * Decoder encapsulates a configured and ProtocolPtr and SerializationPtr. 
*/ class Decoder : public Logger::Loggable { public: - Decoder(ProtocolPtr&& protocol, DeserializerPtr&& deserializer, - DecoderCallbacks& decoder_callbacks); + Decoder(Protocol& protocol, Deserializer& deserializer, DecoderCallbacks& decoder_callbacks); /** * Drains data from the given buffer @@ -34,16 +123,20 @@ class Decoder : public Logger::Loggable { * @param data a Buffer containing Dubbo protocol data * @throw EnvoyException on Dubbo protocol errors */ - void onData(Buffer::Instance& data); + Network::FilterStatus onData(Buffer::Instance& data, bool& buffer_underflow); - const Deserializer& serializer() { return *deserializer_; } - const Protocol& protocol() { return *protocol_; } + const Deserializer& serializer() { return deserializer_; } + const Protocol& protocol() { return protocol_; } private: - DeserializerPtr deserializer_; - ProtocolPtr protocol_; - bool decode_ended_ = false; - Protocol::Context context_; + void start(); + void complete(); + + MessageMetadataSharedPtr metadata_; + Deserializer& deserializer_; + Protocol& protocol_; + DecoderStateMachinePtr state_machine_; + bool decode_started_ = false; DecoderCallbacks& decoder_callbacks_; }; diff --git a/source/extensions/filters/network/dubbo_proxy/deserializer.h b/source/extensions/filters/network/dubbo_proxy/deserializer.h index bc67abcb3bc8a..2a153ecc23f31 100644 --- a/source/extensions/filters/network/dubbo_proxy/deserializer.h +++ b/source/extensions/filters/network/dubbo_proxy/deserializer.h @@ -10,6 +10,7 @@ #include "common/singleton/const_singleton.h" #include "extensions/filters/network/dubbo_proxy/message.h" +#include "extensions/filters/network/dubbo_proxy/metadata.h" namespace Envoy { namespace Extensions { @@ -30,7 +31,6 @@ class DeserializerNameValues { const DeserializerTypeNameMap deserializerTypeNameMap = { {SerializationType::Hessian, "hessian"}, - {SerializationType::Json, "json"}, }; const std::string& fromType(SerializationType type) const { @@ -52,7 +52,7 @@ typedef 
ConstSingleton DeserializerNames; */ class RpcInvocation { public: - virtual ~RpcInvocation() {} + virtual ~RpcInvocation() = default; virtual const std::string& getMethodName() const PURE; virtual const std::string& getServiceName() const PURE; virtual const std::string& getServiceVersion() const PURE; @@ -67,7 +67,7 @@ typedef std::unique_ptr RpcInvocationPtr; */ class RpcResult { public: - virtual ~RpcResult() {} + virtual ~RpcResult() = default; virtual bool hasException() const PURE; }; @@ -75,7 +75,7 @@ typedef std::unique_ptr RpcResultPtr; class Deserializer { public: - virtual ~Deserializer() {} + virtual ~Deserializer() = default; /** * Return this Deserializer's name * @@ -96,8 +96,8 @@ class Deserializer { * @body_size the complete RpcInvocation size * @throws EnvoyException if the data is not valid for this serialization */ - virtual RpcInvocationPtr deserializeRpcInvocation(Buffer::Instance& buffer, - size_t body_size) PURE; + virtual void deserializeRpcInvocation(Buffer::Instance& buffer, size_t body_size, + MessageMetadataSharedPtr metadata) PURE; /** * deserialize result of an rpc call * If successful, the RpcResult removed from the buffer @@ -129,7 +129,7 @@ typedef std::unique_ptr DeserializerPtr; */ class NamedDeserializerConfigFactory { public: - virtual ~NamedDeserializerConfigFactory() {} + virtual ~NamedDeserializerConfigFactory() = default; /** * Create a particular Dubbo deserializer. 
diff --git a/source/extensions/filters/network/dubbo_proxy/deserializer_impl.cc b/source/extensions/filters/network/dubbo_proxy/deserializer_impl.cc index 353655af5f4ae..985c0d32fd977 100644 --- a/source/extensions/filters/network/dubbo_proxy/deserializer_impl.cc +++ b/source/extensions/filters/network/dubbo_proxy/deserializer_impl.cc @@ -3,15 +3,7 @@ namespace Envoy { namespace Extensions { namespace NetworkFilters { -namespace DubboProxy { - -RpcInvocationImpl::~RpcInvocationImpl() {} -RpcInvocationImpl::RpcInvocationImpl(const std::string& method_name, - const std::string& service_name, - const std::string& service_version) - : method_name_(method_name), service_name_(service_name), service_version_(service_version) {} - -} // namespace DubboProxy +namespace DubboProxy {} // namespace DubboProxy } // namespace NetworkFilters } // namespace Extensions } // namespace Envoy \ No newline at end of file diff --git a/source/extensions/filters/network/dubbo_proxy/deserializer_impl.h b/source/extensions/filters/network/dubbo_proxy/deserializer_impl.h index a0f962c3cce13..252143c3454c7 100644 --- a/source/extensions/filters/network/dubbo_proxy/deserializer_impl.h +++ b/source/extensions/filters/network/dubbo_proxy/deserializer_impl.h @@ -7,21 +7,6 @@ namespace Extensions { namespace NetworkFilters { namespace DubboProxy { -class RpcInvocationImpl : public RpcInvocation { -public: - ~RpcInvocationImpl(); - RpcInvocationImpl(const std::string& method_name, const std::string& service_name, - const std::string& service_version); - virtual const std::string& getMethodName() const override { return method_name_; } - virtual const std::string& getServiceName() const override { return service_name_; } - virtual const std::string& getServiceVersion() const override { return service_version_; } - -private: - std::string method_name_; - std::string service_name_; - std::string service_version_; -}; - class RpcResultImpl : public RpcResult { public: RpcResultImpl() {} diff --git 
a/source/extensions/filters/network/dubbo_proxy/dubbo_protocol_impl.cc b/source/extensions/filters/network/dubbo_proxy/dubbo_protocol_impl.cc index a988c34b16c95..f7b6f20a73f7e 100644 --- a/source/extensions/filters/network/dubbo_proxy/dubbo_protocol_impl.cc +++ b/source/extensions/filters/network/dubbo_proxy/dubbo_protocol_impl.cc @@ -83,70 +83,6 @@ void parseResponseInfoFromBuffer(Buffer::Instance& buffer, MessageMetadataShared metadata->setResponseStatus(status); } -void RequestMessageImpl::fromBuffer(Buffer::Instance& data) { - ASSERT(data.length() >= DubboProtocolImpl::MessageSize); - uint8_t flag = data.peekInt(FlagOffset); - is_two_way_ = (flag & TwoWayMask) == TwoWayMask ? true : false; - type_ = static_cast(flag & SerializationTypeMask); - if (!isValidSerializationType(type_)) { - throw EnvoyException( - fmt::format("invalid dubbo message serialization type {}", - static_cast::type>(type_))); - } -} - -void ResponseMessageImpl::fromBuffer(Buffer::Instance& buffer) { - ASSERT(buffer.length() >= DubboProtocolImpl::MessageSize); - status_ = static_cast(buffer.peekInt(StatusOffset)); - if (!isValidResponseStatus(status_)) { - throw EnvoyException( - fmt::format("invalid dubbo message response status {}", - static_cast::type>(status_))); - } -} - -bool DubboProtocolImpl::decode(Buffer::Instance& buffer, Protocol::Context* context) { - ASSERT(callbacks_); - - if (buffer.length() < DubboProtocolImpl::MessageSize) { - return false; - } - - uint16_t magic_number = buffer.peekBEInt(); - if (magic_number != MagicNumber) { - throw EnvoyException(fmt::format("invalid dubbo message magic number {}", magic_number)); - } - - uint8_t flag = buffer.peekInt(FlagOffset); - MessageType type = - (flag & MessageTypeMask) == MessageTypeMask ? MessageType::Request : MessageType::Response; - bool is_event = (flag & EventMask) == EventMask ? 
true : false; - int64_t request_id = buffer.peekBEInt(RequestIDOffset); - int32_t body_size = buffer.peekBEInt(BodySizeOffset); - - if (body_size > MaxBodySize || body_size <= 0) { - throw EnvoyException(fmt::format("invalid dubbo message size {}", body_size)); - } - - context->body_size_ = body_size; - - if (type == MessageType::Request) { - RequestMessageImplPtr req = - std::make_unique(request_id, body_size, is_event); - req->fromBuffer(buffer); - context->is_request_ = true; - callbacks_->onRequestMessage(std::move(req)); - } else { - ResponseMessageImplPtr res = - std::make_unique(request_id, body_size, is_event); - res->fromBuffer(buffer); - callbacks_->onResponseMessage(std::move(res)); - } - - buffer.drain(MessageSize); - return true; -} - bool DubboProtocolImpl::decode(Buffer::Instance& buffer, Protocol::Context* context, MessageMetadataSharedPtr metadata) { if (!metadata) { @@ -169,7 +105,8 @@ bool DubboProtocolImpl::decode(Buffer::Instance& buffer, Protocol::Context* cont int64_t request_id = buffer.peekBEInt(RequestIDOffset); int32_t body_size = buffer.peekBEInt(BodySizeOffset); - if (body_size > MaxBodySize || body_size <= 0) { + // The body size of the heartbeat message is zero. 
+ if (body_size > MaxBodySize || body_size < 0) { throw EnvoyException(fmt::format("invalid dubbo message size {}", body_size)); } @@ -195,7 +132,12 @@ bool DubboProtocolImpl::encode(Buffer::Instance& buffer, int32_t body_size, case MessageType::Response: { ASSERT(metadata.response_status().has_value()); buffer.writeBEInt(MagicNumber); - buffer.writeByte(static_cast(metadata.serialization_type())); + uint8_t flag = static_cast(metadata.serialization_type()); + if (metadata.is_event()) { + ASSERT(0 == body_size); + flag = flag ^ EventMask; + } + buffer.writeByte(flag); buffer.writeByte(static_cast(metadata.response_status().value())); buffer.writeBEInt(metadata.request_id()); buffer.writeBEInt(body_size); diff --git a/source/extensions/filters/network/dubbo_proxy/dubbo_protocol_impl.h b/source/extensions/filters/network/dubbo_proxy/dubbo_protocol_impl.h index 1f3804c0e6c90..6146df34f5114 100644 --- a/source/extensions/filters/network/dubbo_proxy/dubbo_protocol_impl.h +++ b/source/extensions/filters/network/dubbo_proxy/dubbo_protocol_impl.h @@ -7,72 +7,11 @@ namespace Extensions { namespace NetworkFilters { namespace DubboProxy { -class MessageImpl : public virtual Message { -public: - MessageImpl(int64_t request_id, int32_t body_size, bool is_event) - : request_id_(request_id), body_size_(body_size), is_event_(is_event) {} - virtual ~MessageImpl() {} - virtual int32_t bodySize() const override { return body_size_; } - - // Is a normal message or event - virtual bool isEvent() const override { return is_event_; } - - virtual int64_t requestId() const override { return request_id_; } - - virtual std::string toString() const override { - return fmt::format("body size:{}, is event:{}, request id: {}", body_size_, is_event_, - request_id_); - } - -protected: - int64_t request_id_; - int32_t body_size_; - bool is_event_; -}; - -class RequestMessageImpl : public MessageImpl, public RequestMessage { -public: - using MessageImpl::MessageImpl; - - virtual 
~RequestMessageImpl() {} - void fromBuffer(Buffer::Instance& data); - virtual MessageType messageType() const override { return MessageType::Request; } - - virtual SerializationType serializationType() const override { return type_; } - - virtual bool isTwoWay() const override { return is_two_way_; } - -private: - SerializationType type_; - bool is_two_way_; -}; - -typedef std::unique_ptr RequestMessageImplPtr; - -class ResponseMessageImpl : public MessageImpl, public ResponseMessage { -public: - using MessageImpl::MessageImpl; - - virtual ~ResponseMessageImpl() {} - void fromBuffer(Buffer::Instance& data); - - virtual MessageType messageType() const override { return MessageType::Response; } - - virtual ResponseStatus responseStatus() const override { return status_; } - -private: - ResponseStatus status_; -}; - -typedef std::unique_ptr ResponseMessageImplPtr; - class DubboProtocolImpl : public Protocol { public: DubboProtocolImpl() = default; - explicit DubboProtocolImpl(ProtocolCallbacks* callbacks) : callbacks_(callbacks) {} const std::string& name() const override { return ProtocolNames::get().fromType(type()); } ProtocolType type() const override { return ProtocolType::Dubbo; } - bool decode(Buffer::Instance& buffer, Protocol::Context* context) override; bool decode(Buffer::Instance& buffer, Protocol::Context* context, MessageMetadataSharedPtr metadata) override; bool encode(Buffer::Instance& buffer, int32_t body_size, @@ -80,9 +19,6 @@ class DubboProtocolImpl : public Protocol { static constexpr uint8_t MessageSize = 16; static constexpr int32_t MaxBodySize = 16 * 1024 * 1024; - -private: - ProtocolCallbacks* callbacks_; }; } // namespace DubboProxy diff --git a/source/extensions/filters/network/dubbo_proxy/filter.cc b/source/extensions/filters/network/dubbo_proxy/filter.cc deleted file mode 100644 index 823b389143ef8..0000000000000 --- a/source/extensions/filters/network/dubbo_proxy/filter.cc +++ /dev/null @@ -1,238 +0,0 @@ -#include 
"extensions/filters/network/dubbo_proxy/filter.h" - -#include "envoy/common/exception.h" - -#include "common/common/fmt.h" - -#include "extensions/filters/network/dubbo_proxy/buffer_helper.h" -#include "extensions/filters/network/dubbo_proxy/dubbo_protocol_impl.h" -#include "extensions/filters/network/dubbo_proxy/hessian_deserializer_impl.h" - -namespace Envoy { -namespace Extensions { -namespace NetworkFilters { -namespace DubboProxy { - -namespace { - -using ConfigProtocolType = envoy::config::filter::network::dubbo_proxy::v2alpha1::ProtocolType; - -typedef std::map ProtocolTypeMap; - -static const ProtocolTypeMap& protocolTypeMap() { - CONSTRUCT_ON_FIRST_USE(ProtocolTypeMap, { - {ConfigProtocolType::Dubbo, ProtocolType::Dubbo}, - }); -} - -ProtocolType lookupProtocolType(ConfigProtocolType config_type) { - const auto& iter = protocolTypeMap().find(config_type); - if (iter == protocolTypeMap().end()) { - throw EnvoyException(fmt::format( - "unknown protocol {}", - envoy::config::filter::network::dubbo_proxy::v2alpha1::ProtocolType_Name(config_type))); - } - return iter->second; -} - -using ConfigSerializationType = - envoy::config::filter::network::dubbo_proxy::v2alpha1::SerializationType; - -typedef std::map SerializationTypeMap; - -static const SerializationTypeMap& serializationTypeMap() { - CONSTRUCT_ON_FIRST_USE(SerializationTypeMap, - { - {ConfigSerializationType::Hessian2, SerializationType::Hessian}, - }); -} - -SerializationType lookupSerializationType(ConfigSerializationType type) { - const auto& iter = serializationTypeMap().find(type); - if (iter == serializationTypeMap().end()) { - throw EnvoyException(fmt::format( - "unknown deserializer {}", - envoy::config::filter::network::dubbo_proxy::v2alpha1::SerializationType_Name(type))); - } - - return iter->second; -} - -} // namespace - -Filter::Filter(const std::string& stat_prefix, ConfigProtocolType protocol_type, - ConfigSerializationType serialization_type, Stats::Scope& scope, - TimeSource& 
time_source) - : stats_(DubboFilterStats::generateStats(stat_prefix, scope)), - protocol_type_(lookupProtocolType(protocol_type)), - serialization_type_(lookupSerializationType(serialization_type)), time_source_(time_source) {} - -Filter::~Filter() = default; - -Network::FilterStatus Filter::onData(Buffer::Instance& data, bool) { - if (!sniffing_) { - if (request_buffer_.length() > 0) { - // Stopped sniffing during response (in onWrite). Make sure leftover request_buffer_ contents - // are at the start of data or the upstream will see a corrupted request. - request_buffer_.move(data); - data.move(request_buffer_); - ASSERT(request_buffer_.length() == 0); - } - - return Network::FilterStatus::Continue; - } - - ENVOY_LOG(trace, "dubbo: read {} bytes", data.length()); - request_buffer_.move(data); - - try { - if (!request_decoder_) { - request_decoder_ = createDecoder(*this); - } - - BufferWrapper wrapped(request_buffer_); - request_decoder_->onData(wrapped); - - // Move consumed portion of request back to data for the upstream to consume. - uint64_t pos = wrapped.position(); - if (pos > 0) { - data.move(request_buffer_, pos); - } - } catch (const EnvoyException& ex) { - ENVOY_LOG(error, "dubbo: error {}", ex.what()); - data.move(request_buffer_); - stats_.request_decoding_error_.inc(); - sniffing_ = false; - } - - return Network::FilterStatus::Continue; -} - -Network::FilterStatus Filter::onWrite(Buffer::Instance& data, bool) { - if (!sniffing_) { - if (response_buffer_.length() > 0) { - // Stopped sniffing during request (in onData). Make sure response_buffer_ contents are at the - // start of data or the downstream will see a corrupted response. 
- response_buffer_.move(data); - data.move(response_buffer_); - ASSERT(response_buffer_.length() == 0); - } - - return Network::FilterStatus::Continue; - } - - ENVOY_LOG(trace, "dubbo: wrote {} bytes", data.length()); - response_buffer_.move(data); - - try { - if (!response_decoder_) { - response_decoder_ = createDecoder(*this); - } - - BufferWrapper wrapped(response_buffer_); - response_decoder_->onData(wrapped); - - // Move consumed portion of response back to data for the downstream to consume. - uint64_t pos = wrapped.position(); - if (pos > 0) { - data.move(response_buffer_, pos); - } - } catch (const EnvoyException& ex) { - ENVOY_LOG(error, "dubbo: error {}", ex.what()); - data.move(response_buffer_); - stats_.response_decoding_error_.inc(); - sniffing_ = false; - } - - return Network::FilterStatus::Continue; -} - -void Filter::onEvent(Network::ConnectionEvent event) { - if (active_call_map_.empty()) { - return; - } - - if (event == Network::ConnectionEvent::RemoteClose) { - stats_.cx_destroy_local_with_active_rq_.inc(); - } - - if (event == Network::ConnectionEvent::LocalClose) { - stats_.cx_destroy_remote_with_active_rq_.inc(); - } -} - -void Filter::onRequestMessage(RequestMessagePtr&& message) { - ASSERT(message); - ASSERT(message->messageType() == MessageType::Request); - - stats_.request_.inc(); - message->isTwoWay() ? stats_.request_twoway_.inc() : stats_.request_oneway_.inc(); - - if (message->isEvent()) { - stats_.request_event_.inc(); - } - - ENVOY_LOG(debug, "dubbo request: started {} message", message->requestId()); - - // One-way messages do not receive responses. 
- if (!message->isTwoWay()) { - return; - } - - auto request = std::make_unique(*this, message->requestId()); - active_call_map_.emplace(message->requestId(), std::move(request)); -} - -void Filter::onResponseMessage(ResponseMessagePtr&& message) { - ASSERT(message); - ASSERT(message->messageType() == MessageType::Response); - - auto itor = active_call_map_.find(message->requestId()); - if (itor == active_call_map_.end()) { - throw EnvoyException(fmt::format("unknown request id {}", message->requestId())); - } - active_call_map_.erase(itor); - - ENVOY_LOG(debug, "dubbo response: ended {} message", message->requestId()); - - stats_.response_.inc(); - switch (message->responseStatus()) { - case ResponseStatus::Ok: - stats_.response_success_.inc(); - break; - default: - stats_.response_error_.inc(); - ENVOY_LOG(error, "dubbo response status: {}", static_cast(message->responseStatus())); - break; - } -} - -void Filter::onRpcInvocation(RpcInvocationPtr&& invo) { - ENVOY_LOG(debug, "dubbo request: method name is {}, service name is {}, service version {}", - invo->getMethodName(), invo->getServiceName(), invo->getServiceVersion()); -} - -void Filter::onRpcResult(RpcResultPtr&& res) { - if (res->hasException()) { - stats_.response_exception_.inc(); - } -} - -DecoderPtr Filter::createDecoder(ProtocolCallbacks& prot_callback) { - auto parser = createProtocol(prot_callback); - auto serializer = createDeserializer(); - return std::make_unique(std::move(parser), std::move(serializer), *this); -} - -ProtocolPtr Filter::createProtocol(ProtocolCallbacks& callback) { - return NamedProtocolConfigFactory::getFactory(protocol_type_).createProtocol(callback); -} - -DeserializerPtr Filter::createDeserializer() { - return NamedDeserializerConfigFactory::getFactory(serialization_type_).createDeserializer(); -} - -} // namespace DubboProxy -} // namespace NetworkFilters -} // namespace Extensions -} // namespace Envoy \ No newline at end of file diff --git 
a/source/extensions/filters/network/dubbo_proxy/filter.h b/source/extensions/filters/network/dubbo_proxy/filter.h deleted file mode 100644 index c8e24d0a58c44..0000000000000 --- a/source/extensions/filters/network/dubbo_proxy/filter.h +++ /dev/null @@ -1,108 +0,0 @@ -#pragma once - -#include "envoy/common/time.h" -#include "envoy/config/filter/network/dubbo_proxy/v2alpha1/dubbo_proxy.pb.h" -#include "envoy/config/filter/network/dubbo_proxy/v2alpha1/dubbo_proxy.pb.validate.h" -#include "envoy/network/connection.h" -#include "envoy/network/filter.h" -#include "envoy/stats/scope.h" -#include "envoy/stats/stats.h" -#include "envoy/stats/stats_macros.h" -#include "envoy/stats/timespan.h" - -#include "common/common/logger.h" - -#include "extensions/filters/network/dubbo_proxy/decoder.h" -#include "extensions/filters/network/dubbo_proxy/deserializer.h" -#include "extensions/filters/network/dubbo_proxy/protocol.h" -#include "extensions/filters/network/dubbo_proxy/stats.h" - -namespace Envoy { -namespace Extensions { -namespace NetworkFilters { -namespace DubboProxy { - -class Filter : public Network::Filter, - public Network::ConnectionCallbacks, - public ProtocolCallbacks, - public DecoderCallbacks, - Logger::Loggable { -public: - using ConfigProtocolType = envoy::config::filter::network::dubbo_proxy::v2alpha1::ProtocolType; - using ConfigSerializationType = - envoy::config::filter::network::dubbo_proxy::v2alpha1::SerializationType; - - Filter(const std::string& stat_prefix, ConfigProtocolType protocol_type, - ConfigSerializationType serialization_type, Stats::Scope& scope, TimeSource& time_source); - virtual ~Filter(); - - // Network::ReadFilter - Network::FilterStatus onData(Buffer::Instance& data, bool end_stream) override; - Network::FilterStatus onNewConnection() override { return Network::FilterStatus::Continue; } - void initializeReadFilterCallbacks(Network::ReadFilterCallbacks&) override {} - - // Network::WriteFilter - Network::FilterStatus 
onWrite(Buffer::Instance& data, bool end_stream) override; - - // Network::ConnectionCallbacks - void onEvent(Network::ConnectionEvent) override; - void onAboveWriteBufferHighWatermark() override {} - void onBelowWriteBufferLowWatermark() override {} - - // ProtocolCallbacks - void onRequestMessage(RequestMessagePtr&& message) override; - void onResponseMessage(ResponseMessagePtr&& message) override; - - // DecoderCallbacks - void onRpcInvocation(RpcInvocationPtr&& invo) override; - void onRpcResult(RpcResultPtr&& res) override; - -private: - DecoderPtr createDecoder(ProtocolCallbacks& prot_callback); - ProtocolPtr createProtocol(ProtocolCallbacks& callback); - DeserializerPtr createDeserializer(); - - // ActiveMessage tracks downstream requests for which no response has been received. - struct ActiveMessage { - ActiveMessage(Filter& parent, int32_t request_id) - : parent_(parent), request_timer_(new Stats::Timespan(parent_.stats_.request_time_ms_, - parent_.time_source_)), - request_id_(request_id) { - parent_.stats_.request_active_.inc(); - } - ~ActiveMessage() { - parent_.stats_.request_active_.dec(); - request_timer_->complete(); - } - - Filter& parent_; - Stats::TimespanPtr request_timer_; - const int32_t request_id_; - absl::optional success_{}; - }; - typedef std::unique_ptr ActiveMessagePtr; - - // Downstream request decoder, callbacks, and buffer. - DecoderPtr request_decoder_; - Buffer::OwnedImpl request_buffer_; - - // Upstream response decoder, callbacks, and buffer. - DecoderPtr response_decoder_; - Buffer::OwnedImpl response_buffer_; - - // List of active request messages. 
- std::unordered_map active_call_map_; - - bool sniffing_{true}; - DubboFilterStats stats_; - - ProtocolType protocol_type_; - SerializationType serialization_type_; - - TimeSource& time_source_; -}; - -} // namespace DubboProxy -} // namespace NetworkFilters -} // namespace Extensions -} // namespace Envoy diff --git a/source/extensions/filters/network/dubbo_proxy/heartbeat_response.cc b/source/extensions/filters/network/dubbo_proxy/heartbeat_response.cc new file mode 100644 index 0000000000000..f966f9f86f8d0 --- /dev/null +++ b/source/extensions/filters/network/dubbo_proxy/heartbeat_response.cc @@ -0,0 +1,27 @@ +#include "extensions/filters/network/dubbo_proxy/heartbeat_response.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace DubboProxy { + +DubboFilters::DirectResponse::ResponseType +HeartbeatResponse::encode(MessageMetadata& metadata, DubboProxy::Protocol& protocol, Deserializer&, + Buffer::Instance& buffer) const { + ASSERT(metadata.response_status().value() == ResponseStatus::Ok); + ASSERT(metadata.message_type() == MessageType::Response); + ASSERT(metadata.is_event()); + + const size_t serialized_body_size = 0; + if (!protocol.encode(buffer, serialized_body_size, metadata)) { + throw EnvoyException("failed to encode heartbeat message"); + } + + ENVOY_LOG(debug, "buffer length {}", buffer.length()); + return DirectResponse::ResponseType::SuccessReply; +} + +} // namespace DubboProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/network/dubbo_proxy/heartbeat_response.h b/source/extensions/filters/network/dubbo_proxy/heartbeat_response.h new file mode 100644 index 0000000000000..4f53691c7f9bb --- /dev/null +++ b/source/extensions/filters/network/dubbo_proxy/heartbeat_response.h @@ -0,0 +1,26 @@ +#pragma once + +#include "extensions/filters/network/dubbo_proxy/deserializer.h" +#include "extensions/filters/network/dubbo_proxy/filters/filter.h" +#include 
"extensions/filters/network/dubbo_proxy/metadata.h" +#include "extensions/filters/network/dubbo_proxy/protocol.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace DubboProxy { + +struct HeartbeatResponse : public DubboFilters::DirectResponse, + Logger::Loggable { + HeartbeatResponse() = default; + ~HeartbeatResponse() override = default; + + using ResponseType = DubboFilters::DirectResponse::ResponseType; + ResponseType encode(MessageMetadata& metadata, Protocol& protocol, Deserializer& deserializer, + Buffer::Instance& buffer) const override; +}; + +} // namespace DubboProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/network/dubbo_proxy/hessian_deserializer_impl.cc b/source/extensions/filters/network/dubbo_proxy/hessian_deserializer_impl.cc index d317022f9e861..e095ee4fe9bb2 100644 --- a/source/extensions/filters/network/dubbo_proxy/hessian_deserializer_impl.cc +++ b/source/extensions/filters/network/dubbo_proxy/hessian_deserializer_impl.cc @@ -14,9 +14,9 @@ namespace Extensions { namespace NetworkFilters { namespace DubboProxy { -RpcInvocationPtr HessianDeserializerImpl::deserializeRpcInvocation(Buffer::Instance& buffer, - size_t body_size) { - ASSERT(buffer.length() >= body_size); +void HessianDeserializerImpl::deserializeRpcInvocation(Buffer::Instance& buffer, size_t body_size, + MessageMetadataSharedPtr metadata) { + ASSERT(buffer.length() >= static_cast(body_size)); size_t total_size = 0, size; // TODO(zyfjeff): Add format checker std::string dubbo_version = HessianUtils::peekString(buffer, &size); @@ -28,12 +28,14 @@ RpcInvocationPtr HessianDeserializerImpl::deserializeRpcInvocation(Buffer::Insta std::string method_name = HessianUtils::peekString(buffer, &size, total_size); total_size = total_size + size; - if (body_size < total_size) { + if (static_cast(body_size) < total_size) { throw EnvoyException( fmt::format("RpcInvocation size({}) large than 
body size({})", total_size, body_size)); } - buffer.drain(body_size); - return std::make_unique(method_name, service_name, service_version); + + metadata->setServiceName(service_name); + metadata->setServiceVersion(service_version); + metadata->setMethodName(method_name); } RpcResultPtr HessianDeserializerImpl::deserializeRpcResult(Buffer::Instance& buffer, @@ -72,7 +74,7 @@ RpcResultPtr HessianDeserializerImpl::deserializeRpcResult(Buffer::Instance& buf fmt::format("RpcResult is no value, but the rest of the body size({}) not equal 0", (body_size - total_size))); } - buffer.drain(body_size); + return result; } diff --git a/source/extensions/filters/network/dubbo_proxy/hessian_deserializer_impl.h b/source/extensions/filters/network/dubbo_proxy/hessian_deserializer_impl.h index 7544e362f3440..0e3dbe363f9a2 100644 --- a/source/extensions/filters/network/dubbo_proxy/hessian_deserializer_impl.h +++ b/source/extensions/filters/network/dubbo_proxy/hessian_deserializer_impl.h @@ -14,8 +14,8 @@ class HessianDeserializerImpl : public Deserializer { return DeserializerNames::get().fromType(type()); } virtual SerializationType type() const override { return SerializationType::Hessian; } - virtual RpcInvocationPtr deserializeRpcInvocation(Buffer::Instance& buffer, - size_t body_size) override; + virtual void deserializeRpcInvocation(Buffer::Instance& buffer, size_t body_size, + MessageMetadataSharedPtr metadata) override; virtual RpcResultPtr deserializeRpcResult(Buffer::Instance& buffer, size_t body_size) override; virtual size_t serializeRpcResult(Buffer::Instance& output_buffer, const std::string& content, RpcResponseType type) override; diff --git a/source/extensions/filters/network/dubbo_proxy/protocol.h b/source/extensions/filters/network/dubbo_proxy/protocol.h index 6fbcb7aca96e0..26ae6f9c58d3c 100644 --- a/source/extensions/filters/network/dubbo_proxy/protocol.h +++ b/source/extensions/filters/network/dubbo_proxy/protocol.h @@ -57,7 +57,7 @@ typedef ConstSingleton 
ProtocolNames; */ class ProtocolCallbacks { public: - virtual ~ProtocolCallbacks() {} + virtual ~ProtocolCallbacks() = default; virtual void onRequestMessage(RequestMessagePtr&& req) PURE; virtual void onResponseMessage(ResponseMessagePtr&& res) PURE; }; @@ -73,8 +73,8 @@ class Protocol { size_t header_size_ = 0; bool is_heartbeat_ = false; }; - virtual ~Protocol() {} - Protocol() {} + virtual ~Protocol() = default; + Protocol() = default; virtual const std::string& name() const PURE; /** @@ -82,22 +82,6 @@ class Protocol { */ virtual ProtocolType type() const PURE; - /* - * This interface will be deprecated, - * it is reserved for the purpose of compatibility with the existing Filter implementation, - * this interface will be deleted after the new Filter implementation code is submitted. - * - * decodes the dubbo protocol message, potentially invoking callbacks. - * If successful, the message is removed from the buffer. - * - * @param buffer the currently buffered dubbo data. - * @param context save the meta data of current messages - * @return bool true if a complete message was successfully consumed, false if more data - * is required. - * @throws EnvoyException if the data is not valid for this protocol. - */ - virtual bool decode(Buffer::Instance& buffer, Context* context) PURE; - /* * decodes the dubbo protocol message, potentially invoking callbacks. * If successful, the message is removed from the buffer. @@ -131,18 +115,7 @@ typedef std::unique_ptr ProtocolPtr; */ class NamedProtocolConfigFactory { public: - virtual ~NamedProtocolConfigFactory() {} - - /** - * This interface will be deprecated, - * it is reserved for the purpose of compatibility with the existing Filter implementation, - * this interface will be deleted after the new Filter implementation code is submitted. - * - * Create a particular Dubbo protocol. - * @param callbacks the callbacks to be notified of protocol decodes. - * @return protocol instance pointer. 
- */ - virtual ProtocolPtr createProtocol(ProtocolCallbacks& callbacks) PURE; + virtual ~NamedProtocolConfigFactory() = default; /** * Create a particular Dubbo protocol. @@ -171,10 +144,6 @@ class NamedProtocolConfigFactory { * ProtocolFactoryBase provides a template for a trivial NamedProtocolConfigFactory. */ template class ProtocolFactoryBase : public NamedProtocolConfigFactory { - ProtocolPtr createProtocol(ProtocolCallbacks& callbacks) override { - return std::make_unique(&callbacks); - } - ProtocolPtr createProtocol() override { return std::make_unique(); } std::string name() override { return name_; } diff --git a/source/extensions/filters/network/dubbo_proxy/router/router_impl.cc b/source/extensions/filters/network/dubbo_proxy/router/router_impl.cc index 8898b71168251..7788ed82befc0 100644 --- a/source/extensions/filters/network/dubbo_proxy/router/router_impl.cc +++ b/source/extensions/filters/network/dubbo_proxy/router/router_impl.cc @@ -152,6 +152,12 @@ void Router::onUpstreamData(Buffer::Instance& data, bool end_stream) { void Router::onEvent(Network::ConnectionEvent event) { if (!upstream_request_ || upstream_request_->response_complete_) { // Client closed connection after completing response. 
+ ENVOY_LOG(debug, "dubbo upstream request: the upstream request had completed"); + return; + } + + if (upstream_request_->stream_reset_ && event == Network::ConnectionEvent::LocalClose) { + ENVOY_LOG(debug, "dubbo upstream request: the stream reset"); return; } @@ -188,7 +194,8 @@ Router::UpstreamRequest::UpstreamRequest(Router& parent, Tcp::ConnectionPool::In deserializer_( NamedDeserializerConfigFactory::getFactory(serialization_type).createDeserializer()), protocol_(NamedProtocolConfigFactory::getFactory(protocol_type).createProtocol()), - request_complete_(false), response_started_(false), response_complete_(false) {} + request_complete_(false), response_started_(false), response_complete_(false), + stream_reset_(false) {} Router::UpstreamRequest::~UpstreamRequest() {} @@ -204,6 +211,8 @@ Network::FilterStatus Router::UpstreamRequest::start() { } void Router::UpstreamRequest::resetStream() { + stream_reset_ = true; + if (conn_pool_handle_) { ASSERT(!conn_data_); conn_pool_handle_->cancel(Tcp::ConnectionPool::CancelPolicy::Default); diff --git a/source/extensions/filters/network/dubbo_proxy/router/router_impl.h b/source/extensions/filters/network/dubbo_proxy/router/router_impl.h index d30a5989352b6..63bcfa0e4ae4c 100644 --- a/source/extensions/filters/network/dubbo_proxy/router/router_impl.h +++ b/source/extensions/filters/network/dubbo_proxy/router/router_impl.h @@ -81,6 +81,7 @@ class Router : public Tcp::ConnectionPool::UpstreamCallbacks, bool request_complete_ : 1; bool response_started_ : 1; bool response_complete_ : 1; + bool stream_reset_ : 1; }; void cleanup(); diff --git a/source/extensions/filters/network/dubbo_proxy/stats.h b/source/extensions/filters/network/dubbo_proxy/stats.h index b488401854d73..c86225b4ac97e 100644 --- a/source/extensions/filters/network/dubbo_proxy/stats.h +++ b/source/extensions/filters/network/dubbo_proxy/stats.h @@ -19,19 +19,22 @@ namespace DubboProxy { COUNTER(request_twoway) \ COUNTER(request_oneway) \ 
COUNTER(request_event) \ - COUNTER(request_invalid_type) \ COUNTER(request_decoding_error) \ + COUNTER(request_decoding_success) \ GAUGE(request_active) \ HISTOGRAM(request_time_ms) \ COUNTER(response) \ COUNTER(response_success) \ COUNTER(response_error) \ - COUNTER(response_exception) \ + COUNTER(response_error_caused_connection_close) \ + COUNTER(response_business_exception) \ COUNTER(response_decoding_error) \ + COUNTER(response_decoding_success) \ + COUNTER(local_response_success) \ + COUNTER(local_response_error) \ + COUNTER(local_response_business_exception) \ COUNTER(cx_destroy_local_with_active_rq) \ COUNTER(cx_destroy_remote_with_active_rq) \ - COUNTER(downstream_flow_control_paused_reading_total) \ - COUNTER(downstream_flow_control_resumed_reading_total) \ // clang-format on /** diff --git a/test/extensions/filters/network/dubbo_proxy/BUILD b/test/extensions/filters/network/dubbo_proxy/BUILD index 39ff88513658f..3d40c8ae9bb73 100644 --- a/test/extensions/filters/network/dubbo_proxy/BUILD +++ b/test/extensions/filters/network/dubbo_proxy/BUILD @@ -18,6 +18,7 @@ envoy_cc_mock( srcs = ["mocks.cc"], hdrs = ["mocks.h"], deps = [ + "//source/common/protobuf", "//source/common/protobuf:utility_lib", "//source/extensions/filters/network/dubbo_proxy:decoder_events_lib", "//source/extensions/filters/network/dubbo_proxy:deserializer_interface", @@ -28,6 +29,7 @@ envoy_cc_mock( "//test/mocks/network:network_mocks", "//test/mocks/stream_info:stream_info_mocks", "//test/test_common:printers_lib", + "@envoy_api//envoy/config/filter/network/dubbo_proxy/v2alpha1:dubbo_proxy_cc", ], ) @@ -83,19 +85,13 @@ envoy_extension_cc_test( srcs = ["config_test.cc"], extension_name = "envoy.filters.network.dubbo_proxy", deps = [ - "//source/extensions/filters/network/dubbo_proxy:config", - "//test/mocks/server:server_mocks", - ], -) - -envoy_extension_cc_test( - name = "filter_test", - srcs = ["filter_test.cc"], - extension_name = "envoy.filters.network.dubbo_proxy", - deps = [ + 
":mocks_lib", ":utility_lib", - "//source/extensions/filters/network/dubbo_proxy:filter_lib", + "//source/extensions/filters/network/dubbo_proxy:config", + "//source/extensions/filters/network/dubbo_proxy/filters:filter_config_interface", "//test/mocks/server:server_mocks", + "//test/test_common:registry_lib", + "@envoy_api//envoy/config/filter/network/dubbo_proxy/v2alpha1:dubbo_proxy_cc", ], ) @@ -162,3 +158,30 @@ envoy_extension_cc_test( "//test/mocks/server:server_mocks", ], ) + +envoy_extension_cc_test( + name = "decoder_test", + srcs = ["decoder_test.cc"], + extension_name = "envoy.filters.network.dubbo_proxy", + deps = [ + ":mocks_lib", + ":utility_lib", + "//source/extensions/filters/network/dubbo_proxy:decoder_lib", + "//source/extensions/filters/network/dubbo_proxy:metadata_lib", + ], +) + +envoy_extension_cc_test( + name = "conn_manager_test", + srcs = ["conn_manager_test.cc"], + extension_name = "envoy.filters.network.dubbo_proxy", + deps = [ + ":mocks_lib", + ":utility_lib", + "//source/extensions/filters/network/dubbo_proxy:config", + "//source/extensions/filters/network/dubbo_proxy:conn_manager_lib", + "//source/extensions/filters/network/dubbo_proxy:dubbo_protocol_impl_lib", + "//source/extensions/filters/network/dubbo_proxy:hessian_deserializer_impl_lib", + "//test/mocks/server:server_mocks", + ], +) diff --git a/test/extensions/filters/network/dubbo_proxy/config_test.cc b/test/extensions/filters/network/dubbo_proxy/config_test.cc index 11e5e3f5ef018..9c724b4745629 100644 --- a/test/extensions/filters/network/dubbo_proxy/config_test.cc +++ b/test/extensions/filters/network/dubbo_proxy/config_test.cc @@ -1,8 +1,12 @@ +#include "envoy/config/filter/network/dubbo_proxy/v2alpha1/dubbo_proxy.pb.h" #include "envoy/config/filter/network/dubbo_proxy/v2alpha1/dubbo_proxy.pb.validate.h" #include "extensions/filters/network/dubbo_proxy/config.h" +#include "extensions/filters/network/dubbo_proxy/filters/filter_config.h" +#include 
"test/extensions/filters/network/dubbo_proxy/mocks.h" #include "test/mocks/server/mocks.h" +#include "test/test_common/registry.h" #include "gmock/gmock.h" #include "gtest/gtest.h" @@ -14,14 +18,43 @@ namespace Extensions { namespace NetworkFilters { namespace DubboProxy { -TEST(DubboFilterConfigTest, ValidateFail) { +using DubboProxyProto = envoy::config::filter::network::dubbo_proxy::v2alpha1::DubboProxy; + +namespace { + +DubboProxyProto parseDubboProxyFromV2Yaml(const std::string& yaml) { + DubboProxyProto dubbo_proxy; + MessageUtil::loadFromYaml(yaml, dubbo_proxy); + return dubbo_proxy; +} + +} // namespace + +class DubboFilterConfigTestBase { +public: + void testConfig(DubboProxyProto& config) { + Network::FilterFactoryCb cb; + EXPECT_NO_THROW({ cb = factory_.createFilterFactoryFromProto(config, context_); }); + + Network::MockConnection connection; + EXPECT_CALL(connection, addReadFilter(_)); + cb(connection); + } + + NiceMock context_; + DubboProxyFilterConfigFactory factory_; +}; + +class DubboFilterConfigTest : public DubboFilterConfigTestBase, public testing::Test {}; + +TEST_F(DubboFilterConfigTest, ValidateFail) { NiceMock context; EXPECT_THROW(DubboProxyFilterConfigFactory().createFilterFactoryFromProto( envoy::config::filter::network::dubbo_proxy::v2alpha1::DubboProxy(), context), ProtoValidationException); } -TEST(DubboFilterConfigTest, ValidProtoConfiguration) { +TEST_F(DubboFilterConfigTest, ValidProtoConfiguration) { envoy::config::filter::network::dubbo_proxy::v2alpha1::DubboProxy config{}; config.set_stat_prefix("my_stat_prefix"); @@ -30,11 +63,11 @@ TEST(DubboFilterConfigTest, ValidProtoConfiguration) { DubboProxyFilterConfigFactory factory; Network::FilterFactoryCb cb = factory.createFilterFactoryFromProto(config, context); Network::MockConnection connection; - EXPECT_CALL(connection, addFilter(_)); + EXPECT_CALL(connection, addReadFilter(_)); cb(connection); } -TEST(DubboFilterConfigTest, DubboProxyWithEmptyProto) { 
+TEST_F(DubboFilterConfigTest, DubboProxyWithEmptyProto) { NiceMock context; DubboProxyFilterConfigFactory factory; envoy::config::filter::network::dubbo_proxy::v2alpha1::DubboProxy config = @@ -44,10 +77,93 @@ TEST(DubboFilterConfigTest, DubboProxyWithEmptyProto) { Network::FilterFactoryCb cb = factory.createFilterFactoryFromProto(config, context); Network::MockConnection connection; - EXPECT_CALL(connection, addFilter(_)); + EXPECT_CALL(connection, addReadFilter(_)); cb(connection); } +// Test config with an explicitly defined router filter. +TEST_F(DubboFilterConfigTest, DubboProxyWithExplicitRouterConfig) { + const std::string yaml = R"EOF( + stat_prefix: dubbo + route_config: + name: local_route + dubbo_filters: + - name: envoy.filters.dubbo.router + )EOF"; + + DubboProxyProto config = parseDubboProxyFromV2Yaml(yaml); + testConfig(config); +} + +// Test config with an unknown filter. +TEST_F(DubboFilterConfigTest, DubboProxyWithUnknownFilter) { + const std::string yaml = R"EOF( + stat_prefix: dubbo + route_config: + name: local_route + dubbo_filters: + - name: no_such_filter + - name: envoy.filters.dubbo.router + )EOF"; + + DubboProxyProto config = parseDubboProxyFromV2Yaml(yaml); + + EXPECT_THROW_WITH_REGEX(factory_.createFilterFactoryFromProto(config, context_), EnvoyException, + "no_such_filter"); +} + +// Test config with multiple filters. 
+TEST_F(DubboFilterConfigTest, DubboProxyWithMultipleFilters) { + const std::string yaml = R"EOF( + stat_prefix: ingress + route_config: + name: local_route + dubbo_filters: + - name: envoy.filters.dubbo.mock_filter + config: + "@type": type.googleapis.com/google.protobuf.Struct + value: + name: test_service + - name: envoy.filters.dubbo.router + )EOF"; + + DubboFilters::MockFilterConfigFactory factory; + Registry::InjectFactory registry(factory); + + DubboProxyProto config = parseDubboProxyFromV2Yaml(yaml); + testConfig(config); + + EXPECT_EQ(1, factory.config_struct_.fields_size()); + EXPECT_EQ("test_service", factory.config_struct_.fields().at("name").string_value()); + EXPECT_EQ("dubbo.ingress.", factory.config_stat_prefix_); +} + +TEST_F(DubboFilterConfigTest, CreateFilterChain) { + const std::string yaml = R"EOF( + stat_prefix: ingress + route_config: + name: local_route + dubbo_filters: + - name: envoy.filters.dubbo.mock_filter + config: + "@type": type.googleapis.com/google.protobuf.Struct + value: + name: test_service + - name: envoy.filters.dubbo.router + )EOF"; + + DubboFilters::MockFilterConfigFactory factory; + Registry::InjectFactory registry(factory); + + DubboProxyProto dubbo_config = parseDubboProxyFromV2Yaml(yaml); + + NiceMock context; + DubboFilters::MockFilterChainFactoryCallbacks callbacks; + ConfigImpl config(dubbo_config, context); + EXPECT_CALL(callbacks, addDecoderFilter(_)).Times(2); + config.createFilterChain(callbacks); +} + } // namespace DubboProxy } // namespace NetworkFilters } // namespace Extensions diff --git a/test/extensions/filters/network/dubbo_proxy/conn_manager_test.cc b/test/extensions/filters/network/dubbo_proxy/conn_manager_test.cc new file mode 100644 index 0000000000000..ab7149697c435 --- /dev/null +++ b/test/extensions/filters/network/dubbo_proxy/conn_manager_test.cc @@ -0,0 +1,1274 @@ +#include "envoy/config/filter/network/dubbo_proxy/v2alpha1/dubbo_proxy.pb.h" +#include 
"envoy/config/filter/network/dubbo_proxy/v2alpha1/dubbo_proxy.pb.validate.h" + +#include "common/buffer/buffer_impl.h" + +#include "extensions/filters/network/dubbo_proxy/app_exception.h" +#include "extensions/filters/network/dubbo_proxy/config.h" +#include "extensions/filters/network/dubbo_proxy/conn_manager.h" +#include "extensions/filters/network/dubbo_proxy/dubbo_protocol_impl.h" +#include "extensions/filters/network/dubbo_proxy/hessian_deserializer_impl.h" + +#include "test/extensions/filters/network/dubbo_proxy/mocks.h" +#include "test/extensions/filters/network/dubbo_proxy/utility.h" +#include "test/mocks/network/mocks.h" +#include "test/mocks/server/mocks.h" +#include "test/test_common/printers.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +using testing::_; +using testing::AnyNumber; +using testing::InSequence; +using testing::Invoke; +using testing::NiceMock; +using testing::Ref; +using testing::Return; +using testing::ReturnRef; + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace DubboProxy { + +using ConfigDubboProxy = envoy::config::filter::network::dubbo_proxy::v2alpha1::DubboProxy; + +class TestConfigImpl : public ConfigImpl { +public: + TestConfigImpl(ConfigDubboProxy proto_config, Server::Configuration::MockFactoryContext& context, + DubboFilters::DecoderFilterSharedPtr decoder_filter, DubboFilterStats& stats) + : ConfigImpl(proto_config, context), decoder_filter_(decoder_filter), stats_(stats) {} + + // ConfigImpl + DubboFilterStats& stats() override { return stats_; } + void createFilterChain(DubboFilters::FilterChainFactoryCallbacks& callbacks) override { + if (custom_filter_) { + callbacks.addDecoderFilter(custom_filter_); + } + callbacks.addDecoderFilter(decoder_filter_); + } + + DeserializerPtr createDeserializer() override { + if (deserializer_) { + return DeserializerPtr{deserializer_}; + } + return ConfigImpl::createDeserializer(); + } + + ProtocolPtr createProtocol() override { + if (protocol_) 
{ + return ProtocolPtr{protocol_}; + } + return ConfigImpl::createProtocol(); + } + + Router::RouteConstSharedPtr route(const MessageMetadata& metadata, + uint64_t random_value) const override { + if (route_) { + return route_; + } + return ConfigImpl::route(metadata, random_value); + } + + DubboFilters::DecoderFilterSharedPtr custom_filter_; + DubboFilters::DecoderFilterSharedPtr decoder_filter_; + DubboFilterStats& stats_; + MockDeserializer* deserializer_{}; + MockProtocol* protocol_{}; + std::shared_ptr route_; +}; + +class ConnectionManagerTest : public testing::Test { +public: + ConnectionManagerTest() : stats_(DubboFilterStats::generateStats("test.", store_)) {} + ~ConnectionManagerTest() { filter_callbacks_.connection_.dispatcher_.clearDeferredDeleteList(); } + + TimeSource& timeSystem() { return factory_context_.dispatcher().timeSource(); } + + void initializeFilter() { initializeFilter(""); } + + void initializeFilter(const std::string& yaml) { + for (const auto& counter : store_.counters()) { + counter->reset(); + } + + if (!yaml.empty()) { + MessageUtil::loadFromYaml(yaml, proto_config_); + MessageUtil::validate(proto_config_); + } + + proto_config_.set_stat_prefix("test"); + decoder_filter_.reset(new NiceMock()); + config_ = + std::make_unique(proto_config_, factory_context_, decoder_filter_, stats_); + if (custom_deserializer_) { + config_->deserializer_ = custom_deserializer_; + } + if (custom_protocol_) { + config_->protocol_ = custom_protocol_; + } + if (custom_filter_) { + config_->custom_filter_ = custom_filter_; + } + + decoder_event_handler_.reset(new NiceMock()); + + ON_CALL(random_, random()).WillByDefault(Return(42)); + filter_ = std::make_unique( + *config_, random_, filter_callbacks_.connection_.dispatcher_.timeSource()); + filter_->initializeReadFilterCallbacks(filter_callbacks_); + filter_->onNewConnection(); + + // NOP currently. 
+ filter_->onAboveWriteBufferHighWatermark(); + filter_->onBelowWriteBufferLowWatermark(); + } + + void writeHessianErrorResponseMessage(Buffer::Instance& buffer, bool is_event, + int64_t request_id) { + uint8_t msg_type = 0x42; // request message, two_way, not event + + if (is_event) { + msg_type = msg_type | 0x20; + } + + buffer.add(std::string{'\xda', '\xbb'}); + buffer.add(static_cast(&msg_type), 1); + buffer.add(std::string{0x46}); // Response status + addInt64(buffer, request_id); // Request Id + buffer.add(std::string{0x00, 0x00, 0x00, 0x06, // Body Length + '\x91', // return type, exception + 0x05, 't', 'e', 's', 't'}); // return body + } + + void writeHessianExceptionResponseMessage(Buffer::Instance& buffer, bool is_event, + int64_t request_id) { + uint8_t msg_type = 0x42; // request message, two_way, not event + + if (is_event) { + msg_type = msg_type | 0x20; + } + + buffer.add(std::string{'\xda', '\xbb'}); + buffer.add(static_cast(&msg_type), 1); + buffer.add(std::string{0x14}); + addInt64(buffer, request_id); // Request Id + buffer.add(std::string{0x00, 0x00, 0x00, 0x06, // Body Length + '\x90', // return type, exception + 0x05, 't', 'e', 's', 't'}); // return body + } + + void writeInvalidResponseMessage(Buffer::Instance& buffer) { + buffer.add(std::string{ + '\xda', '\xbb', 0x43, 0x14, // Response Message Header, illegal serialization id + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, // Request Id + 0x00, 0x00, 0x00, 0x06, // Body Length + '\x94', // return type + 0x05, 't', 'e', 's', 't', // return body + }); + } + + void writeInvalidRequestMessage(Buffer::Instance& buffer) { + buffer.add(std::string{ + '\xda', '\xbb', '\xc3', 0x00, // Response Message Header, illegal serialization id + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, // Request Id + 0x00, 0x00, 0x00, 0x16, // Body Length + 0x05, '2', '.', '0', '.', '2', // Dubbo version + 0x04, 't', 'e', 's', 't', // Service name + 0x05, '0', '.', '0', '.', '0', // Service version + 0x04, 't', 
'e', 's', 't', // method name + }); + } + + void writePartialHessianResponseMessage(Buffer::Instance& buffer, bool is_event, + int64_t request_id, bool start) { + + uint8_t msg_type = 0x42; // request message, two_way, not event + + if (is_event) { + msg_type = msg_type | 0x20; + } + + if (start) { + buffer.add(std::string{'\xda', '\xbb'}); + buffer.add(static_cast(&msg_type), 1); + buffer.add(std::string{0x14}); + addInt64(buffer, request_id); // Request Id + buffer.add(std::string{0x00, 0x00, 0x00, 0x06, // Body Length + '\x94'}); // return type, exception + } else { + buffer.add(std::string{0x05, 't', 'e', 's', 't'}); // return body + } + } + + void writeHessianResponseMessage(Buffer::Instance& buffer, bool is_event, int64_t request_id) { + uint8_t msg_type = 0x42; // request message, two_way, not event + + if (is_event) { + msg_type = msg_type | 0x20; + } + + buffer.add(std::string{'\xda', '\xbb'}); + buffer.add(static_cast(&msg_type), 1); + buffer.add(std::string{0x14}); + addInt64(buffer, request_id); // Request Id + buffer.add(std::string{0x00, 0x00, 0x00, 0x06, // Body Length + '\x94', 0x05, 't', 'e', 's', 't'}); // return type, exception + } + + void writePartialHessianRequestMessage(Buffer::Instance& buffer, bool is_one_way, bool is_event, + int64_t request_id, bool start) { + uint8_t msg_type = 0xc2; // request message, two_way, not event + if (is_one_way) { + msg_type = msg_type & 0xbf; + } + + if (is_event) { + msg_type = msg_type | 0x20; + } + + if (start) { + buffer.add(std::string{'\xda', '\xbb'}); + buffer.add(static_cast(&msg_type), 1); + buffer.add(std::string{0x00}); + addInt64(buffer, request_id); // Request Id + buffer.add(std::string{0x00, 0x00, 0x00, 0x16, // Body Length + 0x05, '2', '.', '0', '.', '2'}); // Dubbo version + } else { + buffer.add(std::string{ + 0x04, 't', 'e', 's', 't', // Service name + 0x05, '0', '.', '0', '.', '0', // Service version + 0x04, 't', 'e', 's', 't', // method name + }); + } + } + + void 
writeHessianRequestMessage(Buffer::Instance& buffer, bool is_one_way, bool is_event, + int64_t request_id) { + uint8_t msg_type = 0xc2; // request message, two_way, not event + if (is_one_way) { + msg_type = msg_type & 0xbf; + } + + if (is_event) { + msg_type = msg_type | 0x20; + } + + buffer.add(std::string{'\xda', '\xbb'}); + buffer.add(static_cast(&msg_type), 1); + buffer.add(std::string{0x00}); + addInt64(buffer, request_id); // Request Id + buffer.add(std::string{0x00, 0x00, 0x00, 0x16, // Body Length + 0x05, '2', '.', '0', '.', '2', // Dubbo version + 0x04, 't', 'e', 's', 't', // Service name + 0x05, '0', '.', '0', '.', '0', // Service version + 0x04, 't', 'e', 's', 't'}); // method name + } + + void writeHessianHeartbeatRequestMessage(Buffer::Instance& buffer, int64_t request_id) { + uint8_t msg_type = 0xc2; // request message, two_way, not event + msg_type = msg_type | 0x20; + + buffer.add(std::string{'\xda', '\xbb'}); + buffer.add(static_cast(&msg_type), 1); + buffer.add(std::string{0x00}); + addInt64(buffer, request_id); // Request Id + buffer.add(std::string{0x00, 0x00, 0x00, 0x00}); // Body Length + } + + NiceMock factory_context_; + std::shared_ptr decoder_filter_; + std::shared_ptr decoder_event_handler_; + Stats::IsolatedStoreImpl store_; + DubboFilterStats stats_; + ConfigDubboProxy proto_config_; + + std::unique_ptr config_; + + Buffer::OwnedImpl buffer_; + Buffer::OwnedImpl write_buffer_; + NiceMock filter_callbacks_; + NiceMock random_; + std::unique_ptr filter_; + MockDeserializer* custom_deserializer_{}; + MockProtocol* custom_protocol_{}; + DubboFilters::DecoderFilterSharedPtr custom_filter_; +}; + +TEST_F(ConnectionManagerTest, OnDataHandlesRequestTwoWay) { + initializeFilter(); + writeHessianRequestMessage(buffer_, false, false, 0x0F); + + EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::StopIteration); + EXPECT_EQ(1U, store_.counter("test.request").value()); + EXPECT_EQ(1U, store_.counter("test.request_twoway").value()); + 
EXPECT_EQ(0U, store_.counter("test.request_oneway").value()); + EXPECT_EQ(0U, store_.counter("test.request_event").value()); + EXPECT_EQ(0U, store_.counter("test.request_decoding_error").value()); + EXPECT_EQ(1U, store_.gauge("test.request_active").value()); + EXPECT_EQ(0U, store_.counter("test.response").value()); +} + +TEST_F(ConnectionManagerTest, OnDataHandlesRequestOneWay) { + initializeFilter(); + writeHessianRequestMessage(buffer_, true, false, 0x0F); + + EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::StopIteration); + EXPECT_EQ(1U, store_.counter("test.request").value()); + EXPECT_EQ(0U, store_.counter("test.request_twoway").value()); + EXPECT_EQ(1U, store_.counter("test.request_oneway").value()); + EXPECT_EQ(0U, store_.counter("test.request_event").value()); + EXPECT_EQ(0U, store_.counter("test.request_decoding_error").value()); + EXPECT_EQ(1U, store_.gauge("test.request_active").value()); + EXPECT_EQ(0U, store_.counter("test.response").value()); + + filter_callbacks_.connection_.dispatcher_.clearDeferredDeleteList(); + EXPECT_EQ(0U, store_.gauge("test.request_active").value()); +} + +TEST_F(ConnectionManagerTest, OnDataHandlesHeartbeatEvent) { + initializeFilter(); + writeHessianHeartbeatRequestMessage(buffer_, 0x0F); + + EXPECT_CALL(filter_callbacks_.connection_, write(_, false)) + .WillOnce(Invoke([&](Buffer::Instance& buffer, bool) -> void { + ProtocolPtr protocol = filter_->config().createProtocol(); + Protocol::Context ctx; + MessageMetadataSharedPtr metadata(std::make_shared()); + EXPECT_TRUE(protocol->decode(buffer, &ctx, metadata)); + EXPECT_TRUE(ctx.is_heartbeat_); + EXPECT_EQ(metadata->response_status().value(), ResponseStatus::Ok); + EXPECT_EQ(metadata->message_type(), MessageType::Response); + buffer.drain(ctx.header_size_); + })); + + EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::StopIteration); + filter_callbacks_.connection_.dispatcher_.clearDeferredDeleteList(); + + EXPECT_EQ(0U, 
store_.counter("test.request").value()); + EXPECT_EQ(1U, store_.counter("test.request_event").value()); +} + +TEST_F(ConnectionManagerTest, HandlesHeartbeatWithException) { + custom_protocol_ = new NiceMock(); + initializeFilter(); + + EXPECT_CALL(*custom_protocol_, encode(_, _, _)).WillOnce(Return(false)); + + MessageMetadataSharedPtr meta = std::make_shared(); + EXPECT_THROW_WITH_MESSAGE(filter_->onHeartbeat(meta), EnvoyException, + "failed to encode heartbeat message"); +} + +TEST_F(ConnectionManagerTest, OnDataHandlesMessageSplitAcrossBuffers) { + initializeFilter(); + writePartialHessianRequestMessage(buffer_, false, false, 0x0F, true); + + EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::StopIteration); + EXPECT_EQ(0, buffer_.length()); + + // Complete the buffer + writePartialHessianRequestMessage(buffer_, false, false, 0x0F, false); + EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::StopIteration); + + EXPECT_EQ(1U, store_.counter("test.request_twoway").value()); + EXPECT_EQ(0U, store_.counter("test.request_decoding_error").value()); +} + +TEST_F(ConnectionManagerTest, OnDataHandlesProtocolError) { + initializeFilter(); + writeInvalidRequestMessage(buffer_); + + EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::StopIteration); + EXPECT_EQ(1U, store_.counter("test.request_decoding_error").value()); + EXPECT_EQ(0, buffer_.length()); + + // Sniffing is now disabled. 
+ bool one_way = true; + writeHessianRequestMessage(buffer_, one_way, false, 0x0F); + EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::StopIteration); + EXPECT_EQ(0U, store_.counter("test.request").value()); +} + +TEST_F(ConnectionManagerTest, OnDataHandlesProtocolErrorOnWrite) { + initializeFilter(); + + // Start the read buffer + writePartialHessianRequestMessage(buffer_, false, false, 0x0F, true); + uint64_t len = buffer_.length(); + + DubboFilters::DecoderFilterCallbacks* callbacks{}; + EXPECT_CALL(*decoder_filter_, setDecoderFilterCallbacks(_)) + .WillOnce(Invoke([&](DubboFilters::DecoderFilterCallbacks& cb) -> void { callbacks = &cb; })); + + EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::StopIteration); + len -= buffer_.length(); + + // Disable sniffing + writeInvalidRequestMessage(write_buffer_); + + DubboProtocolImpl protocol; + HessianDeserializerImpl deserializer; + callbacks->startUpstreamResponse(deserializer, protocol); + + EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, deferredDelete_(_)).Times(1); + EXPECT_NE(DubboFilters::UpstreamResponseStatus::Complete, callbacks->upstreamData(write_buffer_)); + EXPECT_EQ(1U, store_.counter("test.response_decoding_error").value()); + + filter_callbacks_.connection_.dispatcher_.clearDeferredDeleteList(); +} + +TEST_F(ConnectionManagerTest, OnDataStopsSniffingWithTooManyPendingCalls) { + initializeFilter(); + for (int i = 0; i < 64; i++) { + writeHessianRequestMessage(buffer_, false, false, i); + } + + EXPECT_CALL(*decoder_filter_, messageEnd(_)).Times(64); + + EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::StopIteration); + EXPECT_EQ(64U, store_.gauge("test.request_active").value()); + + // Sniffing is now disabled. 
+ writeInvalidRequestMessage(buffer_); + EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::StopIteration); + + filter_callbacks_.connection_.dispatcher_.clearDeferredDeleteList(); + + EXPECT_EQ(1U, store_.counter("test.request_decoding_error").value()); + EXPECT_EQ(0U, store_.gauge("test.request_active").value()); +} + +TEST_F(ConnectionManagerTest, OnWriteHandlesResponse) { + uint64_t request_id = 100; + initializeFilter(); + writeHessianRequestMessage(buffer_, false, false, request_id); + + DubboFilters::DecoderFilterCallbacks* callbacks{}; + EXPECT_CALL(*decoder_filter_, setDecoderFilterCallbacks(_)) + .WillOnce(Invoke([&](DubboFilters::DecoderFilterCallbacks& cb) -> void { callbacks = &cb; })); + + EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::StopIteration); + EXPECT_EQ(1U, store_.counter("test.request").value()); + EXPECT_EQ(1U, store_.gauge("test.request_active").value()); + + writeHessianResponseMessage(write_buffer_, false, request_id); + + DubboProtocolImpl protocol; + HessianDeserializerImpl deserializer; + callbacks->startUpstreamResponse(deserializer, protocol); + + EXPECT_EQ(callbacks->requestId(), request_id); + EXPECT_EQ(callbacks->connection(), &(filter_callbacks_.connection_)); + EXPECT_GE(callbacks->streamId(), 0); + + EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, deferredDelete_(_)).Times(1); + EXPECT_EQ(DubboFilters::UpstreamResponseStatus::Complete, callbacks->upstreamData(write_buffer_)); + + filter_callbacks_.connection_.dispatcher_.clearDeferredDeleteList(); + + EXPECT_EQ(1U, store_.counter("test.response").value()); + EXPECT_EQ(1U, store_.counter("test.response_success").value()); + EXPECT_EQ(0U, store_.counter("test.response_error").value()); + EXPECT_EQ(0U, store_.counter("test.response_exception").value()); + EXPECT_EQ(0U, store_.counter("test.response_decoding_error").value()); + EXPECT_EQ(0U, store_.gauge("test.request_active").value()); +} + +TEST_F(ConnectionManagerTest, 
HandlesResponseContainExceptionInfo) { + initializeFilter(); + writeHessianRequestMessage(buffer_, false, false, 1); + + DubboFilters::DecoderFilterCallbacks* callbacks{}; + EXPECT_CALL(*decoder_filter_, setDecoderFilterCallbacks(_)) + .WillOnce(Invoke([&](DubboFilters::DecoderFilterCallbacks& cb) -> void { callbacks = &cb; })); + + EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::StopIteration); + EXPECT_EQ(1U, store_.counter("test.request").value()); + EXPECT_EQ(1U, store_.counter("test.request_decoding_success").value()); + EXPECT_EQ(1U, store_.gauge("test.request_active").value()); + + writeHessianExceptionResponseMessage(write_buffer_, false, 1); + + DubboProtocolImpl protocol; + HessianDeserializerImpl deserializer; + callbacks->startUpstreamResponse(deserializer, protocol); + + EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, deferredDelete_(_)).Times(1); + EXPECT_EQ(DubboFilters::UpstreamResponseStatus::Complete, callbacks->upstreamData(write_buffer_)); + + filter_callbacks_.connection_.dispatcher_.clearDeferredDeleteList(); + + EXPECT_EQ(1U, store_.counter("test.response").value()); + EXPECT_EQ(1U, store_.counter("test.response_success").value()); + EXPECT_EQ(0U, store_.counter("test.response_error").value()); + EXPECT_EQ(1U, store_.counter("test.response_decoding_success").value()); + EXPECT_EQ(1U, store_.counter("test.response_business_exception").value()); + EXPECT_EQ(0U, store_.counter("test.response_decoding_error").value()); + EXPECT_EQ(0U, store_.gauge("test.request_active").value()); +} + +TEST_F(ConnectionManagerTest, HandlesResponseError) { + initializeFilter(); + writeHessianRequestMessage(buffer_, false, false, 1); + + DubboFilters::DecoderFilterCallbacks* callbacks{}; + EXPECT_CALL(*decoder_filter_, setDecoderFilterCallbacks(_)) + .WillOnce(Invoke([&](DubboFilters::DecoderFilterCallbacks& cb) -> void { callbacks = &cb; })); + + EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::StopIteration); + 
EXPECT_EQ(1U, store_.counter("test.request").value()); + EXPECT_EQ(1U, store_.gauge("test.request_active").value()); + + writeHessianErrorResponseMessage(write_buffer_, false, 1); + + DubboProtocolImpl protocol; + HessianDeserializerImpl deserializer; + callbacks->startUpstreamResponse(deserializer, protocol); + + EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, deferredDelete_(_)).Times(1); + EXPECT_EQ(DubboFilters::UpstreamResponseStatus::Complete, callbacks->upstreamData(write_buffer_)); + + filter_callbacks_.connection_.dispatcher_.clearDeferredDeleteList(); + + EXPECT_EQ(1U, store_.counter("test.response").value()); + EXPECT_EQ(0U, store_.counter("test.response_success").value()); + EXPECT_EQ(1U, store_.counter("test.response_error").value()); + EXPECT_EQ(0U, store_.counter("test.response_decoding_error").value()); + EXPECT_EQ(0U, store_.gauge("test.request_active").value()); +} + +TEST_F(ConnectionManagerTest, OnWriteHandlesResponseException) { + initializeFilter(); + writeHessianRequestMessage(buffer_, false, false, 1); + + DubboFilters::DecoderFilterCallbacks* callbacks{}; + EXPECT_CALL(*decoder_filter_, setDecoderFilterCallbacks(_)) + .WillOnce(Invoke([&](DubboFilters::DecoderFilterCallbacks& cb) -> void { callbacks = &cb; })); + + EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::StopIteration); + EXPECT_EQ(1U, store_.counter("test.request").value()); + + writeInvalidRequestMessage(write_buffer_); + + DubboProtocolImpl protocol; + HessianDeserializerImpl deserializer; + callbacks->startUpstreamResponse(deserializer, protocol); + + EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, deferredDelete_(_)).Times(1); + EXPECT_EQ(DubboFilters::UpstreamResponseStatus::Reset, callbacks->upstreamData(write_buffer_)); + + filter_callbacks_.connection_.dispatcher_.clearDeferredDeleteList(); + + EXPECT_EQ(1U, store_.counter("test.request").value()); + EXPECT_EQ(0U, store_.gauge("test.request_active").value()); + EXPECT_EQ(0U, 
store_.counter("test.response").value()); + EXPECT_EQ(0U, store_.counter("test.response_success").value()); + EXPECT_EQ(1U, store_.counter("test.local_response_business_exception").value()); + EXPECT_EQ(1U, store_.counter("test.response_decoding_error").value()); +} + +// Tests stop iteration/resume with multiple filters. +TEST_F(ConnectionManagerTest, OnDataResumesWithNextFilter) { + auto* filter = new NiceMock(); + custom_filter_.reset(filter); + + initializeFilter(); + writeHessianRequestMessage(buffer_, false, false, 0x0F); + + DubboFilters::DecoderFilterCallbacks* callbacks{}; + EXPECT_CALL(*filter, setDecoderFilterCallbacks(_)) + .WillOnce(Invoke([&](DubboFilters::DecoderFilterCallbacks& cb) -> void { callbacks = &cb; })); + EXPECT_CALL(*decoder_filter_, setDecoderFilterCallbacks(_)); + + ON_CALL(*filter, transferHeaderTo(_, _)) + .WillByDefault(Invoke([&](Buffer::Instance&, size_t) -> Network::FilterStatus { + return Network::FilterStatus::Continue; + })); + ON_CALL(*filter, transferBodyTo(_, _)) + .WillByDefault(Invoke([&](Buffer::Instance&, size_t) -> Network::FilterStatus { + return Network::FilterStatus::Continue; + })); + + // First filter stops iteration. + { + EXPECT_CALL(*filter, transportBegin()).WillOnce(Return(Network::FilterStatus::StopIteration)); + EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::StopIteration); + EXPECT_EQ(0U, store_.counter("test.request").value()); + EXPECT_EQ(1U, store_.gauge("test.request_active").value()); + } + + // Resume processing. 
+ { + InSequence s; + EXPECT_CALL(*decoder_filter_, transportBegin()) + .WillOnce(Return(Network::FilterStatus::Continue)); + EXPECT_CALL(*filter, messageEnd(_)).WillOnce(Return(Network::FilterStatus::Continue)); + EXPECT_CALL(*decoder_filter_, messageEnd(_)).WillOnce(Return(Network::FilterStatus::Continue)); + callbacks->continueDecoding(); + } + + EXPECT_EQ(1U, store_.counter("test.request").value()); + EXPECT_EQ(1U, store_.gauge("test.request_active").value()); +} + +// Tests multiple filters are invoked in the correct order. +TEST_F(ConnectionManagerTest, OnDataHandlesDubboCallWithMultipleFilters) { + auto* filter = new NiceMock(); + custom_filter_.reset(filter); + initializeFilter(); + + writeHessianRequestMessage(buffer_, false, false, 0x0F); + + ON_CALL(*filter, transferHeaderTo(_, _)) + .WillByDefault(Invoke([&](Buffer::Instance&, size_t) -> Network::FilterStatus { + return Network::FilterStatus::Continue; + })); + ON_CALL(*filter, transferBodyTo(_, _)) + .WillByDefault(Invoke([&](Buffer::Instance&, size_t) -> Network::FilterStatus { + return Network::FilterStatus::Continue; + })); + + InSequence s; + EXPECT_CALL(*filter, transportBegin()).WillOnce(Return(Network::FilterStatus::Continue)); + EXPECT_CALL(*decoder_filter_, transportBegin()).WillOnce(Return(Network::FilterStatus::Continue)); + EXPECT_CALL(*filter, messageEnd(_)).WillOnce(Return(Network::FilterStatus::Continue)); + EXPECT_CALL(*decoder_filter_, messageEnd(_)).WillOnce(Return(Network::FilterStatus::Continue)); + + EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::StopIteration); + EXPECT_EQ(1U, store_.counter("test.request").value()); + EXPECT_EQ(1U, store_.gauge("test.request_active").value()); +} + +TEST_F(ConnectionManagerTest, PipelinedRequestAndResponse) { + initializeFilter(); + + writeHessianRequestMessage(buffer_, false, false, 1); + writeHessianRequestMessage(buffer_, false, false, 2); + + std::list callbacks{}; + EXPECT_CALL(*decoder_filter_, 
setDecoderFilterCallbacks(_)) + .WillRepeatedly(Invoke( + [&](DubboFilters::DecoderFilterCallbacks& cb) -> void { callbacks.push_back(&cb); })); + + EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::StopIteration); + EXPECT_EQ(2U, store_.gauge("test.request_active").value()); + EXPECT_EQ(2U, store_.counter("test.request").value()); + + EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, deferredDelete_(_)).Times(2); + + DubboProtocolImpl protocol; + HessianDeserializerImpl deserializer; + + writeHessianResponseMessage(write_buffer_, false, 0x01); + callbacks.front()->startUpstreamResponse(deserializer, protocol); + EXPECT_EQ(DubboFilters::UpstreamResponseStatus::Complete, + callbacks.front()->upstreamData(write_buffer_)); + callbacks.pop_front(); + EXPECT_EQ(1U, store_.counter("test.response").value()); + EXPECT_EQ(1U, store_.counter("test.response_success").value()); + + writeHessianResponseMessage(write_buffer_, false, 0x02); + callbacks.front()->startUpstreamResponse(deserializer, protocol); + EXPECT_EQ(DubboFilters::UpstreamResponseStatus::Complete, + callbacks.front()->upstreamData(write_buffer_)); + callbacks.pop_front(); + EXPECT_EQ(2U, store_.counter("test.response").value()); + EXPECT_EQ(2U, store_.counter("test.response_success").value()); + + filter_callbacks_.connection_.dispatcher_.clearDeferredDeleteList(); + + EXPECT_EQ(0U, store_.gauge("test.request_active").value()); +} + +TEST_F(ConnectionManagerTest, ResetDownstreamConnection) { + initializeFilter(); + writeHessianRequestMessage(buffer_, false, false, 0x0F); + + DubboFilters::DecoderFilterCallbacks* callbacks{}; + EXPECT_CALL(*decoder_filter_, setDecoderFilterCallbacks(_)) + .WillOnce(Invoke([&](DubboFilters::DecoderFilterCallbacks& cb) -> void { callbacks = &cb; })); + + EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::StopIteration); + EXPECT_EQ(1U, store_.counter("test.request").value()); + EXPECT_EQ(1U, store_.gauge("test.request_active").value()); + + 
EXPECT_CALL(filter_callbacks_.connection_, close(Network::ConnectionCloseType::NoFlush)); + EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, deferredDelete_(_)); + callbacks->resetDownstreamConnection(); + + filter_callbacks_.connection_.dispatcher_.clearDeferredDeleteList(); + EXPECT_EQ(0U, store_.gauge("test.request_active").value()); +} + +TEST_F(ConnectionManagerTest, OnEvent) { + // No active calls + { + initializeFilter(); + filter_->onEvent(Network::ConnectionEvent::RemoteClose); + filter_->onEvent(Network::ConnectionEvent::LocalClose); + EXPECT_EQ(0U, store_.counter("test.cx_destroy_local_with_active_rq").value()); + EXPECT_EQ(0U, store_.counter("test.cx_destroy_remote_with_active_rq").value()); + } + + // Remote close mid-request + { + initializeFilter(); + + writePartialHessianRequestMessage(buffer_, false, false, 1, true); + EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::StopIteration); + + EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, deferredDelete_(_)).Times(1); + filter_->onEvent(Network::ConnectionEvent::RemoteClose); + filter_callbacks_.connection_.dispatcher_.clearDeferredDeleteList(); + + EXPECT_EQ(1U, store_.counter("test.cx_destroy_remote_with_active_rq").value()); + } + + // Local close mid-request + { + initializeFilter(); + writePartialHessianRequestMessage(buffer_, false, false, 1, true); + EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::StopIteration); + + EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, deferredDelete_(_)).Times(1); + filter_->onEvent(Network::ConnectionEvent::LocalClose); + filter_callbacks_.connection_.dispatcher_.clearDeferredDeleteList(); + + EXPECT_EQ(1U, store_.counter("test.cx_destroy_local_with_active_rq").value()); + + buffer_.drain(buffer_.length()); + } + + // Remote close before response + { + initializeFilter(); + writeHessianRequestMessage(buffer_, false, false, 1); + EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::StopIteration); + 
+ EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, deferredDelete_(_)).Times(1); + filter_->onEvent(Network::ConnectionEvent::RemoteClose); + filter_callbacks_.connection_.dispatcher_.clearDeferredDeleteList(); + + EXPECT_EQ(1U, store_.counter("test.cx_destroy_remote_with_active_rq").value()); + + buffer_.drain(buffer_.length()); + } + + // Local close before response + { + initializeFilter(); + writeHessianRequestMessage(buffer_, false, false, 1); + EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::StopIteration); + + EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, deferredDelete_(_)).Times(1); + filter_->onEvent(Network::ConnectionEvent::LocalClose); + filter_callbacks_.connection_.dispatcher_.clearDeferredDeleteList(); + + EXPECT_EQ(1U, store_.counter("test.cx_destroy_local_with_active_rq").value()); + + buffer_.drain(buffer_.length()); + } +} + +TEST_F(ConnectionManagerTest, ResponseWithUnknownSequenceID) { + initializeFilter(); + + DubboFilters::DecoderFilterCallbacks* callbacks{}; + EXPECT_CALL(*decoder_filter_, setDecoderFilterCallbacks(_)) + .WillOnce(Invoke([&](DubboFilters::DecoderFilterCallbacks& cb) -> void { callbacks = &cb; })); + + writeHessianRequestMessage(buffer_, false, false, 1); + EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::StopIteration); + + writeHessianResponseMessage(write_buffer_, false, 10); + + DubboProtocolImpl protocol; + HessianDeserializerImpl deserializer; + callbacks->startUpstreamResponse(deserializer, protocol); + + EXPECT_EQ(DubboFilters::UpstreamResponseStatus::Reset, callbacks->upstreamData(write_buffer_)); + EXPECT_EQ(1U, store_.counter("test.response_decoding_error").value()); +} + +TEST_F(ConnectionManagerTest, OnDataWithFilterSendsLocalReply) { + auto* filter = new NiceMock(); + custom_filter_.reset(filter); + + initializeFilter(); + writeHessianRequestMessage(buffer_, false, false, 1); + + ON_CALL(*filter, transferHeaderTo(_, _)) + 
.WillByDefault(Invoke([&](Buffer::Instance&, size_t) -> Network::FilterStatus { + return Network::FilterStatus::Continue; + })); + ON_CALL(*filter, transferBodyTo(_, _)) + .WillByDefault(Invoke([&](Buffer::Instance&, size_t) -> Network::FilterStatus { + return Network::FilterStatus::Continue; + })); + + DubboFilters::DecoderFilterCallbacks* callbacks{}; + EXPECT_CALL(*filter, setDecoderFilterCallbacks(_)) + .WillOnce(Invoke([&](DubboFilters::DecoderFilterCallbacks& cb) -> void { callbacks = &cb; })); + EXPECT_CALL(*decoder_filter_, setDecoderFilterCallbacks(_)); + + const std::string fake_response("mock dubbo response"); + NiceMock direct_response; + EXPECT_CALL(direct_response, encode(_, _, _, _)) + .WillOnce(Invoke([&](MessageMetadata&, Protocol&, Deserializer&, + Buffer::Instance& buffer) -> DubboFilters::DirectResponse::ResponseType { + buffer.add(fake_response); + return DubboFilters::DirectResponse::ResponseType::SuccessReply; + })); + + // First filter sends local reply. + EXPECT_CALL(*filter, messageEnd(_)) + .WillOnce(Invoke([&](MessageMetadataSharedPtr) -> Network::FilterStatus { + callbacks->streamInfo().setResponseFlag(StreamInfo::ResponseFlag::NoRouteFound); + callbacks->sendLocalReply(direct_response, false); + return Network::FilterStatus::StopIteration; + })); + EXPECT_CALL(filter_callbacks_.connection_, write(_, false)) + .WillOnce(Invoke([&](Buffer::Instance& buffer, bool) -> void { + EXPECT_EQ(fake_response, buffer.toString()); + })); + EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, deferredDelete_(_)).Times(1); + EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::StopIteration); + EXPECT_EQ(SerializationType::Hessian, callbacks->downstreamSerializationType()); + EXPECT_EQ(ProtocolType::Dubbo, callbacks->downstreamProtocolType()); + + filter_callbacks_.connection_.dispatcher_.clearDeferredDeleteList(); + + EXPECT_EQ(1U, store_.counter("test.local_response_success").value()); + EXPECT_EQ(1U, 
store_.counter("test.request").value()); + EXPECT_EQ(0U, store_.gauge("test.request_active").value()); +} + +TEST_F(ConnectionManagerTest, OnDataWithFilterSendsLocalErrorReply) { + auto* filter = new NiceMock(); + custom_filter_.reset(filter); + + initializeFilter(); + writeHessianRequestMessage(buffer_, false, false, 1); + + ON_CALL(*filter, transferHeaderTo(_, _)) + .WillByDefault(Invoke([&](Buffer::Instance&, size_t) -> Network::FilterStatus { + return Network::FilterStatus::Continue; + })); + ON_CALL(*filter, transferBodyTo(_, _)) + .WillByDefault(Invoke([&](Buffer::Instance&, size_t) -> Network::FilterStatus { + return Network::FilterStatus::Continue; + })); + + DubboFilters::DecoderFilterCallbacks* callbacks{}; + EXPECT_CALL(*filter, setDecoderFilterCallbacks(_)) + .WillOnce(Invoke([&](DubboFilters::DecoderFilterCallbacks& cb) -> void { callbacks = &cb; })); + EXPECT_CALL(*decoder_filter_, setDecoderFilterCallbacks(_)); + + const std::string fake_response("mock dubbo response"); + NiceMock direct_response; + EXPECT_CALL(direct_response, encode(_, _, _, _)) + .WillOnce(Invoke([&](MessageMetadata&, Protocol&, Deserializer&, + Buffer::Instance& buffer) -> DubboFilters::DirectResponse::ResponseType { + buffer.add(fake_response); + return DubboFilters::DirectResponse::ResponseType::ErrorReply; + })); + + // First filter sends local reply. 
+ EXPECT_CALL(*filter, messageEnd(_)) + .WillOnce(Invoke([&](MessageMetadataSharedPtr) -> Network::FilterStatus { + callbacks->sendLocalReply(direct_response, false); + return Network::FilterStatus::StopIteration; + })); + EXPECT_CALL(filter_callbacks_.connection_, write(_, false)) + .WillOnce(Invoke([&](Buffer::Instance& buffer, bool) -> void { + EXPECT_EQ(fake_response, buffer.toString()); + })); + EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, deferredDelete_(_)).Times(1); + EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::StopIteration); + + filter_callbacks_.connection_.dispatcher_.clearDeferredDeleteList(); + + EXPECT_EQ(1U, store_.counter("test.local_response_error").value()); + EXPECT_EQ(1U, store_.counter("test.request").value()); + EXPECT_EQ(0U, store_.gauge("test.request_active").value()); +} + +TEST_F(ConnectionManagerTest, TwoWayRequestWithEndStream) { + initializeFilter(); + writeHessianRequestMessage(buffer_, false, false, 0x0F); + + ON_CALL(*decoder_filter_, transferHeaderTo(_, _)) + .WillByDefault(Invoke([&](Buffer::Instance&, size_t) -> Network::FilterStatus { + return Network::FilterStatus::StopIteration; + })); + + EXPECT_CALL(filter_callbacks_.connection_, close(Network::ConnectionCloseType::FlushWrite)) + .Times(1); + EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, deferredDelete_(_)).Times(1); + EXPECT_EQ(filter_->onData(buffer_, true), Network::FilterStatus::StopIteration); + EXPECT_EQ(1U, store_.counter("test.cx_destroy_remote_with_active_rq").value()); +} + +TEST_F(ConnectionManagerTest, OneWayRequestWithEndStream) { + initializeFilter(); + writeHessianRequestMessage(buffer_, true, false, 0x0F); + + EXPECT_CALL(*decoder_filter_, messageEnd(_)) + .WillOnce(Invoke([&](MessageMetadataSharedPtr) -> Network::FilterStatus { + return Network::FilterStatus::StopIteration; + })); + EXPECT_CALL(filter_callbacks_.connection_, close(Network::ConnectionCloseType::FlushWrite)) + .Times(0); + 
EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, deferredDelete_(_)).Times(0); + EXPECT_EQ(filter_->onData(buffer_, true), Network::FilterStatus::StopIteration); + EXPECT_EQ(0U, store_.counter("test.cx_destroy_remote_with_active_rq").value()); +} + +TEST_F(ConnectionManagerTest, EmptyRequestData) { + initializeFilter(); + buffer_.drain(buffer_.length()); + + EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, deferredDelete_(_)).Times(0); + EXPECT_EQ(filter_->onData(buffer_, true), Network::FilterStatus::StopIteration); + EXPECT_EQ(0U, store_.counter("test.request_active").value()); +} + +TEST_F(ConnectionManagerTest, StopHandleRequest) { + initializeFilter(); + writeHessianRequestMessage(buffer_, false, false, 0x0F); + + ON_CALL(*decoder_filter_, transferHeaderTo(_, _)) + .WillByDefault(Invoke([&](Buffer::Instance&, size_t) -> Network::FilterStatus { + return Network::FilterStatus::StopIteration; + })); + + EXPECT_CALL(filter_callbacks_.connection_, close(Network::ConnectionCloseType::FlushWrite)) + .Times(0); + EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, deferredDelete_(_)).Times(0); + EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::StopIteration); + EXPECT_EQ(0U, store_.counter("test.cx_destroy_remote_with_active_rq").value()); + + EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::StopIteration); +} + +TEST_F(ConnectionManagerTest, HandlesHeartbeatEventWithConnectionClose) { + initializeFilter(); + writeHessianHeartbeatRequestMessage(buffer_, 0x0F); + + EXPECT_CALL(filter_callbacks_.connection_, write(_, false)).Times(0); + + filter_callbacks_.connection_.close(Network::ConnectionCloseType::FlushWrite); + EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::StopIteration); + filter_callbacks_.connection_.dispatcher_.clearDeferredDeleteList(); + + EXPECT_EQ(0U, store_.counter("test.request").value()); + EXPECT_EQ(1U, store_.counter("test.request_event").value()); +} + 
+TEST_F(ConnectionManagerTest, SendsLocalReplyWithCloseConnection) { + initializeFilter(); + + const std::string fake_response("mock dubbo response"); + NiceMock direct_response; + EXPECT_CALL(direct_response, encode(_, _, _, _)) + .WillOnce(Invoke([&](MessageMetadata&, Protocol&, Deserializer&, + Buffer::Instance& buffer) -> DubboFilters::DirectResponse::ResponseType { + buffer.add(fake_response); + return DubboFilters::DirectResponse::ResponseType::ErrorReply; + })); + EXPECT_CALL(filter_callbacks_.connection_, close(Network::ConnectionCloseType::FlushWrite)) + .Times(1); + + MessageMetadata metadata; + filter_->sendLocalReply(metadata, direct_response, true); + EXPECT_EQ(1U, store_.counter("test.local_response_error").value()); + + // The connection closed. + EXPECT_CALL(direct_response, encode(_, _, _, _)).Times(0); + filter_->sendLocalReply(metadata, direct_response, true); +} + +TEST_F(ConnectionManagerTest, ContinueDecodingWithHalfClose) { + initializeFilter(); + writeHessianRequestMessage(buffer_, true, false, 0x0F); + + EXPECT_CALL(*decoder_filter_, messageEnd(_)) + .WillOnce(Invoke([&](MessageMetadataSharedPtr) -> Network::FilterStatus { + return Network::FilterStatus::StopIteration; + })); + EXPECT_CALL(filter_callbacks_.connection_, close(Network::ConnectionCloseType::FlushWrite)) + .Times(0); + EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, deferredDelete_(_)).Times(0); + EXPECT_EQ(filter_->onData(buffer_, true), Network::FilterStatus::StopIteration); + EXPECT_EQ(0U, store_.counter("test.cx_destroy_remote_with_active_rq").value()); + + EXPECT_CALL(filter_callbacks_.connection_, close(Network::ConnectionCloseType::FlushWrite)) + .Times(1); + EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, deferredDelete_(_)).Times(1); + filter_->continueDecoding(); +} + +TEST_F(ConnectionManagerTest, RoutingSuccess) { + initializeFilter(); + writeHessianRequestMessage(buffer_, false, false, 0x0F); + + DubboFilters::DecoderFilterCallbacks* callbacks{}; + 
EXPECT_CALL(*decoder_filter_, setDecoderFilterCallbacks(_)) + .WillOnce(Invoke([&](DubboFilters::DecoderFilterCallbacks& cb) -> void { callbacks = &cb; })); + + EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::StopIteration); + + config_->route_ = std::make_shared(); + EXPECT_EQ(config_->route_, callbacks->route()); + + // Use the cache. + EXPECT_NE(nullptr, callbacks->route()); +} + +TEST_F(ConnectionManagerTest, RoutingFailure) { + initializeFilter(); + writeHessianRequestMessage(buffer_, false, false, 0x0F); + + EXPECT_CALL(*decoder_filter_, transportBegin()).WillOnce(Invoke([&]() -> Network::FilterStatus { + return Network::FilterStatus::StopIteration; + })); + + DubboFilters::DecoderFilterCallbacks* callbacks{}; + EXPECT_CALL(*decoder_filter_, setDecoderFilterCallbacks(_)) + .WillOnce(Invoke([&](DubboFilters::DecoderFilterCallbacks& cb) -> void { callbacks = &cb; })); + + EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::StopIteration); + + // The metadata is nullptr. 
+ config_->route_ = std::make_shared(); + EXPECT_EQ(nullptr, callbacks->route()); +} + +TEST_F(ConnectionManagerTest, ResetStream) { + initializeFilter(); + writeHessianRequestMessage(buffer_, false, false, 0x0F); + + DubboFilters::DecoderFilterCallbacks* callbacks{}; + EXPECT_CALL(*decoder_filter_, setDecoderFilterCallbacks(_)) + .WillOnce(Invoke([&](DubboFilters::DecoderFilterCallbacks& cb) -> void { callbacks = &cb; })); + + EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::StopIteration); + + EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, deferredDelete_(_)).Times(1); + callbacks->resetStream(); +} + +TEST_F(ConnectionManagerTest, NeedMoreDataForHandleResponse) { + uint64_t request_id = 100; + initializeFilter(); + writeHessianRequestMessage(buffer_, false, false, request_id); + + DubboFilters::DecoderFilterCallbacks* callbacks{}; + EXPECT_CALL(*decoder_filter_, setDecoderFilterCallbacks(_)) + .WillOnce(Invoke([&](DubboFilters::DecoderFilterCallbacks& cb) -> void { callbacks = &cb; })); + + EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::StopIteration); + EXPECT_EQ(1U, store_.counter("test.request").value()); + EXPECT_EQ(1U, store_.gauge("test.request_active").value()); + + writePartialHessianRequestMessage(write_buffer_, false, false, 0x0F, true); + + DubboProtocolImpl protocol; + HessianDeserializerImpl deserializer; + callbacks->startUpstreamResponse(deserializer, protocol); + + EXPECT_EQ(DubboFilters::UpstreamResponseStatus::MoreData, callbacks->upstreamData(write_buffer_)); +} + +TEST_F(ConnectionManagerTest, PendingMessageEnd) { + uint64_t request_id = 100; + initializeFilter(); + writeHessianRequestMessage(buffer_, false, false, request_id); + + DubboFilters::DecoderFilterCallbacks* callbacks{}; + EXPECT_CALL(*decoder_filter_, setDecoderFilterCallbacks(_)) + .WillOnce(Invoke([&](DubboFilters::DecoderFilterCallbacks& cb) -> void { callbacks = &cb; })); + EXPECT_CALL(*decoder_filter_, messageEnd(_)) + 
.WillOnce(Invoke([&](MessageMetadataSharedPtr) -> Network::FilterStatus { + return Network::FilterStatus::StopIteration; + })); + + EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::StopIteration); + EXPECT_EQ(0U, store_.counter("test.request").value()); + EXPECT_EQ(1U, store_.gauge("test.request_active").value()); +} + +TEST_F(ConnectionManagerTest, Routing) { + const std::string yaml = R"EOF( +stat_prefix: test +protocol_type: Dubbo +serialization_type: Hessian2 +route_config: + - name: test1 + interface: org.apache.dubbo.demo.DemoService + routes: + - match: + method: + name: + regex: "(.*?)" + route: + cluster: user_service_dubbo_server +)EOF"; + + initializeFilter(yaml); + writeHessianRequestMessage(buffer_, false, false, 100); + + DubboFilters::DecoderFilterCallbacks* callbacks{}; + EXPECT_CALL(*decoder_filter_, setDecoderFilterCallbacks(_)) + .WillOnce(Invoke([&](DubboFilters::DecoderFilterCallbacks& cb) -> void { callbacks = &cb; })); + EXPECT_CALL(*decoder_filter_, messageEnd(_)) + .WillOnce(Invoke([&](MessageMetadataSharedPtr metadata) -> Network::FilterStatus { + metadata->setServiceName("org.apache.dubbo.demo.DemoService"); + metadata->setMethodName("test"); + return Network::FilterStatus::StopIteration; + })); + + EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::StopIteration); + EXPECT_EQ(0U, store_.counter("test.request").value()); + EXPECT_EQ(1U, store_.gauge("test.request_active").value()); + + Router::RouteConstSharedPtr route = callbacks->route(); + EXPECT_NE(nullptr, route); + EXPECT_NE(nullptr, route->routeEntry()); + EXPECT_EQ("user_service_dubbo_server", route->routeEntry()->clusterName()); +} + +TEST_F(ConnectionManagerTest, TransportEndWithConnectionClose) { + initializeFilter(); + + DubboFilters::DecoderFilterCallbacks* callbacks{}; + EXPECT_CALL(*decoder_filter_, setDecoderFilterCallbacks(_)) + .WillOnce(Invoke([&](DubboFilters::DecoderFilterCallbacks& cb) -> void { callbacks = &cb; })); + + 
writeHessianRequestMessage(buffer_, false, false, 1); + EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::StopIteration); + + writeHessianResponseMessage(write_buffer_, false, 1); + + DubboProtocolImpl protocol; + HessianDeserializerImpl deserializer; + callbacks->startUpstreamResponse(deserializer, protocol); + + filter_callbacks_.connection_.close(Network::ConnectionCloseType::FlushWrite); + + EXPECT_EQ(DubboFilters::UpstreamResponseStatus::Reset, callbacks->upstreamData(write_buffer_)); + EXPECT_EQ(1U, store_.counter("test.response_error_caused_connection_close").value()); +} + +TEST_F(ConnectionManagerTest, TransportBeginReturnStopIteration) { + initializeFilter(); + + DubboFilters::DecoderFilterCallbacks* callbacks{}; + EXPECT_CALL(*decoder_filter_, setDecoderFilterCallbacks(_)) + .WillOnce(Invoke([&](DubboFilters::DecoderFilterCallbacks& cb) -> void { callbacks = &cb; })); + + EXPECT_CALL(*decoder_filter_, transportBegin()).WillOnce(Invoke([&]() -> Network::FilterStatus { + return Network::FilterStatus::StopIteration; + })); + + EXPECT_CALL(*decoder_filter_, messageBegin(_, _, _)).Times(0); + EXPECT_CALL(*decoder_filter_, messageEnd(_)).Times(0); + EXPECT_CALL(*decoder_filter_, transferBodyTo(_, _)).Times(0); + EXPECT_CALL(*decoder_filter_, transportEnd()).Times(0); + + // The sendLocalReply is not called and the message type is not oneway, + // the ActiveMessage object is not destroyed. + EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, deferredDelete_(_)).Times(0); + + writeHessianRequestMessage(buffer_, false, false, 1); + EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::StopIteration); + + // Buffer data should be consumed. + EXPECT_EQ(0, buffer_.length()); + + // The finalizeRequest should not be called. 
+ EXPECT_EQ(0U, store_.counter("test.request").value()); +} + +TEST_F(ConnectionManagerTest, SendLocalReplyInTransportBegin) { + initializeFilter(); + + DubboFilters::DecoderFilterCallbacks* callbacks{}; + EXPECT_CALL(*decoder_filter_, setDecoderFilterCallbacks(_)) + .WillOnce(Invoke([&](DubboFilters::DecoderFilterCallbacks& cb) -> void { callbacks = &cb; })); + + const std::string fake_response("mock dubbo response"); + NiceMock direct_response; + EXPECT_CALL(direct_response, encode(_, _, _, _)) + .WillOnce(Invoke([&](MessageMetadata&, Protocol&, Deserializer&, + Buffer::Instance& buffer) -> DubboFilters::DirectResponse::ResponseType { + buffer.add(fake_response); + return DubboFilters::DirectResponse::ResponseType::ErrorReply; + })); + EXPECT_CALL(*decoder_filter_, transportBegin()).WillOnce(Invoke([&]() -> Network::FilterStatus { + callbacks->sendLocalReply(direct_response, false); + return Network::FilterStatus::StopIteration; + })); + + EXPECT_CALL(*decoder_filter_, messageBegin(_, _, _)).Times(0); + EXPECT_CALL(*decoder_filter_, messageEnd(_)).Times(0); + EXPECT_CALL(*decoder_filter_, transferBodyTo(_, _)).Times(0); + EXPECT_CALL(*decoder_filter_, transportEnd()).Times(0); + + // The sendLocalReply is called, the ActiveMessage object should be destroyed. + EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, deferredDelete_(_)).Times(1); + + writeHessianRequestMessage(buffer_, false, false, 1); + EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::StopIteration); + + // Buffer data should be consumed. + EXPECT_EQ(0, buffer_.length()); + + // The finalizeRequest should be called. 
+ EXPECT_EQ(1U, store_.counter("test.request").value()); +} + +TEST_F(ConnectionManagerTest, SendLocalReplyInMessageBegin) { + initializeFilter(); + + DubboFilters::DecoderFilterCallbacks* callbacks{}; + EXPECT_CALL(*decoder_filter_, setDecoderFilterCallbacks(_)) + .WillOnce(Invoke([&](DubboFilters::DecoderFilterCallbacks& cb) -> void { callbacks = &cb; })); + + const std::string fake_response("mock dubbo response"); + NiceMock direct_response; + EXPECT_CALL(direct_response, encode(_, _, _, _)) + .WillOnce(Invoke([&](MessageMetadata&, Protocol&, Deserializer&, + Buffer::Instance& buffer) -> DubboFilters::DirectResponse::ResponseType { + buffer.add(fake_response); + return DubboFilters::DirectResponse::ResponseType::ErrorReply; + })); + EXPECT_CALL(*decoder_filter_, messageBegin(_, _, _)) + .WillOnce(Invoke([&](MessageType, int64_t, SerializationType) -> Network::FilterStatus { + callbacks->sendLocalReply(direct_response, false); + return Network::FilterStatus::StopIteration; + })); + + EXPECT_CALL(*decoder_filter_, messageEnd(_)).Times(0); + EXPECT_CALL(*decoder_filter_, transferBodyTo(_, _)).Times(0); + EXPECT_CALL(*decoder_filter_, transportEnd()).Times(0); + + // The sendLocalReply is called, the ActiveMessage object should be destroyed. + EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, deferredDelete_(_)).Times(1); + + writeHessianRequestMessage(buffer_, false, false, 1); + EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::StopIteration); + + // Buffer data should be consumed. + EXPECT_EQ(0, buffer_.length()); + + // The finalizeRequest should be called. 
+ EXPECT_EQ(1U, store_.counter("test.request").value()); +} + +} // namespace DubboProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/network/dubbo_proxy/decoder_test.cc b/test/extensions/filters/network/dubbo_proxy/decoder_test.cc new file mode 100644 index 0000000000000..96f5d313d4420 --- /dev/null +++ b/test/extensions/filters/network/dubbo_proxy/decoder_test.cc @@ -0,0 +1,255 @@ +#include "extensions/filters/network/dubbo_proxy/decoder.h" +#include "extensions/filters/network/dubbo_proxy/deserializer_impl.h" +#include "extensions/filters/network/dubbo_proxy/metadata.h" + +#include "test/extensions/filters/network/dubbo_proxy/mocks.h" +#include "test/extensions/filters/network/dubbo_proxy/utility.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +using testing::_; +using testing::Return; +using testing::ReturnRef; +using testing::TestParamInfo; +using testing::TestWithParam; +using testing::Values; + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace DubboProxy { + +class DecoderStateMachineTestBase { +public: + DecoderStateMachineTestBase() : metadata_(std::make_shared()) { + context_.header_size_ = 16; + } + virtual ~DecoderStateMachineTestBase() {} + + void initHandler() { + EXPECT_CALL(decoder_callback_, newDecoderEventHandler()) + .WillOnce(Invoke([this]() -> DecoderEventHandler* { return &handler_; })); + } + + void initProtocolDecoder(MessageType type, int32_t body_size, bool is_heartbeat = false) { + EXPECT_CALL(protocol_, decode(_, _, _)) + .WillOnce(Invoke([=](Buffer::Instance&, Protocol::Context* context, + MessageMetadataSharedPtr metadata) -> bool { + context->is_heartbeat_ = is_heartbeat; + context->body_size_ = body_size; + metadata->setMessageType(type); + return true; + })); + } + + NiceMock protocol_; + NiceMock deserializer_; + NiceMock handler_; + NiceMock decoder_callback_; + MessageMetadataSharedPtr metadata_; + Protocol::Context 
context_; +}; + +class DubboDecoderStateMachineTest : public DecoderStateMachineTestBase, public testing::Test {}; + +class DubboDecoderTest : public testing::Test { +public: + DubboDecoderTest() = default; + virtual ~DubboDecoderTest() override = default; + + NiceMock protocol_; + NiceMock deserializer_; + NiceMock callbacks_; +}; + +TEST_F(DubboDecoderStateMachineTest, EmptyData) { + EXPECT_CALL(protocol_, decode(_, _, _)).Times(1); + EXPECT_CALL(handler_, transferHeaderTo(_, _)).Times(0); + EXPECT_CALL(handler_, messageBegin(_, _, _)).Times(0); + + DecoderStateMachine dsm(protocol_, deserializer_, metadata_, decoder_callback_); + Buffer::OwnedImpl buffer; + EXPECT_EQ(dsm.run(buffer), ProtocolState::WaitForData); +} + +TEST_F(DubboDecoderStateMachineTest, OnlyHaveHeaderData) { + initHandler(); + initProtocolDecoder(MessageType::Request, 1, false); + + EXPECT_CALL(handler_, transportBegin()).Times(1); + EXPECT_CALL(handler_, transferHeaderTo(_, _)).Times(1); + EXPECT_CALL(handler_, messageBegin(_, _, _)).Times(1); + EXPECT_CALL(handler_, messageEnd(_)).Times(0); + + Buffer::OwnedImpl buffer; + DecoderStateMachine dsm(protocol_, deserializer_, metadata_, decoder_callback_); + EXPECT_EQ(dsm.run(buffer), ProtocolState::WaitForData); +} + +TEST_F(DubboDecoderStateMachineTest, RequestMessageCallbacks) { + initHandler(); + initProtocolDecoder(MessageType::Request, 0, false); + + EXPECT_CALL(handler_, transportBegin()).Times(1); + EXPECT_CALL(handler_, transferHeaderTo(_, _)).Times(1); + EXPECT_CALL(handler_, messageBegin(_, _, _)).Times(1); + EXPECT_CALL(handler_, messageEnd(_)).Times(1); + EXPECT_CALL(handler_, transferBodyTo(_, _)).Times(1); + EXPECT_CALL(handler_, transportEnd()).Times(1); + + EXPECT_CALL(deserializer_, deserializeRpcInvocation(_, _, _)).WillOnce(Return()); + + DecoderStateMachine dsm(protocol_, deserializer_, metadata_, decoder_callback_); + Buffer::OwnedImpl buffer; + EXPECT_EQ(dsm.run(buffer), ProtocolState::Done); +} + 
+TEST_F(DubboDecoderStateMachineTest, ResponseMessageCallbacks) { + initHandler(); + initProtocolDecoder(MessageType::Response, 0, false); + + EXPECT_CALL(handler_, transportBegin()).Times(1); + EXPECT_CALL(handler_, transferHeaderTo(_, _)).Times(1); + EXPECT_CALL(handler_, messageBegin(_, _, _)).Times(1); + EXPECT_CALL(handler_, messageEnd(_)).Times(1); + EXPECT_CALL(handler_, transferBodyTo(_, _)).Times(1); + EXPECT_CALL(handler_, transportEnd()).Times(1); + + EXPECT_CALL(deserializer_, deserializeRpcResult(_, _)) + .WillOnce(Invoke([](Buffer::Instance&, size_t) -> RpcResultPtr { + return std::make_unique(false); + })); + + DecoderStateMachine dsm(protocol_, deserializer_, metadata_, decoder_callback_); + Buffer::OwnedImpl buffer; + EXPECT_EQ(dsm.run(buffer), ProtocolState::Done); +} + +TEST_F(DubboDecoderStateMachineTest, DeserializeRpcInvocationException) { + initHandler(); + initProtocolDecoder(MessageType::Request, 0, false); + + EXPECT_CALL(handler_, messageEnd(_)).Times(0); + EXPECT_CALL(handler_, transferBodyTo(_, _)).Times(0); + EXPECT_CALL(handler_, transportEnd()).Times(0); + + EXPECT_CALL(deserializer_, deserializeRpcInvocation(_, _, _)) + .WillOnce(Invoke([](Buffer::Instance&, int32_t, MessageMetadataSharedPtr) -> void { + throw EnvoyException(fmt::format("mock deserialize exception")); + })); + + DecoderStateMachine dsm(protocol_, deserializer_, metadata_, decoder_callback_); + + Buffer::OwnedImpl buffer; + EXPECT_THROW_WITH_MESSAGE(dsm.run(buffer), EnvoyException, "mock deserialize exception"); + EXPECT_EQ(dsm.currentState(), ProtocolState::OnMessageEnd); +} + +TEST_F(DubboDecoderStateMachineTest, DeserializeRpcResultException) { + initHandler(); + initProtocolDecoder(MessageType::Response, 0, false); + + EXPECT_CALL(handler_, messageEnd(_)).Times(0); + EXPECT_CALL(handler_, transferBodyTo(_, _)).Times(0); + EXPECT_CALL(handler_, transportEnd()).Times(0); + + EXPECT_CALL(deserializer_, deserializeRpcResult(_, _)) + 
.WillOnce(Invoke([](Buffer::Instance&, size_t) -> RpcResultPtr { + throw EnvoyException(fmt::format("mock deserialize exception")); + })); + + DecoderStateMachine dsm(protocol_, deserializer_, metadata_, decoder_callback_); + + Buffer::OwnedImpl buffer; + EXPECT_THROW_WITH_MESSAGE(dsm.run(buffer), EnvoyException, "mock deserialize exception"); + EXPECT_EQ(dsm.currentState(), ProtocolState::OnMessageEnd); +} + +TEST_F(DubboDecoderStateMachineTest, ProtocolDecodeException) { + EXPECT_CALL(decoder_callback_, newDecoderEventHandler()).Times(0); + EXPECT_CALL(protocol_, decode(_, _, _)) + .WillOnce(Invoke([](Buffer::Instance&, Protocol::Context*, MessageMetadataSharedPtr) -> bool { + throw EnvoyException(fmt::format("mock deserialize exception")); + })); + + DecoderStateMachine dsm(protocol_, deserializer_, metadata_, decoder_callback_); + + Buffer::OwnedImpl buffer; + EXPECT_THROW_WITH_MESSAGE(dsm.run(buffer), EnvoyException, "mock deserialize exception"); + EXPECT_EQ(dsm.currentState(), ProtocolState::OnTransportBegin); +} + +TEST_F(DubboDecoderTest, NeedMoreDataForProtocolHeader) { + EXPECT_CALL(protocol_, decode(_, _, _)) + .WillOnce(Invoke([](Buffer::Instance&, Protocol::Context*, MessageMetadataSharedPtr) -> bool { + return false; + })); + EXPECT_CALL(callbacks_, newDecoderEventHandler()).Times(0); + + Decoder decoder(protocol_, deserializer_, callbacks_); + + Buffer::OwnedImpl buffer; + bool buffer_underflow; + EXPECT_EQ(decoder.onData(buffer, buffer_underflow), Network::FilterStatus::Continue); + EXPECT_EQ(buffer_underflow, true); +} + +TEST_F(DubboDecoderTest, NeedMoreDataForProtocolBody) { + EXPECT_CALL(protocol_, decode(_, _, _)) + .WillOnce(Invoke([](Buffer::Instance&, Protocol::Context* context, + MessageMetadataSharedPtr metadata) -> bool { + metadata->setMessageType(MessageType::Request); + context->body_size_ = 10; + return true; + })); + EXPECT_CALL(callbacks_, newDecoderEventHandler()).Times(1); + EXPECT_CALL(callbacks_.handler_, 
transportBegin()).Times(1); + EXPECT_CALL(callbacks_.handler_, transferHeaderTo(_, _)).Times(1); + EXPECT_CALL(callbacks_.handler_, messageBegin(_, _, _)).Times(1); + EXPECT_CALL(callbacks_.handler_, messageEnd(_)).Times(0); + EXPECT_CALL(callbacks_.handler_, transferBodyTo(_, _)).Times(0); + EXPECT_CALL(callbacks_.handler_, transportEnd()).Times(0); + + Decoder decoder(protocol_, deserializer_, callbacks_); + + Buffer::OwnedImpl buffer; + bool buffer_underflow; + EXPECT_EQ(decoder.onData(buffer, buffer_underflow), Network::FilterStatus::Continue); + EXPECT_EQ(buffer_underflow, true); +} + +TEST_F(DubboDecoderTest, decodeResponseMessage) { + Buffer::OwnedImpl buffer; + buffer.add(std::string({'\xda', '\xbb', '\xc2', 0x00})); + + EXPECT_CALL(protocol_, decode(_, _, _)) + .WillOnce(Invoke([&](Buffer::Instance&, Protocol::Context* context, + MessageMetadataSharedPtr metadata) -> bool { + metadata->setMessageType(MessageType::Response); + context->body_size_ = buffer.length(); + return true; + })); + EXPECT_CALL(deserializer_, deserializeRpcResult(_, _)) + .WillOnce(Invoke([](Buffer::Instance&, size_t) -> RpcResultPtr { + return std::make_unique(true); + })); + EXPECT_CALL(callbacks_, newDecoderEventHandler()).Times(1); + EXPECT_CALL(callbacks_.handler_, transportBegin()).Times(1); + EXPECT_CALL(callbacks_.handler_, transferHeaderTo(_, _)).Times(1); + EXPECT_CALL(callbacks_.handler_, messageBegin(_, _, _)).Times(1); + EXPECT_CALL(callbacks_.handler_, messageEnd(_)).Times(1); + EXPECT_CALL(callbacks_.handler_, transferBodyTo(_, _)).Times(1); + EXPECT_CALL(callbacks_.handler_, transportEnd()).Times(1); + + Decoder decoder(protocol_, deserializer_, callbacks_); + + bool buffer_underflow; + EXPECT_EQ(decoder.onData(buffer, buffer_underflow), Network::FilterStatus::Continue); + EXPECT_EQ(buffer_underflow, false); +} + +} // namespace DubboProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git 
a/test/extensions/filters/network/dubbo_proxy/dubbo_protocol_impl_test.cc b/test/extensions/filters/network/dubbo_proxy/dubbo_protocol_impl_test.cc index ec95ff72447d7..f98cb0dee33cd 100644 --- a/test/extensions/filters/network/dubbo_proxy/dubbo_protocol_impl_test.cc +++ b/test/extensions/filters/network/dubbo_proxy/dubbo_protocol_impl_test.cc @@ -18,70 +18,63 @@ using testing::StrictMock; TEST(DubboProtocolImplTest, NotEnoughData) { Buffer::OwnedImpl buffer; - MockProtocolCallbacks cb; - DubboProtocolImpl dubbo_protocol(&cb); + DubboProtocolImpl dubbo_protocol; Protocol::Context context; - EXPECT_FALSE(dubbo_protocol.decode(buffer, &context)); + MessageMetadataSharedPtr metadata = std::make_shared(); + EXPECT_FALSE(dubbo_protocol.decode(buffer, &context, metadata)); buffer.add(std::string(15, 0x00)); - EXPECT_FALSE(dubbo_protocol.decode(buffer, &context)); + EXPECT_FALSE(dubbo_protocol.decode(buffer, &context, metadata)); } TEST(DubboProtocolImplTest, Name) { - MockProtocolCallbacks cb; - DubboProtocolImpl dubbo_protocol(&cb); + DubboProtocolImpl dubbo_protocol; EXPECT_EQ(dubbo_protocol.name(), "dubbo"); } TEST(DubboProtocolImplTest, Normal) { - MockProtocolCallbacks cb; - DubboProtocolImpl dubbo_protocol(&cb); + DubboProtocolImpl dubbo_protocol; // Normal dubbo request message { Buffer::OwnedImpl buffer; Protocol::Context context; + MessageMetadataSharedPtr metadata = std::make_shared(); buffer.add(std::string({'\xda', '\xbb', '\xc2', 0x00})); addInt64(buffer, 1); addInt32(buffer, 1); - EXPECT_CALL(cb, onRequestMessageRvr).WillOnce(Invoke([&](RequestMessage* res) -> void { - EXPECT_EQ(MessageType::Request, res->messageType()); - EXPECT_EQ(SerializationType::Hessian, res->serializationType()); - EXPECT_EQ(1, res->bodySize()); - EXPECT_GE(res->toString().size(), 0); - })); - EXPECT_TRUE(dubbo_protocol.decode(buffer, &context)); + EXPECT_TRUE(dubbo_protocol.decode(buffer, &context, metadata)); + EXPECT_EQ(1, metadata->request_id()); EXPECT_EQ(1, context.body_size_); 
- EXPECT_TRUE(context.is_request_); + EXPECT_EQ(false, context.is_heartbeat_); + EXPECT_EQ(MessageType::Request, metadata->message_type()); } // Normal dubbo response message { Buffer::OwnedImpl buffer; Protocol::Context context; + MessageMetadataSharedPtr metadata = std::make_shared(); buffer.add(std::string({'\xda', '\xbb', 0x42, 20})); addInt64(buffer, 1); addInt32(buffer, 1); - EXPECT_CALL(cb, onResponseMessageRvr).WillOnce(Invoke([](ResponseMessage* res) -> void { - EXPECT_EQ(ResponseStatus::Ok, res->responseStatus()); - EXPECT_EQ(MessageType::Response, res->messageType()); - EXPECT_GE(res->toString().size(), 0); - })); - EXPECT_TRUE(dubbo_protocol.decode(buffer, &context)); + EXPECT_TRUE(dubbo_protocol.decode(buffer, &context, metadata)); + EXPECT_EQ(1, metadata->request_id()); EXPECT_EQ(1, context.body_size_); - EXPECT_FALSE(context.is_request_); + EXPECT_EQ(false, context.is_heartbeat_); + EXPECT_EQ(MessageType::Response, metadata->message_type()); } } TEST(DubboProtocolImplTest, InvalidProtocol) { - MockProtocolCallbacks cb; - DubboProtocolImpl dubbo_protocol(&cb); + DubboProtocolImpl dubbo_protocol; Protocol::Context context; + MessageMetadataSharedPtr metadata = std::make_shared(); // Invalid dubbo magic number { Buffer::OwnedImpl buffer; addInt64(buffer, 0); addInt64(buffer, 0); - EXPECT_THROW_WITH_MESSAGE(dubbo_protocol.decode(buffer, &context), EnvoyException, + EXPECT_THROW_WITH_MESSAGE(dubbo_protocol.decode(buffer, &context, metadata), EnvoyException, "invalid dubbo message magic number 0"); } @@ -93,7 +86,7 @@ TEST(DubboProtocolImplTest, InvalidProtocol) { addInt32(buffer, DubboProtocolImpl::MaxBodySize + 1); std::string exception_string = fmt::format("invalid dubbo message size {}", DubboProtocolImpl::MaxBodySize + 1); - EXPECT_THROW_WITH_MESSAGE(dubbo_protocol.decode(buffer, &context), EnvoyException, + EXPECT_THROW_WITH_MESSAGE(dubbo_protocol.decode(buffer, &context, metadata), EnvoyException, exception_string); } @@ -103,7 +96,7 @@ 
TEST(DubboProtocolImplTest, InvalidProtocol) { buffer.add(std::string({'\xda', '\xbb', '\xc3', 0x00})); addInt64(buffer, 1); addInt32(buffer, 0xff); - EXPECT_THROW_WITH_MESSAGE(dubbo_protocol.decode(buffer, &context), EnvoyException, + EXPECT_THROW_WITH_MESSAGE(dubbo_protocol.decode(buffer, &context, metadata), EnvoyException, "invalid dubbo message serialization type 3"); } @@ -113,14 +106,13 @@ TEST(DubboProtocolImplTest, InvalidProtocol) { buffer.add(std::string({'\xda', '\xbb', 0x42, 0x00})); addInt64(buffer, 1); addInt32(buffer, 0xff); - EXPECT_THROW_WITH_MESSAGE(dubbo_protocol.decode(buffer, &context), EnvoyException, + EXPECT_THROW_WITH_MESSAGE(dubbo_protocol.decode(buffer, &context, metadata), EnvoyException, "invalid dubbo message response status 0"); } } TEST(DubboProtocolImplTest, DubboProtocolConfigFactory) { - MockProtocolCallbacks cb; - auto protocol = NamedProtocolConfigFactory::getFactory(ProtocolType::Dubbo).createProtocol(cb); + auto protocol = NamedProtocolConfigFactory::getFactory(ProtocolType::Dubbo).createProtocol(); EXPECT_EQ(protocol->name(), "dubbo"); EXPECT_EQ(protocol->type(), ProtocolType::Dubbo); } diff --git a/test/extensions/filters/network/dubbo_proxy/filter_test.cc b/test/extensions/filters/network/dubbo_proxy/filter_test.cc deleted file mode 100644 index a2139f3505f73..0000000000000 --- a/test/extensions/filters/network/dubbo_proxy/filter_test.cc +++ /dev/null @@ -1,560 +0,0 @@ -#include "envoy/config/filter/network/dubbo_proxy/v2alpha1/dubbo_proxy.pb.h" -#include "envoy/config/filter/network/dubbo_proxy/v2alpha1/dubbo_proxy.pb.validate.h" - -#include "common/buffer/buffer_impl.h" - -#include "extensions/filters/network/dubbo_proxy/buffer_helper.h" -#include "extensions/filters/network/dubbo_proxy/filter.h" - -#include "test/extensions/filters/network/dubbo_proxy/utility.h" -#include "test/mocks/network/mocks.h" -#include "test/mocks/server/mocks.h" -#include "test/test_common/printers.h" - -#include "gmock/gmock.h" -#include 
"gtest/gtest.h" - -using testing::NiceMock; - -namespace Envoy { -namespace Extensions { -namespace NetworkFilters { -namespace DubboProxy { - -using ConfigProtocolType = envoy::config::filter::network::dubbo_proxy::v2alpha1::ProtocolType; -using ConfigSerializationType = - envoy::config::filter::network::dubbo_proxy::v2alpha1::SerializationType; - -class DubboFilterTest : public testing::Test { -public: - TimeSource& timeSource() { return factory_context_.dispatcher().timeSource(); } - - void initializeFilter() { - for (const auto& counter : store_.counters()) { - counter->reset(); - } - - filter_ = std::make_unique("test.", ConfigProtocolType::Dubbo, - ConfigSerializationType::Hessian2, store_, timeSource()); - filter_->initializeReadFilterCallbacks(read_filter_callbacks_); - filter_->onNewConnection(); - - // NOP currently. - filter_->onAboveWriteBufferHighWatermark(); - filter_->onBelowWriteBufferLowWatermark(); - } - - void writeHessianErrorResponseMessage(Buffer::Instance& buffer, bool is_event, - int64_t request_id) { - uint8_t msg_type = 0x42; // request message, two_way, not event - - if (is_event) { - msg_type = msg_type | 0x20; - } - - buffer.add(std::string{'\xda', '\xbb'}); - buffer.add(static_cast(&msg_type), 1); - buffer.add(std::string{0x46}); // Response status - addInt64(buffer, request_id); // Request Id - buffer.add(std::string{0x00, 0x00, 0x00, 0x01}); // Body Length - } - - void writeHessianExceptionResponseMessage(Buffer::Instance& buffer, bool is_event, - int64_t request_id) { - uint8_t msg_type = 0x42; // request message, two_way, not event - - if (is_event) { - msg_type = msg_type | 0x20; - } - - buffer.add(std::string{'\xda', '\xbb'}); - buffer.add(static_cast(&msg_type), 1); - buffer.add(std::string{0x14}); - addInt64(buffer, request_id); // Request Id - buffer.add(std::string{0x00, 0x00, 0x00, 0x06, // Body Length - '\x90', // return type, exception - 0x05, 't', 'e', 's', 't'}); // return body - } - - void 
writeInvalidResponseMessage(Buffer::Instance& buffer) { - buffer.add(std::string{ - '\xda', '\xbb', 0x43, 0x14, // Response Message Header, illegal serialization id - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, // Request Id - 0x00, 0x00, 0x00, 0x06, // Body Length - '\x94', // return type - 0x05, 't', 'e', 's', 't', // return body - }); - } - - void writeInvalidRequestMessage(Buffer::Instance& buffer) { - buffer.add(std::string{ - '\xda', '\xbb', '\xc3', 0x00, // Response Message Header, illegal serialization id - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, // Request Id - 0x00, 0x00, 0x00, 0x16, // Body Length - 0x05, '2', '.', '0', '.', '2', // Dubbo version - 0x04, 't', 'e', 's', 't', // Service name - 0x05, '0', '.', '0', '.', '0', // Service version - 0x04, 't', 'e', 's', 't', // method name - }); - } - - void writePartialHessianResponseMessage(Buffer::Instance& buffer, bool is_event, - int64_t request_id, bool start) { - - uint8_t msg_type = 0x42; // request message, two_way, not event - - if (is_event) { - msg_type = msg_type | 0x20; - } - - if (start) { - buffer.add(std::string{'\xda', '\xbb'}); - buffer.add(static_cast(&msg_type), 1); - buffer.add(std::string{0x14}); - addInt64(buffer, request_id); // Request Id - buffer.add(std::string{0x00, 0x00, 0x00, 0x06, // Body Length - '\x94'}); // return type, exception - } else { - buffer.add(std::string{0x05, 't', 'e', 's', 't'}); // return body - } - } - - void writeHessianResponseMessage(Buffer::Instance& buffer, bool is_event, int64_t request_id) { - uint8_t msg_type = 0x42; // request message, two_way, not event - - if (is_event) { - msg_type = msg_type | 0x20; - } - - buffer.add(std::string{'\xda', '\xbb'}); - buffer.add(static_cast(&msg_type), 1); - buffer.add(std::string{0x14}); - addInt64(buffer, request_id); // Request Id - buffer.add(std::string{0x00, 0x00, 0x00, 0x06, // Body Length - '\x94', 0x05, 't', 'e', 's', 't'}); // return type, exception - } - - void 
writePartialHessianRequestMessage(Buffer::Instance& buffer, bool is_one_way, bool is_event, - int64_t request_id, bool start) { - uint8_t msg_type = 0xc2; // request message, two_way, not event - if (is_one_way) { - msg_type = msg_type & 0xbf; - } - - if (is_event) { - msg_type = msg_type | 0x20; - } - - if (start) { - buffer.add(std::string{'\xda', '\xbb'}); - buffer.add(static_cast(&msg_type), 1); - buffer.add(std::string{0x00}); - addInt64(buffer, request_id); // Request Id - buffer.add(std::string{0x00, 0x00, 0x00, 0x16, // Body Length - 0x05, '2', '.', '0', '.', '2'}); // Dubbo version - } else { - buffer.add(std::string{ - 0x04, 't', 'e', 's', 't', // Service name - 0x05, '0', '.', '0', '.', '0', // Service version - 0x04, 't', 'e', 's', 't', // method name - }); - } - } - - void writeHessianRequestMessage(Buffer::Instance& buffer, bool is_one_way, bool is_event, - int64_t request_id) { - uint8_t msg_type = 0xc2; // request message, two_way, not event - if (is_one_way) { - msg_type = msg_type & 0xbf; - } - - if (is_event) { - msg_type = msg_type | 0x20; - } - - buffer.add(std::string{'\xda', '\xbb'}); - buffer.add(static_cast(&msg_type), 1); - buffer.add(std::string{0x00}); - addInt64(buffer, request_id); // Request Id - buffer.add(std::string{0x00, 0x00, 0x00, 0x16, // Body Length - 0x05, '2', '.', '0', '.', '2', // Dubbo version - 0x04, 't', 'e', 's', 't', // Service name - 0x05, '0', '.', '0', '.', '0', // Service version - 0x04, 't', 'e', 's', 't'}); // method name - } - - Buffer::OwnedImpl buffer_; - Buffer::OwnedImpl write_buffer_; - Stats::IsolatedStoreImpl store_; - NiceMock read_filter_callbacks_; - NiceMock factory_context_; - std::unique_ptr filter_; -}; - -TEST_F(DubboFilterTest, OnDataHandlesRequestTwoWay) { - initializeFilter(); - writeHessianRequestMessage(buffer_, false, false, 0x0F); - EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::Continue); - EXPECT_EQ(1U, store_.counter("test.request").value()); - EXPECT_EQ(1U, 
store_.counter("test.request_twoway").value()); - EXPECT_EQ(0U, store_.counter("test.request_oneway").value()); - EXPECT_EQ(0U, store_.counter("test.request_event").value()); - EXPECT_EQ(0U, store_.counter("test.request_decoding_error").value()); - EXPECT_EQ(1U, store_.gauge("test.request_active").value()); - EXPECT_EQ(0U, store_.counter("test.response").value()); -} - -TEST_F(DubboFilterTest, OnDataHandlesRequestOneWay) { - initializeFilter(); - bool one_way = true; - writeHessianRequestMessage(buffer_, one_way, false, 0x0F); - EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::Continue); - EXPECT_EQ(1U, store_.counter("test.request").value()); - EXPECT_EQ(0U, store_.counter("test.request_twoway").value()); - EXPECT_EQ(1U, store_.counter("test.request_oneway").value()); - EXPECT_EQ(0U, store_.counter("test.request_event").value()); - EXPECT_EQ(0U, store_.counter("test.request_decoding_error").value()); - EXPECT_EQ(0U, store_.gauge("test.request_active").value()); - EXPECT_EQ(0U, store_.counter("test.response").value()); -} - -TEST_F(DubboFilterTest, OnDataHandlesRequestEvent) { - initializeFilter(); - bool event = true; - writeHessianRequestMessage(buffer_, false, event, 0x0F); - EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::Continue); - EXPECT_EQ(1U, store_.counter("test.request").value()); - EXPECT_EQ(1U, store_.counter("test.request_twoway").value()); - EXPECT_EQ(0U, store_.counter("test.request_oneway").value()); - EXPECT_EQ(1U, store_.counter("test.request_event").value()); - EXPECT_EQ(0U, store_.counter("test.request_decoding_error").value()); - EXPECT_EQ(1U, store_.gauge("test.request_active").value()); - EXPECT_EQ(0U, store_.counter("test.response").value()); -} - -TEST_F(DubboFilterTest, OnDataHandlesMessageSplitAcrossBuffers) { - initializeFilter(); - writePartialHessianRequestMessage(buffer_, false, false, 0x0F, true); - std::string expected_contents = buffer_.toString(); - uint64_t len = buffer_.length(); - - 
EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::Continue); - - // Filter passes on the partial buffer, up to the last 6 bytes which it needs to resume the - // decoder on the next call. - std::string contents = buffer_.toString(); - EXPECT_EQ(16, buffer_.length()); - EXPECT_EQ(len - 6, buffer_.length()); - EXPECT_EQ(expected_contents.substr(0, len - 6), contents); - buffer_.drain(buffer_.length()); - - // Complete the buffer - writePartialHessianRequestMessage(buffer_, false, false, 0x0F, false); - expected_contents = expected_contents.substr(len - 6) + buffer_.toString(); - len = buffer_.length(); - - EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::Continue); - - // Filter buffered bytes from end of first buffer and passes them on now. - contents = buffer_.toString(); - EXPECT_EQ(len + 6, buffer_.length()); - EXPECT_EQ(expected_contents, contents); - - EXPECT_EQ(1U, store_.counter("test.request_twoway").value()); - EXPECT_EQ(0U, store_.counter("test.request_decoding_error").value()); -} - -TEST_F(DubboFilterTest, OnDataHandlesProtocolError) { - initializeFilter(); - writeInvalidRequestMessage(buffer_); - uint64_t len = buffer_.length(); - EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::Continue); - EXPECT_EQ(1U, store_.counter("test.request_decoding_error").value()); - EXPECT_EQ(len, buffer_.length()); - - // Sniffing is now disabled. 
- buffer_.drain(buffer_.length()); - bool one_way = true; - writeHessianRequestMessage(buffer_, one_way, false, 0x0F); - EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::Continue); - EXPECT_EQ(0U, store_.counter("test.request").value()); -} - -TEST_F(DubboFilterTest, OnDataHandlesProtocolErrorOnWrite) { - initializeFilter(); - - // Start the read buffer - writePartialHessianRequestMessage(buffer_, false, false, 0x0F, true); - uint64_t len = buffer_.length(); - - EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::Continue); - len -= buffer_.length(); - - // Disable sniffing - writeInvalidRequestMessage(write_buffer_); - EXPECT_EQ(filter_->onWrite(write_buffer_, false), Network::FilterStatus::Continue); - EXPECT_EQ(1U, store_.counter("test.response_decoding_error").value()); - - // Complete the read buffer - writePartialHessianRequestMessage(buffer_, false, false, 0x0F, false); - len += buffer_.length(); - - EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::Continue); - len -= buffer_.length(); - EXPECT_EQ(0, len); -} - -TEST_F(DubboFilterTest, OnDataStopsSniffingWithTooManyPendingCalls) { - initializeFilter(); - for (int i = 0; i < 64; i++) { - writeHessianRequestMessage(buffer_, false, false, i); - } - - EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::Continue); - EXPECT_EQ(64U, store_.gauge("test.request_active").value()); - buffer_.drain(buffer_.length()); - - // Sniffing is now disabled. 
- writeInvalidRequestMessage(buffer_); - EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::Continue); - EXPECT_EQ(64U, store_.gauge("test.request_active").value()); - EXPECT_EQ(1U, store_.counter("test.request_decoding_error").value()); -} - -TEST_F(DubboFilterTest, OnWriteHandlesResponse) { - initializeFilter(); - writeHessianRequestMessage(buffer_, false, false, 0x0F); - EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::Continue); - EXPECT_EQ(1U, store_.counter("test.request").value()); - EXPECT_EQ(1U, store_.gauge("test.request_active").value()); - - writeHessianResponseMessage(write_buffer_, false, 0x0F); - EXPECT_EQ(filter_->onWrite(write_buffer_, false), Network::FilterStatus::Continue); - - EXPECT_EQ(1U, store_.counter("test.response").value()); - EXPECT_EQ(1U, store_.counter("test.response_success").value()); - EXPECT_EQ(0U, store_.counter("test.response_error").value()); - EXPECT_EQ(0U, store_.counter("test.response_exception").value()); - EXPECT_EQ(0U, store_.counter("test.response_decoding_error").value()); - EXPECT_EQ(0U, store_.gauge("test.request_active").value()); -} - -TEST_F(DubboFilterTest, OnWriteHandlesOutOrOrderResponse) { - initializeFilter(); - - // set up two requests - writeHessianRequestMessage(buffer_, false, false, 1); - writeHessianRequestMessage(buffer_, false, false, 2); - EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::Continue); - EXPECT_EQ(2U, store_.counter("test.request").value()); - EXPECT_EQ(2U, store_.gauge("test.request_active").value()); - - writeHessianResponseMessage(write_buffer_, false, 2); - EXPECT_EQ(filter_->onWrite(write_buffer_, false), Network::FilterStatus::Continue); - - EXPECT_EQ(1U, store_.counter("test.response").value()); - EXPECT_EQ(1U, store_.counter("test.response_success").value()); - EXPECT_EQ(0U, store_.counter("test.response_error").value()); - EXPECT_EQ(1U, store_.gauge("test.request_active").value()); - - write_buffer_.drain(write_buffer_.length()); - 
writeHessianResponseMessage(write_buffer_, false, 1); - EXPECT_EQ(filter_->onWrite(write_buffer_, false), Network::FilterStatus::Continue); - - EXPECT_EQ(2U, store_.counter("test.response").value()); - EXPECT_EQ(2U, store_.counter("test.response_success").value()); - EXPECT_EQ(0U, store_.counter("test.response_error").value()); - EXPECT_EQ(0U, store_.gauge("test.request_active").value()); -} - -TEST_F(DubboFilterTest, OnWriteHandlesFrameSplitAcrossBuffers) { - initializeFilter(); - - writeHessianRequestMessage(buffer_, false, false, 1); - EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::Continue); - - writePartialHessianResponseMessage(write_buffer_, false, 1, true); - std::string expected_contents = write_buffer_.toString(); - uint64_t len = write_buffer_.length(); - - EXPECT_EQ(filter_->onWrite(write_buffer_, false), Network::FilterStatus::Continue); - - // Filter passes on the partial buffer, up to the last 1 bytes which it needs to resume the - // decoder on the next call. - std::string contents = write_buffer_.toString(); - EXPECT_EQ(len - 1, write_buffer_.length()); - EXPECT_EQ(expected_contents.substr(0, len - 1), contents); - - write_buffer_.drain(write_buffer_.length()); - - // Complete the buffer - writePartialHessianResponseMessage(write_buffer_, false, 1, false); - expected_contents = expected_contents.substr(len - 1) + write_buffer_.toString(); - len = write_buffer_.length(); - - EXPECT_EQ(filter_->onWrite(write_buffer_, false), Network::FilterStatus::Continue); - - // Filter buffered bytes from end of first buffer and passes them on now. 
- contents = write_buffer_.toString(); - EXPECT_EQ(len + 1, write_buffer_.length()); - EXPECT_EQ(expected_contents, contents); - - EXPECT_EQ(1U, store_.counter("test.response").value()); - EXPECT_EQ(1U, store_.counter("test.response_success").value()); - EXPECT_EQ(0U, store_.counter("test.response_decoding_error").value()); -} - -TEST_F(DubboFilterTest, OnWriteHandlesResponseException) { - initializeFilter(); - - writeHessianRequestMessage(buffer_, false, false, 1); - EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::Continue); - EXPECT_EQ(1U, store_.counter("test.request").value()); - EXPECT_EQ(1U, store_.gauge("test.request_active").value()); - - writeHessianExceptionResponseMessage(write_buffer_, false, 1); - EXPECT_EQ(filter_->onWrite(write_buffer_, false), Network::FilterStatus::Continue); - - EXPECT_EQ(1U, store_.counter("test.response").value()); - EXPECT_EQ(1U, store_.counter("test.response_success").value()); - EXPECT_EQ(0U, store_.counter("test.response_error").value()); - EXPECT_EQ(1U, store_.counter("test.response_exception").value()); - EXPECT_EQ(0U, store_.counter("test.response_decoding_error").value()); - EXPECT_EQ(0U, store_.gauge("test.request_active").value()); -} - -TEST_F(DubboFilterTest, OnWriteHandlesResponseError) { - initializeFilter(); - writeHessianRequestMessage(buffer_, false, false, 1); - EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::Continue); - EXPECT_EQ(1U, store_.counter("test.request").value()); - EXPECT_EQ(1U, store_.gauge("test.request_active").value()); - - writeHessianErrorResponseMessage(write_buffer_, false, 1); - EXPECT_EQ(filter_->onWrite(write_buffer_, false), Network::FilterStatus::Continue); - - EXPECT_EQ(1U, store_.counter("test.response").value()); - EXPECT_EQ(0U, store_.counter("test.response_success").value()); - EXPECT_EQ(1U, store_.counter("test.response_error").value()); - EXPECT_EQ(0U, store_.counter("test.response_exception").value()); - EXPECT_EQ(0U, 
store_.counter("test.response_decoding_error").value()); - EXPECT_EQ(0U, store_.gauge("test.request_active").value()); -} - -TEST_F(DubboFilterTest, OnWriteHandlesProtocolError) { - initializeFilter(); - writeInvalidResponseMessage(write_buffer_); - uint64_t len = buffer_.length(); - EXPECT_EQ(filter_->onWrite(write_buffer_, false), Network::FilterStatus::Continue); - EXPECT_EQ(1U, store_.counter("test.response_decoding_error").value()); - EXPECT_EQ(len, buffer_.length()); - - // Sniffing is now disabled. - write_buffer_.drain(write_buffer_.length()); - writeHessianResponseMessage(write_buffer_, false, 1); - EXPECT_EQ(filter_->onWrite(write_buffer_, false), Network::FilterStatus::Continue); -} - -TEST_F(DubboFilterTest, OnWriteHandlesProtocolErrorOnData) { - initializeFilter(); - - // Set up a request for the partial write - writeHessianRequestMessage(buffer_, false, false, 1); - EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::Continue); - buffer_.drain(buffer_.length()); - - // Start the write buffer - - writePartialHessianResponseMessage(write_buffer_, false, 1, true); - uint64_t len = write_buffer_.length(); - - EXPECT_EQ(filter_->onWrite(write_buffer_, false), Network::FilterStatus::Continue); - len -= write_buffer_.length(); - - // Force an error on the next request. 
- writeInvalidRequestMessage(buffer_); - EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::Continue); - EXPECT_EQ(1U, store_.counter("test.request_decoding_error").value()); - - // Complete the read buffer - writePartialHessianResponseMessage(write_buffer_, false, 1, false); - len += write_buffer_.length(); - - EXPECT_EQ(filter_->onWrite(write_buffer_, false), Network::FilterStatus::Continue); - len -= write_buffer_.length(); - EXPECT_EQ(0, len); -} - -TEST_F(DubboFilterTest, OnEvent) { - // No active calls - { - initializeFilter(); - filter_->onEvent(Network::ConnectionEvent::RemoteClose); - filter_->onEvent(Network::ConnectionEvent::LocalClose); - EXPECT_EQ(0U, store_.counter("test.cx_destroy_local_with_active_rq").value()); - EXPECT_EQ(0U, store_.counter("test.cx_destroy_remote_with_active_rq").value()); - } - - // Close mid-request - { - initializeFilter(); - writePartialHessianRequestMessage(buffer_, false, false, 1, true); - EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::Continue); - - filter_->onEvent(Network::ConnectionEvent::RemoteClose); - EXPECT_EQ(1U, store_.counter("test.cx_destroy_local_with_active_rq").value()); - - filter_->onEvent(Network::ConnectionEvent::LocalClose); - EXPECT_EQ(1U, store_.counter("test.cx_destroy_remote_with_active_rq").value()); - - buffer_.drain(buffer_.length()); - } - - // Close before response - { - initializeFilter(); - writeHessianRequestMessage(buffer_, false, false, 1); - EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::Continue); - - filter_->onEvent(Network::ConnectionEvent::RemoteClose); - EXPECT_EQ(1U, store_.counter("test.cx_destroy_local_with_active_rq").value()); - - filter_->onEvent(Network::ConnectionEvent::LocalClose); - EXPECT_EQ(1U, store_.counter("test.cx_destroy_remote_with_active_rq").value()); - - buffer_.drain(buffer_.length()); - } - - // Close mid-response - { - initializeFilter(); - writeHessianRequestMessage(buffer_, false, false, 1); - 
EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::Continue); - writePartialHessianResponseMessage(write_buffer_, false, 1, true); - EXPECT_EQ(filter_->onWrite(write_buffer_, false), Network::FilterStatus::Continue); - - filter_->onEvent(Network::ConnectionEvent::RemoteClose); - EXPECT_EQ(0U, store_.counter("test.cx_destroy_local_with_active_rq").value()); - - filter_->onEvent(Network::ConnectionEvent::LocalClose); - EXPECT_EQ(0U, store_.counter("test.cx_destroy_remote_with_active_rq").value()); - - buffer_.drain(buffer_.length()); - write_buffer_.drain(write_buffer_.length()); - } -} - -TEST_F(DubboFilterTest, ResponseWithUnknownSequenceID) { - initializeFilter(); - writeHessianRequestMessage(buffer_, false, false, 1); - EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::Continue); - - writeHessianResponseMessage(write_buffer_, false, 10); - EXPECT_EQ(filter_->onWrite(write_buffer_, false), Network::FilterStatus::Continue); - EXPECT_EQ(1U, store_.counter("test.response_decoding_error").value()); -} - -} // namespace DubboProxy -} // namespace NetworkFilters -} // namespace Extensions -} // namespace Envoy diff --git a/test/extensions/filters/network/dubbo_proxy/hessian_deserializer_impl_test.cc b/test/extensions/filters/network/dubbo_proxy/hessian_deserializer_impl_test.cc index f9bb1b9337a45..38a66a6ffdbf9 100644 --- a/test/extensions/filters/network/dubbo_proxy/hessian_deserializer_impl_test.cc +++ b/test/extensions/filters/network/dubbo_proxy/hessian_deserializer_impl_test.cc @@ -32,10 +32,11 @@ TEST(HessianProtocolTest, deserializeRpcInvocation) { 0x05, '0', '.', '0', '.', '0', // Service version 0x04, 't', 'e', 's', 't', // method name })); - auto invo = deserializer.deserializeRpcInvocation(buffer, buffer.length()); - EXPECT_STREQ("test", invo->getMethodName().c_str()); - EXPECT_STREQ("test", invo->getServiceName().c_str()); - EXPECT_STREQ("0.0.0", invo->getServiceVersion().c_str()); + MessageMetadataSharedPtr metadata = 
std::make_shared(); + deserializer.deserializeRpcInvocation(buffer, buffer.length(), metadata); + EXPECT_STREQ("test", metadata->method_name().value().c_str()); + EXPECT_STREQ("test", metadata->service_name().c_str()); + EXPECT_STREQ("0.0.0", metadata->service_version().value().c_str()); } // incorrect body size @@ -49,8 +50,10 @@ TEST(HessianProtocolTest, deserializeRpcInvocation) { })); std::string exception_string = fmt::format("RpcInvocation size({}) large than body size({})", buffer.length(), buffer.length() - 1); - EXPECT_THROW_WITH_MESSAGE(deserializer.deserializeRpcInvocation(buffer, buffer.length() - 1), - EnvoyException, exception_string); + MessageMetadataSharedPtr metadata = std::make_shared(); + EXPECT_THROW_WITH_MESSAGE( + deserializer.deserializeRpcInvocation(buffer, buffer.length() - 1, metadata), + EnvoyException, exception_string); } } diff --git a/test/extensions/filters/network/dubbo_proxy/mocks.cc b/test/extensions/filters/network/dubbo_proxy/mocks.cc index b12f2d6ec8e5b..d6ecb1e10546d 100644 --- a/test/extensions/filters/network/dubbo_proxy/mocks.cc +++ b/test/extensions/filters/network/dubbo_proxy/mocks.cc @@ -2,6 +2,7 @@ #include +#include "common/protobuf/protobuf.h" #include "common/protobuf/utility.h" #include "gtest/gtest.h" @@ -25,8 +26,9 @@ MockDecoderEventHandler::MockDecoderEventHandler() { ON_CALL(*this, transferBodyTo(_, _)).WillByDefault(Return(Network::FilterStatus::Continue)); } -MockProtocolCallbacks::MockProtocolCallbacks() {} -MockProtocolCallbacks::~MockProtocolCallbacks() {} +MockDecoderCallbacks::MockDecoderCallbacks() { + ON_CALL(*this, newDecoderEventHandler()).WillByDefault(Return(&handler_)); +} MockProtocol::MockProtocol() { ON_CALL(*this, name()).WillByDefault(ReturnRef(name_)); @@ -50,8 +52,16 @@ MockDecoderFilter::MockDecoderFilter() { ON_CALL(*this, transportEnd()).WillByDefault(Return(Network::FilterStatus::Continue)); ON_CALL(*this, messageBegin(_, _, _)).WillByDefault(Return(Network::FilterStatus::Continue)); 
ON_CALL(*this, messageEnd(_)).WillByDefault(Return(Network::FilterStatus::Continue)); - ON_CALL(*this, transferHeaderTo(_, _)).WillByDefault(Return(Network::FilterStatus::Continue)); - ON_CALL(*this, transferBodyTo(_, _)).WillByDefault(Return(Network::FilterStatus::Continue)); + ON_CALL(*this, transferHeaderTo(_, _)) + .WillByDefault(Invoke([&](Buffer::Instance& buf, size_t size) -> Network::FilterStatus { + buf.drain(size); + return Network::FilterStatus::Continue; + })); + ON_CALL(*this, transferBodyTo(_, _)) + .WillByDefault(Invoke([&](Buffer::Instance& buf, size_t size) -> Network::FilterStatus { + buf.drain(size); + return Network::FilterStatus::Continue; + })); } MockDecoderFilter::~MockDecoderFilter() {} @@ -70,7 +80,7 @@ MockDirectResponse::~MockDirectResponse() {} MockFilterConfigFactory::MockFilterConfigFactory() : MockFactoryBase("envoy.filters.dubbo.mock_filter"), - mock_filter_(std::make_unique>()) {} + mock_filter_(std::make_shared>()) {} MockFilterConfigFactory::~MockFilterConfigFactory() {} diff --git a/test/extensions/filters/network/dubbo_proxy/mocks.h b/test/extensions/filters/network/dubbo_proxy/mocks.h index cbe6c1e13d365..4de8c76982ca1 100644 --- a/test/extensions/filters/network/dubbo_proxy/mocks.h +++ b/test/extensions/filters/network/dubbo_proxy/mocks.h @@ -1,5 +1,8 @@ #pragma once +#include "common/protobuf/protobuf.h" +#include "common/protobuf/utility.h" + #include "extensions/filters/network/dubbo_proxy/decoder_event_handler.h" #include "extensions/filters/network/dubbo_proxy/filters/factory_base.h" #include "extensions/filters/network/dubbo_proxy/filters/filter.h" @@ -31,10 +34,21 @@ class MockDecoderEventHandler : public DecoderEventHandler { MOCK_METHOD2(transferBodyTo, Network::FilterStatus(Buffer::Instance&, size_t)); }; +class MockDecoderCallbacks : public DecoderCallbacks { +public: + MockDecoderCallbacks(); + ~MockDecoderCallbacks() = default; + + MOCK_METHOD0(newDecoderEventHandler, DecoderEventHandler*()); + 
MOCK_METHOD1(onHeartbeat, void(MessageMetadataSharedPtr)); + + MockDecoderEventHandler handler_; +}; + class MockProtocolCallbacks : public ProtocolCallbacks { public: - MockProtocolCallbacks(); - ~MockProtocolCallbacks(); + MockProtocolCallbacks() = default; + ~MockProtocolCallbacks() = default; void onRequestMessage(RequestMessagePtr&& req) override { onRequestMessageRvr(req.get()); } void onResponseMessage(ResponseMessagePtr&& res) override { onResponseMessageRvr(res.get()); } @@ -67,7 +81,7 @@ class MockDeserializer : public Deserializer { // DubboProxy::Deserializer MOCK_CONST_METHOD0(name, const std::string&()); MOCK_CONST_METHOD0(type, SerializationType()); - MOCK_METHOD2(deserializeRpcInvocation, RpcInvocationPtr(Buffer::Instance&, size_t)); + MOCK_METHOD3(deserializeRpcInvocation, void(Buffer::Instance&, size_t, MessageMetadataSharedPtr)); MOCK_METHOD2(deserializeRpcResult, RpcResultPtr(Buffer::Instance&, size_t)); MOCK_METHOD3(serializeRpcResult, size_t(Buffer::Instance&, const std::string&, RpcResponseType)); diff --git a/test/extensions/filters/network/dubbo_proxy/router_test.cc b/test/extensions/filters/network/dubbo_proxy/router_test.cc index 59d071a8d4a2a..a5fec366c8108 100644 --- a/test/extensions/filters/network/dubbo_proxy/router_test.cc +++ b/test/extensions/filters/network/dubbo_proxy/router_test.cc @@ -45,7 +45,6 @@ class TestNamedProtocolConfigFactory : public NamedProtocolConfigFactory { public: TestNamedProtocolConfigFactory(std::function f) : f_(f) {} - ProtocolPtr createProtocol(ProtocolCallbacks&) override { return ProtocolPtr{f_()}; } ProtocolPtr createProtocol() override { return ProtocolPtr{f_()}; } std::string name() override { return ProtocolNames::get().fromType(ProtocolType::Dubbo); } From 000d278896ca9b5181293130de107d2fb733b9a0 Mon Sep 17 00:00:00 2001 From: danzh Date: Fri, 12 Apr 2019 09:43:49 -0400 Subject: [PATCH 110/165] Implement QuicStreamBufferAllocator (#6550) * Implement QuicStreamBufferAllocator Signed-off-by: Dan 
Zhang --- bazel/external/quiche.BUILD | 15 ++++++++++++ bazel/repository_locations.bzl | 6 ++--- .../quic_listeners/quiche/platform/BUILD | 6 ++++- .../quic_stream_buffer_allocator_impl.h | 18 ++++++++++++++ .../quic_listeners/quiche/platform/BUILD | 2 ++ .../quiche/platform/quic_platform_test.cc | 24 ++++++++++++++++--- 6 files changed, 64 insertions(+), 7 deletions(-) create mode 100644 source/extensions/quic_listeners/quiche/platform/quic_stream_buffer_allocator_impl.h diff --git a/bazel/external/quiche.BUILD b/bazel/external/quiche.BUILD index 69cae5ea7c61f..d3b82aad1b915 100644 --- a/bazel/external/quiche.BUILD +++ b/bazel/external/quiche.BUILD @@ -169,6 +169,7 @@ cc_library( "quiche/quic/platform/api/quic_ptr_util.h", "quiche/quic/platform/api/quic_reference_counted.h", "quiche/quic/platform/api/quic_server_stats.h", + "quiche/quic/platform/api/quic_stream_buffer_allocator.h", "quiche/quic/platform/api/quic_string_piece.h", "quiche/quic/platform/api/quic_test_output.h", "quiche/quic/platform/api/quic_uint128.h", @@ -223,6 +224,20 @@ cc_library( deps = [":quic_platform"], ) +cc_library( + name = "quic_buffer_allocator_lib", + srcs = [ + "quiche/quic/core/quic_buffer_allocator.cc", + "quiche/quic/core/quic_simple_buffer_allocator.cc", + ], + hdrs = [ + "quiche/quic/core/quic_buffer_allocator.h", + "quiche/quic/core/quic_simple_buffer_allocator.h", + ], + visibility = ["//visibility:public"], + deps = [":quic_platform_export"], +) + envoy_cc_test( name = "http2_platform_test", srcs = envoy_select_quiche( diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index b363f4f4bfc83..f2f8671998833 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -236,8 +236,8 @@ REPOSITORY_LOCATIONS = dict( urls = ["https://github.com/google/subpar/archive/1.3.0.tar.gz"], ), com_googlesource_quiche = dict( - # Static snapshot of https://quiche.googlesource.com/quiche/+archive/4fbea5de9afdf30611b27afd54c45a596944f9c2.tar.gz - 
sha256 = "2cf9f5ea62a03ca0d8773fe4f56949b72c28ac5b1bcf43d850a571f4e32add2a", - urls = ["https://storage.googleapis.com/quiche-envoy-integration/4fbea5de9afdf30611b27afd54c45a596944f9c2.tar.gz"], + # Static snapshot of https://quiche.googlesource.com/quiche/+archive/ba6354aa1b39f3d9788ead909ad3e678ac863938.tar.gz + sha256 = "4598537810c3d343c32333c5367fcb652638018118f7f4e844e080405d9e73bb", + urls = ["https://storage.googleapis.com/quiche-envoy-integration/ba6354aa1b39f3d9788ead909ad3e678ac863938.tar.gz"], ), ) diff --git a/source/extensions/quic_listeners/quiche/platform/BUILD b/source/extensions/quic_listeners/quiche/platform/BUILD index 4244e69f7c91b..0cfc9373b4e9c 100644 --- a/source/extensions/quic_listeners/quiche/platform/BUILD +++ b/source/extensions/quic_listeners/quiche/platform/BUILD @@ -102,6 +102,7 @@ envoy_cc_library( "quic_ptr_util_impl.h", "quic_server_stats_impl.h", "quic_string_piece_impl.h", + "quic_stream_buffer_allocator_impl.h", "quic_uint128_impl.h", ] + envoy_select_quiche([ "quic_expect_bug_impl.h", @@ -121,7 +122,10 @@ envoy_cc_library( "googletest", ], visibility = ["//visibility:public"], - deps = ["@com_googlesource_quiche//:quic_platform_export"] + envoy_select_quiche([ + deps = [ + "@com_googlesource_quiche//:quic_platform_export", + "@com_googlesource_quiche//:quic_buffer_allocator_lib", + ] + envoy_select_quiche([ ":quic_platform_logging_impl_lib", "//include/envoy/thread:thread_interface", "//source/common/common:assert_lib", diff --git a/source/extensions/quic_listeners/quiche/platform/quic_stream_buffer_allocator_impl.h b/source/extensions/quic_listeners/quiche/platform/quic_stream_buffer_allocator_impl.h new file mode 100644 index 0000000000000..c2a40a0e7427a --- /dev/null +++ b/source/extensions/quic_listeners/quiche/platform/quic_stream_buffer_allocator_impl.h @@ -0,0 +1,18 @@ +#pragma once + +// NOLINT(namespace-envoy) + +// This file is part of the QUICHE platform implementation, and is not to be +// consumed or referenced 
directly by other Envoy code. It serves purely as a +// porting layer for QUICHE. + +#include "quiche/quic/core/quic_simple_buffer_allocator.h" + +namespace quic { + +// Implements the interface required by +// https://quiche.googlesource.com/quiche/+/refs/heads/master/quic/platform/api/quic_stream_buffer_allocator.h +// with the default implementation provided by QUICHE. +using QuicStreamBufferAllocatorImpl = SimpleBufferAllocator; + +} // namespace quic diff --git a/test/extensions/quic_listeners/quiche/platform/BUILD b/test/extensions/quic_listeners/quiche/platform/BUILD index 1518271bc8d2a..d2ec6aa11f9b1 100644 --- a/test/extensions/quic_listeners/quiche/platform/BUILD +++ b/test/extensions/quic_listeners/quiche/platform/BUILD @@ -31,6 +31,8 @@ envoy_cc_test( ], external_deps = ["quiche_quic_platform"], deps = [ + "//source/common/memory:stats_lib", + "//test/common/stats:stat_test_utility_lib", "//test/extensions/transport_sockets/tls:ssl_test_utils", "//test/mocks/api:api_mocks", "//test/test_common:logging_lib", diff --git a/test/extensions/quic_listeners/quiche/platform/quic_platform_test.cc b/test/extensions/quic_listeners/quiche/platform/quic_platform_test.cc index 294e58e708ba0..df87edca85dd4 100644 --- a/test/extensions/quic_listeners/quiche/platform/quic_platform_test.cc +++ b/test/extensions/quic_listeners/quiche/platform/quic_platform_test.cc @@ -9,8 +9,10 @@ #include #include +#include "common/memory/stats.h" #include "common/network/utility.h" +#include "test/common/stats/stat_test_utility.h" #include "test/extensions/transport_sockets/tls/ssl_test_utility.h" #include "test/mocks/api/mocks.h" #include "test/test_common/environment.h" @@ -42,20 +44,20 @@ #include "quiche/quic/platform/api/quic_server_stats.h" #include "quiche/quic/platform/api/quic_sleep.h" #include "quiche/quic/platform/api/quic_stack_trace.h" +#include "quiche/quic/platform/api/quic_stream_buffer_allocator.h" #include "quiche/quic/platform/api/quic_string_piece.h" #include 
"quiche/quic/platform/api/quic_test_output.h" #include "quiche/quic/platform/api/quic_thread.h" #include "quiche/quic/platform/api/quic_uint128.h" -using testing::_; -using testing::HasSubstr; - // Basic tests to validate functioning of the QUICHE quic platform // implementation. For platform APIs in which the implementation is a simple // typedef/passthrough to a std:: or absl:: construct, the tests are kept // minimal, and serve primarily to verify the APIs compile and link without // issue. +using testing::_; +using testing::HasSubstr; using testing::Return; namespace quic { @@ -572,5 +574,21 @@ TEST_F(QuicPlatformTest, FailToPickUnsedPort) { EXPECT_DEATH_LOG_TO_STDERR(QuicPickUnusedPortOrDie(), "Failed to pick a port for test."); } +TEST_F(QuicPlatformTest, TestEnvoyQuicBufferAllocator) { + bool deterministic_stats = Envoy::Stats::TestUtil::hasDeterministicMallocStats(); + const size_t start_mem = Envoy::Memory::Stats::totalCurrentlyAllocated(); + QuicStreamBufferAllocator allocator; + char* p = allocator.New(1024); + if (deterministic_stats) { + EXPECT_LT(start_mem, Envoy::Memory::Stats::totalCurrentlyAllocated()); + } + EXPECT_NE(nullptr, p); + memset(p, 'a', 1024); + allocator.Delete(p); + if (deterministic_stats) { + EXPECT_EQ(start_mem, Envoy::Memory::Stats::totalCurrentlyAllocated()); + } +} + } // namespace } // namespace quic From cdaeb1344e4136d8c9ac33507005159c93087a49 Mon Sep 17 00:00:00 2001 From: Andres Guedez <34292400+AndresGuedez@users.noreply.github.com> Date: Fri, 12 Apr 2019 11:39:09 -0400 Subject: [PATCH 111/165] http: mitigate delayed close timeout race with connection write buffer flush (#6437) Change the behavior of the delayed_close_timeout such that it won't trigger unless there has been at least a delayed_close_timeout period of inactivity after the last write event on the socket pending to be closed. 
This mitigates a race where a slow client and/or low timeout value would cause the socket to be closed while data was actively being written to the socket. Note that this change does not eliminate this race since a slow client could still be considered idle by the updated timeout logic, but this should be very rare when useful values (i.e., >1s to avoid the race condition on close that this timer addresses) are configured. Risk Level: Medium Testing: New unit tests added Docs Changes: Updated version history and HttpConnectionManager proto doc Fixes #6392 Signed-off-by: Andres Guedez --- .../v2/http_connection_manager.proto | 21 +- docs/root/intro/version_history.rst | 1 + include/envoy/network/connection.h | 1 + source/common/network/connection_impl.cc | 102 +++++--- source/common/network/connection_impl.h | 22 +- test/common/network/connection_impl_test.cc | 237 ++++++++++++++---- .../integration/tcp_proxy_integration_test.cc | 2 +- tools/spelling_dictionary.txt | 1 + 8 files changed, 291 insertions(+), 96 deletions(-) diff --git a/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto b/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto index b2bc2b8e3c3e2..18a479d3d7f97 100644 --- a/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto +++ b/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto @@ -200,8 +200,14 @@ message HttpConnectionManager { // The delayed close timeout is for downstream connections managed by the HTTP connection manager. // It is defined as a grace period after connection close processing has been locally initiated - // during which Envoy will flush the write buffers for the connection and await the peer to close - // (i.e., a TCP FIN/RST is received by Envoy from the downstream connection). 
+ // during which Envoy will wait for the peer to close (i.e., a TCP FIN/RST is received by Envoy + // from the downstream connection) prior to Envoy closing the socket associated with that + // connection. + // NOTE: This timeout is enforced even when the socket associated with the downstream connection + // is pending a flush of the write buffer. However, any progress made writing data to the socket + // will restart the timer associated with this timeout. This means that the total grace period for + // a socket in this state will be + // +. // // Delaying Envoy's connection close and giving the peer the opportunity to initiate the close // sequence mitigates a race condition that exists when downstream clients do not drain/process @@ -213,8 +219,15 @@ message HttpConnectionManager { // // The default timeout is 1000 ms if this option is not specified. // - // A value of 0 will completely disable delayed close processing, and the downstream connection's - // socket will be closed immediately after the write flush is completed. + // .. NOTE:: + // To be useful in avoiding the race condition described above, this timeout must be set + // to *at least* +<100ms to account for + // a reasonsable "worst" case processing time for a full iteration of Envoy's event loop>. + // + // .. WARNING:: + // A value of 0 will completely disable delayed close processing. When disabled, the downstream + // connection's socket will be closed immediately after the write flush is completed or will + // never close if the write flush does not complete. 
google.protobuf.Duration delayed_close_timeout = 26 [(gogoproto.stdduration) = true]; // Configuration for :ref:`HTTP access logs ` diff --git a/docs/root/intro/version_history.rst b/docs/root/intro/version_history.rst index ed65a13e15ca3..6f9c814fd80a6 100644 --- a/docs/root/intro/version_history.rst +++ b/docs/root/intro/version_history.rst @@ -4,6 +4,7 @@ Version history 1.11.0 (Pending) ================ * dubbo_proxy: support the :ref:`Dubbo proxy filter `. +* http: mitigated a race condition with the :ref:`delayed_close_timeout` where it could trigger while actively flushing a pending write buffer for a downstream connection. * upstream: added :ref:`upstream_cx_pool_overflow ` for the connection pool circuit breaker. 1.10.0 (Apr 5, 2019) diff --git a/include/envoy/network/connection.h b/include/envoy/network/connection.h index 28420a3d5f4a3..20cf091dcae94 100644 --- a/include/envoy/network/connection.h +++ b/include/envoy/network/connection.h @@ -254,6 +254,7 @@ class Connection : public Event::DeferredDeletable, public FilterManager { /** * Set the timeout for delayed connection close()s. + * This can only be called prior to issuing a close() on the connection. 
* @param timeout The timeout value in milliseconds */ virtual void setDelayedCloseTimeout(std::chrono::milliseconds timeout) PURE; diff --git a/source/common/network/connection_impl.cc b/source/common/network/connection_impl.cc index e4e1753290a43..871ac6e35433c 100644 --- a/source/common/network/connection_impl.cc +++ b/source/common/network/connection_impl.cc @@ -99,6 +99,7 @@ void ConnectionImpl::close(ConnectionCloseType type) { uint64_t data_to_write = write_buffer_->length(); ENVOY_CONN_LOG(debug, "closing data_to_write={} type={}", *this, data_to_write, enumToInt(type)); + const bool delayed_close_timeout_set = delayedCloseTimeout().count() > 0; if (data_to_write == 0 || type == ConnectionCloseType::NoFlush || !transport_socket_->canFlushClose()) { if (data_to_write > 0) { @@ -107,13 +108,25 @@ void ConnectionImpl::close(ConnectionCloseType type) { transport_socket_->doWrite(*write_buffer_, true); } - closeSocket(ConnectionEvent::LocalClose); + if (type == ConnectionCloseType::FlushWriteAndDelay && delayed_close_timeout_set) { + // The socket is being closed and either there is no more data to write or the data can not be + // flushed (!transport_socket_->canFlushClose()). Since a delayed close has been requested, + // start the delayed close timer if it hasn't been done already by a previous close(). + // NOTE: Even though the delayed_close_state_ is being set to CloseAfterFlushAndWait, since + // a write event is not being registered for the socket, this logic is simply setting the + // timer and waiting for it to trigger to close the socket. 
+ if (!inDelayedClose()) { + initializeDelayedCloseTimer(); + delayed_close_state_ = DelayedCloseState::CloseAfterFlushAndWait; + } + } else { + closeSocket(ConnectionEvent::LocalClose); + } } else { ASSERT(type == ConnectionCloseType::FlushWrite || type == ConnectionCloseType::FlushWriteAndDelay); - // No need to continue if a FlushWrite/FlushWriteAndDelay has already been issued and there is a - // pending delayed close. + // If there is a pending delayed close, simply update the delayed close state. // // An example of this condition manifests when a downstream connection is closed early by Envoy, // such as when a route can't be matched: @@ -123,35 +136,31 @@ void ConnectionImpl::close(ConnectionCloseType type) { // ConnectionManagerImpl::checkForDeferredClose() // 2) A second close is issued by a subsequent call to // ConnectionManagerImpl::checkForDeferredClose() prior to returning from onData() - if (delayed_close_) { + if (inDelayedClose()) { + // Validate that a delayed close timer is already enabled unless it was disabled via + // configuration. + ASSERT(!delayed_close_timeout_set || delayed_close_timer_ != nullptr); + if (type == ConnectionCloseType::FlushWrite || !delayed_close_timeout_set) { + delayed_close_state_ = DelayedCloseState::CloseAfterFlush; + } else { + delayed_close_state_ = DelayedCloseState::CloseAfterFlushAndWait; + } return; } - delayed_close_ = true; - const bool delayed_close_timeout_set = delayedCloseTimeout().count() > 0; - - // NOTE: the delayed close timeout (if set) affects both FlushWrite and FlushWriteAndDelay - // closes: - // 1. For FlushWrite, the timeout sets an upper bound on how long to wait for the flush to - // complete before the connection is locally closed. - // 2. For FlushWriteAndDelay, the timeout specifies an upper bound on how long to wait for the - // flush to complete and the peer to close the connection before it is locally closed. 
- // All close types that follow do not actually close() the socket immediately so that buffered // data can be written. However, we do want to stop reading to apply TCP backpressure. read_enabled_ = false; - // Force a closeSocket() after the write buffer is flushed if the close_type calls for it or if - // no delayed close timeout is set. - close_after_flush_ = !delayed_close_timeout_set || type == ConnectionCloseType::FlushWrite; - - // Create and activate a timer which will immediately close the connection if triggered. - // A config value of 0 disables the timeout. + // NOTE: At this point, it's already been validated that the connection is not already in + // delayed close processing and therefore the timer has not yet been created. if (delayed_close_timeout_set) { - delayed_close_timer_ = dispatcher_.createTimer([this]() -> void { onDelayedCloseTimeout(); }); - ENVOY_CONN_LOG(debug, "setting delayed close timer with timeout {} ms", *this, - delayedCloseTimeout().count()); - delayed_close_timer_->enableTimer(delayedCloseTimeout()); + initializeDelayedCloseTimer(); + delayed_close_state_ = (type == ConnectionCloseType::FlushWrite) + ? DelayedCloseState::CloseAfterFlush + : DelayedCloseState::CloseAfterFlushAndWait; + } else { + delayed_close_state_ = DelayedCloseState::CloseAfterFlush; } file_event_->setEnabled(Event::FileReadyType::Write | @@ -162,7 +171,7 @@ void ConnectionImpl::close(ConnectionCloseType type) { Connection::State ConnectionImpl::state() const { if (!ioHandle().isOpen()) { return State::Closed; - } else if (delayed_close_) { + } else if (inDelayedClose()) { return State::Closing; } else { return State::Open; @@ -534,21 +543,37 @@ void ConnectionImpl::onWriteReady() { uint64_t new_buffer_size = write_buffer_->length(); updateWriteBufferStats(result.bytes_processed_, new_buffer_size); + // NOTE: If the delayed_close_timer_ is set, it must only trigger after a delayed_close_timeout_ + // period of inactivity from the last write event. 
Therefore, the timer must be reset to its + // original timeout value unless the socket is going to be closed as a result of the doWrite(). + if (result.action_ == PostIoAction::Close) { // It is possible (though unlikely) for the connection to have already been closed during the // write callback. This can happen if we manage to complete the SSL handshake in the write // callback, raise a connected event, and close the connection. closeSocket(ConnectionEvent::RemoteClose); - } else if ((close_after_flush_ && new_buffer_size == 0) || bothSidesHalfClosed()) { + } else if ((inDelayedClose() && new_buffer_size == 0) || bothSidesHalfClosed()) { ENVOY_CONN_LOG(debug, "write flush complete", *this); - closeSocket(ConnectionEvent::LocalClose); - } else if (result.action_ == PostIoAction::KeepOpen && result.bytes_processed_ > 0) { - for (BytesSentCb& cb : bytes_sent_callbacks_) { - cb(result.bytes_processed_); - - // If a callback closes the socket, stop iterating. - if (!ioHandle().isOpen()) { - return; + if (delayed_close_state_ == DelayedCloseState::CloseAfterFlushAndWait) { + ASSERT(delayed_close_timer_ != nullptr); + delayed_close_timer_->enableTimer(delayedCloseTimeout()); + } else { + ASSERT(bothSidesHalfClosed() || delayed_close_state_ == DelayedCloseState::CloseAfterFlush); + closeSocket(ConnectionEvent::LocalClose); + } + } else { + ASSERT(result.action_ == PostIoAction::KeepOpen); + if (delayed_close_timer_ != nullptr) { + delayed_close_timer_->enableTimer(delayedCloseTimeout()); + } + if (result.bytes_processed_ > 0) { + for (BytesSentCb& cb : bytes_sent_callbacks_) { + cb(result.bytes_processed_); + + // If a callback closes the socket, stop iterating. 
+ if (!ioHandle().isOpen()) { + return; + } } } } @@ -587,6 +612,7 @@ bool ConnectionImpl::bothSidesHalfClosed() { } void ConnectionImpl::onDelayedCloseTimeout() { + delayed_close_timer_.reset(); ENVOY_CONN_LOG(debug, "triggered delayed close", *this); if (connection_stats_ != nullptr && connection_stats_->delayed_close_timeouts_ != nullptr) { connection_stats_->delayed_close_timeouts_->inc(); @@ -594,6 +620,14 @@ void ConnectionImpl::onDelayedCloseTimeout() { closeSocket(ConnectionEvent::LocalClose); } +void ConnectionImpl::initializeDelayedCloseTimer() { + const auto timeout = delayedCloseTimeout().count(); + ASSERT(delayed_close_timer_ == nullptr && timeout > 0); + delayed_close_timer_ = dispatcher_.createTimer([this]() -> void { onDelayedCloseTimeout(); }); + ENVOY_CONN_LOG(debug, "setting delayed close timer with timeout {} ms", *this, timeout); + delayed_close_timer_->enableTimer(delayedCloseTimeout()); +} + absl::string_view ConnectionImpl::transportFailureReason() const { return transport_socket_->failureReason(); } diff --git a/source/common/network/connection_impl.h b/source/common/network/connection_impl.h index c2fb2584746d2..97e0e21c7600c 100644 --- a/source/common/network/connection_impl.h +++ b/source/common/network/connection_impl.h @@ -122,6 +122,8 @@ class ConnectionImpl : public virtual Connection, static uint64_t nextGlobalIdForTest() { return next_global_id_; } void setDelayedCloseTimeout(std::chrono::milliseconds timeout) override { + // Validate that this is only called prior to issuing a close() or closeSocket(). + ASSERT(delayed_close_timer_ == nullptr && ioHandle().isOpen()); delayed_close_timeout_ = timeout; } std::chrono::milliseconds delayedCloseTimeout() const override { return delayed_close_timeout_; } @@ -167,16 +169,32 @@ class ConnectionImpl : public virtual Connection, // Callback issued when a delayed close timeout triggers. 
void onDelayedCloseTimeout(); + void initializeDelayedCloseTimer(); + bool inDelayedClose() const { return delayed_close_state_ != DelayedCloseState::None; } + static std::atomic next_global_id_; + // States associated with delayed closing of the connection (i.e., when the underlying socket is + // not immediately close()d as a result of a ConnectionImpl::close()). + enum class DelayedCloseState { + None, + // The socket will be closed immediately after the buffer is flushed _or_ if a period of + // inactivity after the last write event greater than or equal to delayed_close_timeout_ has + // elapsed. + CloseAfterFlush, + // The socket will be closed after a grace period of delayed_close_timeout_ has elapsed after + // the socket is flushed _or_ if a period of inactivity after the last write event greater than + // or equal to delayed_close_timeout_ has elapsed. + CloseAfterFlushAndWait + }; + DelayedCloseState delayed_close_state_{DelayedCloseState::None}; + Event::Dispatcher& dispatcher_; const uint64_t id_; Event::TimerPtr delayed_close_timer_; std::list callbacks_; std::list bytes_sent_callbacks_; bool read_enabled_{true}; - bool close_after_flush_{false}; - bool delayed_close_{false}; bool above_high_watermark_{false}; bool detect_early_close_{true}; bool enable_half_close_{false}; diff --git a/test/common/network/connection_impl_test.cc b/test/common/network/connection_impl_test.cc index 95a6c6062f5c7..66b3b2bcc79c4 100644 --- a/test/common/network/connection_impl_test.cc +++ b/test/common/network/connection_impl_test.cc @@ -176,9 +176,11 @@ class ConnectionImplTest : public testing::TestWithParam { protected: struct ConnectionMocks { - std::unique_ptr> dispatcher; - Event::MockTimer* timer; - std::unique_ptr> transport_socket; + std::unique_ptr> dispatcher_; + Event::MockTimer* timer_; + std::unique_ptr> transport_socket_; + NiceMock* file_event_; + Event::FileReadyCb* file_ready_cb_; }; ConnectionMocks createConnectionMocks() { @@ -193,17 +195,20 @@ class 
ConnectionImplTest : public testing::TestWithParam { // This timer will be returned (transferring ownership) to the ConnectionImpl when createTimer() // is called to allocate the delayed close timer. - auto timer = new Event::MockTimer(dispatcher.get()); + Event::MockTimer* timer = new Event::MockTimer(dispatcher.get()); - auto file_event = std::make_unique>(); - EXPECT_CALL(*dispatcher, createFileEvent_(0, _, _, _)).WillOnce(Return(file_event.release())); + NiceMock* file_event = new NiceMock; + EXPECT_CALL(*dispatcher, createFileEvent_(0, _, _, _)) + .WillOnce(DoAll(SaveArg<1>(&file_ready_cb_), Return(file_event))); auto transport_socket = std::make_unique>(); - EXPECT_CALL(*transport_socket, canFlushClose()).WillOnce(Return(true)); + EXPECT_CALL(*transport_socket, canFlushClose()).WillRepeatedly(Return(true)); - return ConnectionMocks{std::move(dispatcher), timer, std::move(transport_socket)}; + return ConnectionMocks{std::move(dispatcher), timer, std::move(transport_socket), file_event, + &file_ready_cb_}; } + Event::FileReadyCb file_ready_cb_; Event::SimulatedTimeSystem time_system_; Api::ApiPtr api_; Event::DispatcherPtr dispatcher_; @@ -987,41 +992,6 @@ TEST_P(ConnectionImplTest, FlushWriteCloseTest) { dispatcher_->run(Event::Dispatcher::RunType::Block); } -// Test that a FlushWrite close will create and enable a timer which closes the connection when -// triggered. -TEST_P(ConnectionImplTest, FlushWriteCloseTimeoutTest) { - ConnectionMocks mocks = createConnectionMocks(); - IoHandlePtr io_handle = std::make_unique(0); - auto server_connection = std::make_unique( - *mocks.dispatcher, - std::make_unique(std::move(io_handle), nullptr, nullptr), - std::move(mocks.transport_socket), true); - - InSequence s1; - - // Enable delayed connection close processing by setting a non-zero timeout value. The actual - // value (> 0) doesn't matter since the callback is triggered below. 
- server_connection->setDelayedCloseTimeout(std::chrono::milliseconds(100)); - - NiceMockConnectionStats stats; - server_connection->setConnectionStats(stats.toBufferStats()); - - Buffer::OwnedImpl data("data"); - server_connection->write(data, false); - - // Data is pending in the write buffer, which will trigger the FlushWrite close to go into delayed - // close processing. - EXPECT_CALL(*mocks.timer, enableTimer(_)).Times(1); - server_connection->close(ConnectionCloseType::FlushWrite); - - EXPECT_CALL(stats.delayed_close_timeouts_, inc()).Times(1); - // Since the callback is being invoked manually, disableTimer() will be called when the connection - // is closed by the callback. - EXPECT_CALL(*mocks.timer, disableTimer()).Times(1); - // Issue the delayed close callback to ensure connection is closed. - mocks.timer->callback_(); -} - // Test that a FlushWriteAndDelay close causes Envoy to flush the write and wait for the client/peer // to close (until a configured timeout which is not expected to trigger in this test). TEST_P(ConnectionImplTest, FlushWriteAndDelayCloseTest) { @@ -1106,6 +1076,89 @@ TEST_P(ConnectionImplTest, FlushWriteAndDelayCloseTimerTriggerTest) { dispatcher_->run(Event::Dispatcher::RunType::Block); } +// Test that a close(FlushWrite) after a delayed close timer has been enabled via +// close(FlushWriteAndDelay) will trigger a socket close after the flush is complete. +TEST_P(ConnectionImplTest, FlushWriteAfterFlushWriteAndDelayWithPendingWrite) { + setUpBasicConnection(); + connect(); + + InSequence s1; + // The actual timeout is insignificant, we just need to enable delayed close processing by + // setting it to > 0. 
+ server_connection_->setDelayedCloseTimeout(std::chrono::milliseconds(50)); + + std::shared_ptr client_read_filter(new NiceMock()); + client_connection_->addReadFilter(client_read_filter); + NiceMockConnectionStats stats; + server_connection_->setConnectionStats(stats.toBufferStats()); + + Buffer::OwnedImpl data("Connection: Close"); + server_connection_->write(data, false); + + time_system_.setMonotonicTime(std::chrono::milliseconds(0)); + + // The delayed close timer will be enabled by this call. Data in the write buffer hasn't been + // flushed yet since the dispatcher has not run. + server_connection_->close(ConnectionCloseType::FlushWriteAndDelay); + // The timer won't be disabled but this close() overwrites the delayed close state such that a + // successful flush will immediately close the socket. + server_connection_->close(ConnectionCloseType::FlushWrite); + + // The socket close will happen as a result of the write flush and not due to the delayed close + // timer triggering. + EXPECT_CALL(stats.delayed_close_timeouts_, inc()).Times(0); + EXPECT_CALL(server_callbacks_, onEvent(ConnectionEvent::LocalClose)).Times(1); + EXPECT_CALL(*client_read_filter, onData(BufferStringEqual("Connection: Close"), false)) + .Times(1) + .WillOnce(InvokeWithoutArgs([&]() -> FilterStatus { + time_system_.setMonotonicTime(std::chrono::milliseconds(100)); + return FilterStatus::StopIteration; + })); + EXPECT_CALL(client_callbacks_, onEvent(ConnectionEvent::RemoteClose)) + .Times(1) + .WillOnce(Invoke([&](Network::ConnectionEvent) -> void { dispatcher_->exit(); })); + dispatcher_->run(Event::Dispatcher::RunType::Block); +} + +// Test that a close(FlushWrite) triggers an immediate close when a delayed close timer has been +// enabled via a prior close(FlushWriteAndDelay). 
+TEST_P(ConnectionImplTest, FlushWriteAfterFlushWriteAndDelayWithoutPendingWrite) { + setUpBasicConnection(); + connect(); + + InSequence s1; + // The actual timeout is insignificant, we just need to enable delayed close processing by + // setting it to > 0. + server_connection_->setDelayedCloseTimeout(std::chrono::milliseconds(50)); + + std::shared_ptr client_read_filter(new NiceMock()); + client_connection_->addReadFilter(client_read_filter); + NiceMockConnectionStats stats; + server_connection_->setConnectionStats(stats.toBufferStats()); + + Buffer::OwnedImpl data("Connection: Close"); + server_connection_->write(data, false); + + server_connection_->close(ConnectionCloseType::FlushWriteAndDelay); + EXPECT_CALL(*client_read_filter, onData(BufferStringEqual("Connection: Close"), false)) + .Times(1) + .WillOnce(InvokeWithoutArgs([&]() -> FilterStatus { + dispatcher_->exit(); + return FilterStatus::StopIteration; + })); + dispatcher_->run(Event::Dispatcher::RunType::Block); + + // The write buffer has been flushed and a delayed close timer has been set. The socket close will + // happen as part of the close() since the timeout is no longer required. + EXPECT_CALL(server_callbacks_, onEvent(ConnectionEvent::LocalClose)).Times(1); + server_connection_->close(ConnectionCloseType::FlushWrite); + EXPECT_CALL(stats.delayed_close_timeouts_, inc()).Times(0); + EXPECT_CALL(client_callbacks_, onEvent(ConnectionEvent::RemoteClose)) + .Times(1) + .WillOnce(Invoke([&](Network::ConnectionEvent) -> void { dispatcher_->exit(); })); + dispatcher_->run(Event::Dispatcher::RunType::Block); +} + // Test that delayed close processing can be disabled by setting the delayed close timeout interval // to 0. 
TEST_P(ConnectionImplTest, FlushWriteAndDelayConfigDisabledTest) { @@ -1144,27 +1197,90 @@ TEST_P(ConnectionImplTest, FlushWriteAndDelayConfigDisabledTest) { server_connection->close(ConnectionCloseType::NoFlush); } +// Test that the delayed close timer is reset while write flushes are happening when a connection is +// in delayed close mode. +TEST_P(ConnectionImplTest, DelayedCloseTimerResetWithPendingWriteBufferFlushes) { + ConnectionMocks mocks = createConnectionMocks(); + MockTransportSocket* transport_socket = mocks.transport_socket_.get(); + IoHandlePtr io_handle = std::make_unique(0); + auto server_connection = std::make_unique( + *mocks.dispatcher_, + std::make_unique(std::move(io_handle), nullptr, nullptr), + std::move(mocks.transport_socket_), true); + + InSequence s1; + // The actual timeout is insignificant, we just need to enable delayed close processing by + // setting it to > 0. + auto timeout = std::chrono::milliseconds(100); + server_connection->setDelayedCloseTimeout(timeout); + + EXPECT_CALL(*mocks.file_event_, activate(Event::FileReadyType::Write)) + .WillOnce(Invoke(*mocks.file_ready_cb_)); + EXPECT_CALL(*transport_socket, doWrite(BufferStringEqual("data"), _)) + .WillOnce(Invoke([&](Buffer::Instance&, bool) -> IoResult { + // Do not drain the buffer and return 0 bytes processed to simulate backpressure. + return IoResult{PostIoAction::KeepOpen, 0, false}; + })); + Buffer::OwnedImpl data("data"); + server_connection->write(data, false); + + EXPECT_CALL(*mocks.timer_, enableTimer(timeout)).Times(1); + server_connection->close(ConnectionCloseType::FlushWriteAndDelay); + + // The write ready event cb (ConnectionImpl::onWriteReady()) will reset the timer to its original + // timeout value to avoid triggering while the write buffer is being actively flushed. + EXPECT_CALL(*transport_socket, doWrite(BufferStringEqual("data"), _)) + .WillOnce(Invoke([&](Buffer::Instance&, bool) -> IoResult { + // Partial flush. 
+ return IoResult{PostIoAction::KeepOpen, 1, false}; + })); + EXPECT_CALL(*mocks.timer_, enableTimer(timeout)).Times(1); + (*mocks.file_ready_cb_)(Event::FileReadyType::Write); + + EXPECT_CALL(*transport_socket, doWrite(BufferStringEqual("data"), _)) + .WillOnce(Invoke([&](Buffer::Instance& buffer, bool) -> IoResult { + // Flush the entire buffer. + buffer.drain(buffer.length()); + return IoResult{PostIoAction::KeepOpen, buffer.length(), false}; + })); + EXPECT_CALL(*mocks.timer_, enableTimer(timeout)).Times(1); + (*mocks.file_ready_cb_)(Event::FileReadyType::Write); + + // Force the delayed close timeout to trigger so the connection is cleaned up. + mocks.timer_->callback_(); +} + // Test that tearing down the connection will disable the delayed close timer. TEST_P(ConnectionImplTest, DelayedCloseTimeoutDisableOnSocketClose) { ConnectionMocks mocks = createConnectionMocks(); + MockTransportSocket* transport_socket = mocks.transport_socket_.get(); IoHandlePtr io_handle = std::make_unique(0); auto server_connection = std::make_unique( - *mocks.dispatcher, + *mocks.dispatcher_, std::make_unique(std::move(io_handle), nullptr, nullptr), - std::move(mocks.transport_socket), true); + std::move(mocks.transport_socket_), true); InSequence s1; - // The actual timeout is insignificant, we just need to enable delayed close processing by setting - // it to > 0. + // The actual timeout is insignificant, we just need to enable delayed close processing by + // setting it to > 0. server_connection->setDelayedCloseTimeout(std::chrono::milliseconds(100)); Buffer::OwnedImpl data("data"); + EXPECT_CALL(*mocks.file_event_, activate(Event::FileReadyType::Write)) + .WillOnce(Invoke(*mocks.file_ready_cb_)); + // The buffer must be drained when write() is called on the connection to allow the close() to + // enable the timer. 
+ EXPECT_CALL(*transport_socket, doWrite(BufferStringEqual("data"), _)) + .WillOnce(Invoke([&](Buffer::Instance& buffer, bool) -> IoResult { + buffer.drain(buffer.length()); + return IoResult{PostIoAction::KeepOpen, buffer.length(), false}; + })); server_connection->write(data, false); - EXPECT_CALL(*mocks.timer, enableTimer(_)).Times(1); + EXPECT_CALL(*mocks.timer_, enableTimer(_)).Times(1); // Enable the delayed close timer. server_connection->close(ConnectionCloseType::FlushWriteAndDelay); - EXPECT_CALL(*mocks.timer, disableTimer()).Times(1); + EXPECT_CALL(*mocks.timer_, disableTimer()).Times(1); // This close() will call closeSocket(), which should disable the timer to avoid triggering it // after the connection's data structures have been reset. server_connection->close(ConnectionCloseType::NoFlush); @@ -1173,11 +1289,12 @@ TEST_P(ConnectionImplTest, DelayedCloseTimeoutDisableOnSocketClose) { // Test that the delayed close timeout callback is resilient to connection teardown edge cases. TEST_P(ConnectionImplTest, DelayedCloseTimeoutNullStats) { ConnectionMocks mocks = createConnectionMocks(); + MockTransportSocket* transport_socket = mocks.transport_socket_.get(); IoHandlePtr io_handle = std::make_unique(0); auto server_connection = std::make_unique( - *mocks.dispatcher, + *mocks.dispatcher_, std::make_unique(std::move(io_handle), nullptr, nullptr), - std::move(mocks.transport_socket), true); + std::move(mocks.transport_socket_), true); InSequence s1; @@ -1190,14 +1307,24 @@ TEST_P(ConnectionImplTest, DelayedCloseTimeoutNullStats) { // that edge case. Buffer::OwnedImpl data("data"); + EXPECT_CALL(*mocks.file_event_, activate(Event::FileReadyType::Write)) + .WillOnce(Invoke(*mocks.file_ready_cb_)); + // The buffer must be drained when write() is called on the connection to allow the close() to + // enable the timer. 
+ EXPECT_CALL(*transport_socket, doWrite(BufferStringEqual("data"), _)) + .WillOnce(Invoke([&](Buffer::Instance& buffer, bool) -> IoResult { + buffer.drain(buffer.length()); + return IoResult{PostIoAction::KeepOpen, buffer.length(), false}; + })); server_connection->write(data, false); - EXPECT_CALL(*mocks.timer, enableTimer(_)).Times(1); + EXPECT_CALL(*mocks.timer_, enableTimer(_)).Times(1); server_connection->close(ConnectionCloseType::FlushWriteAndDelay); - EXPECT_CALL(*mocks.timer, disableTimer()).Times(1); + EXPECT_CALL(*mocks.timer_, disableTimer()).Times(1); // Copy the callback since mocks.timer will be freed when closeSocket() is called. - Event::TimerCb callback = mocks.timer->callback_; - // The following close() will call closeSocket() and reset internal data structures such as stats. + Event::TimerCb callback = mocks.timer_->callback_; + // The following close() will call closeSocket() and reset internal data structures such as + // stats. server_connection->close(ConnectionCloseType::NoFlush); // Verify the onDelayedCloseTimeout() callback is resilient to the post closeSocket(), pre // destruction state. 
This should not actually happen due to the timeout disablement in diff --git a/test/integration/tcp_proxy_integration_test.cc b/test/integration/tcp_proxy_integration_test.cc index e56bc4923b4ea..f695a50330364 100644 --- a/test/integration/tcp_proxy_integration_test.cc +++ b/test/integration/tcp_proxy_integration_test.cc @@ -189,7 +189,7 @@ TEST_P(TcpProxyIntegrationTest, TcpProxyUpstreamFlush) { tcp_client->waitForHalfClose(); EXPECT_EQ(test_server_->counter("tcp.tcp_stats.upstream_flush_total")->value(), 1); - EXPECT_EQ(test_server_->gauge("tcp.tcp_stats.upstream_flush_active")->value(), 0); + test_server_->waitForGaugeEq("tcp.tcp_stats.upstream_flush_active", 0); } // Test that Envoy doesn't crash or assert when shutting down with an upstream flush active diff --git a/tools/spelling_dictionary.txt b/tools/spelling_dictionary.txt index 06b17541a93cd..b3d824fe913fb 100644 --- a/tools/spelling_dictionary.txt +++ b/tools/spelling_dictionary.txt @@ -40,6 +40,7 @@ CTX CTXs CVC CX +CloseAfterFlush DCHECK DER DESC From 06931842450760fbdc6b4df8b732bbd031ec51fc Mon Sep 17 00:00:00 2001 From: Maxime Bedard Date: Fri, 12 Apr 2019 13:29:58 -0400 Subject: [PATCH 112/165] Add support for redis 5 zpopmin and zpopmax (#6563) Signed-off-by: Maxime Bedard --- docs/root/intro/arch_overview/redis.rst | 2 ++ docs/root/intro/version_history.rst | 1 + .../filters/network/common/redis/supported_commands.h | 6 +++--- 3 files changed, 6 insertions(+), 3 deletions(-) diff --git a/docs/root/intro/arch_overview/redis.rst b/docs/root/intro/arch_overview/redis.rst index 044ea66553726..e2f8efbc25656 100644 --- a/docs/root/intro/arch_overview/redis.rst +++ b/docs/root/intro/arch_overview/redis.rst @@ -148,6 +148,8 @@ For details on each command's usage see the official ZREVRANGEBYLEX, Sorted Set ZREVRANGEBYSCORE, Sorted Set ZREVRANK, Sorted Set + ZPOPMIN, Sorted Set + ZPOPMAX, Sorted Set ZSCAN, Sorted Set ZSCORE, Sorted Set APPEND, String diff --git a/docs/root/intro/version_history.rst
b/docs/root/intro/version_history.rst index 6f9c814fd80a6..35fba003bbae6 100644 --- a/docs/root/intro/version_history.rst +++ b/docs/root/intro/version_history.rst @@ -5,6 +5,7 @@ Version history ================ * dubbo_proxy: support the :ref:`Dubbo proxy filter `. * http: mitigated a race condition with the :ref:`delayed_close_timeout` where it could trigger while actively flushing a pending write buffer for a downstream connection. +* redis: add support for zpopmax and zpopmin commands. * upstream: added :ref:`upstream_cx_pool_overflow ` for the connection pool circuit breaker. 1.10.0 (Apr 5, 2019) diff --git a/source/extensions/filters/network/common/redis/supported_commands.h b/source/extensions/filters/network/common/redis/supported_commands.h index 54c06d0bcb025..13210d62abb72 100644 --- a/source/extensions/filters/network/common/redis/supported_commands.h +++ b/source/extensions/filters/network/common/redis/supported_commands.h @@ -26,9 +26,9 @@ struct SupportedCommands { "pexpireat", "psetex", "pttl", "restore", "rpop", "rpush", "rpushx", "sadd", "scard", "set", "setbit", "setex", "setnx", "setrange", "sismember", "smembers", "spop", "srandmember", "srem", "sscan", "strlen", "ttl", "type", "zadd", "zcard", "zcount", "zincrby", "zlexcount", - "zrange", "zrangebylex", "zrangebyscore", "zrank", "zrem", "zremrangebylex", - "zremrangebyrank", "zremrangebyscore", "zrevrange", "zrevrangebylex", "zrevrangebyscore", - "zrevrank", "zscan", "zscore"); + "zpopmin", "zpopmax", "zrange", "zrangebylex", "zrangebyscore", "zrank", "zrem", + "zremrangebylex", "zremrangebyrank", "zremrangebyscore", "zrevrange", "zrevrangebylex", + "zrevrangebyscore", "zrevrank", "zscan", "zscore"); } /** From d3247311328ebc83cfcce5e6b24fa9109117e80d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Guti=C3=A9rrez=20Segal=C3=A9s?= Date: Fri, 12 Apr 2019 10:53:29 -0700 Subject: [PATCH 113/165] thrift_proxy: fix crash when remote closes the connection (#6549) There's a few paths within the 
Thrift Proxy where we should ensure the connection is not closed, before trying to write. This change ensures that sendLocalReply() will return early if the connection is gone. It also adds a check for transformEnd(), which gets called from upstreamData(). Risk Level: low Testing: unit tests added Fixes: #6496 Signed-off-by: Raul Gutierrez Segales --- .../network/thrift_proxy/conn_manager.cc | 10 ++- .../network/thrift_proxy/conn_manager_test.cc | 65 +++++++++++++++++++ 2 files changed, 74 insertions(+), 1 deletion(-) diff --git a/source/extensions/filters/network/thrift_proxy/conn_manager.cc b/source/extensions/filters/network/thrift_proxy/conn_manager.cc index 1b5d51e43b5e9..0edb3d6cbb04c 100644 --- a/source/extensions/filters/network/thrift_proxy/conn_manager.cc +++ b/source/extensions/filters/network/thrift_proxy/conn_manager.cc @@ -87,8 +87,11 @@ void ConnectionManager::dispatch() { void ConnectionManager::sendLocalReply(MessageMetadata& metadata, const DirectResponse& response, bool end_stream) { - Buffer::OwnedImpl buffer; + if (read_callbacks_->connection().state() == Network::Connection::State::Closed) { + return; + } + Buffer::OwnedImpl buffer; const DirectResponse::ResponseType result = response.encode(metadata, *protocol_, buffer); Buffer::OwnedImpl response_buffer; @@ -204,6 +207,11 @@ FilterStatus ConnectionManager::ResponseDecoder::transportEnd() { ConnectionManager& cm = parent_.parent_; + if (cm.read_callbacks_->connection().state() == Network::Connection::State::Closed) { + complete_ = true; + throw EnvoyException("downstream connection is closed"); + } + Buffer::OwnedImpl buffer; // Use the factory to get the concrete transport from the decoder transport (as opposed to diff --git a/test/extensions/filters/network/thrift_proxy/conn_manager_test.cc b/test/extensions/filters/network/thrift_proxy/conn_manager_test.cc index 4336ea3262667..0af14a1d4a29d 100644 --- a/test/extensions/filters/network/thrift_proxy/conn_manager_test.cc +++ 
b/test/extensions/filters/network/thrift_proxy/conn_manager_test.cc @@ -1204,6 +1204,44 @@ TEST_F(ThriftConnectionManagerTest, OnDataWithFilterSendsLocalErrorReply) { EXPECT_EQ(1U, store_.counter("test.response_error").value()); } +// sendLocalReply does nothing, when the remote closed the connection. +TEST_F(ThriftConnectionManagerTest, OnDataWithFilterSendLocalReplyRemoteClosedConnection) { + auto* filter = new NiceMock(); + custom_filter_.reset(filter); + + initializeFilter(); + writeFramedBinaryMessage(buffer_, MessageType::Call, 0x0F); + + ThriftFilters::DecoderFilterCallbacks* callbacks{}; + EXPECT_CALL(*filter, setDecoderFilterCallbacks(_)) + .WillOnce( + Invoke([&](ThriftFilters::DecoderFilterCallbacks& cb) -> void { callbacks = &cb; })); + EXPECT_CALL(*decoder_filter_, setDecoderFilterCallbacks(_)); + + NiceMock direct_response; + EXPECT_CALL(direct_response, encode(_, _, _)).Times(0); + + // First filter sends local reply. + EXPECT_CALL(*filter, messageBegin(_)) + .WillOnce(Invoke([&](MessageMetadataSharedPtr) -> FilterStatus { + callbacks->sendLocalReply(direct_response, false); + return FilterStatus::StopIteration; + })); + EXPECT_CALL(filter_callbacks_.connection_, write(_, false)).Times(0); + EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, deferredDelete_(_)).Times(1); + + // Remote closes the connection. + filter_callbacks_.connection_.state_ = Network::Connection::State::Closed; + EXPECT_EQ(filter_->onData(buffer_, true), Network::FilterStatus::StopIteration); + + filter_callbacks_.connection_.dispatcher_.clearDeferredDeleteList(); + EXPECT_EQ(1U, store_.counter("test.request").value()); + EXPECT_EQ(1U, store_.counter("test.request_call").value()); + EXPECT_EQ(0U, store_.gauge("test.request_active").value()); + EXPECT_EQ(0U, store_.counter("test.response").value()); + EXPECT_EQ(0U, store_.counter("test.response_error").value()); +} + // Tests a decoder filter that modifies data. 
TEST_F(ThriftConnectionManagerTest, DecoderFiltersModifyRequests) { auto* filter = new NiceMock(); @@ -1252,6 +1290,33 @@ TEST_F(ThriftConnectionManagerTest, DecoderFiltersModifyRequests) { EXPECT_EQ(1U, store_.gauge("test.request_active").value()); } +TEST_F(ThriftConnectionManagerTest, transportEndWhenRemoteClose) { + initializeFilter(); + writeComplexFramedBinaryMessage(buffer_, MessageType::Call, 0x0F); + + ThriftFilters::DecoderFilterCallbacks* callbacks{}; + EXPECT_CALL(*decoder_filter_, setDecoderFilterCallbacks(_)) + .WillOnce( + Invoke([&](ThriftFilters::DecoderFilterCallbacks& cb) -> void { callbacks = &cb; })); + + EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::StopIteration); + EXPECT_EQ(1U, store_.counter("test.request_call").value()); + + writeComplexFramedBinaryMessage(write_buffer_, MessageType::Reply, 0x0F); + + FramedTransportImpl transport; + BinaryProtocolImpl proto; + callbacks->startUpstreamResponse(transport, proto); + + // Remote closes the connection. 
+ filter_callbacks_.connection_.state_ = Network::Connection::State::Closed; + EXPECT_EQ(ThriftFilters::ResponseStatus::Reset, callbacks->upstreamData(write_buffer_)); + EXPECT_EQ(0U, store_.counter("test.response").value()); + EXPECT_EQ(1U, store_.counter("test.response_decoding_error").value()); + + filter_callbacks_.connection_.dispatcher_.clearDeferredDeleteList(); +} + } // namespace ThriftProxy } // namespace NetworkFilters } // namespace Extensions From 8a5a32b5a754e3393bd42f18733c2c0777bbe380 Mon Sep 17 00:00:00 2001 From: danzh Date: Fri, 12 Apr 2019 16:49:12 -0400 Subject: [PATCH 114/165] Implement http2 macros (#6567) Signed-off-by: Dan Zhang --- .../quiche/platform/http2_macros_impl.h | 21 +++++++++++++----- .../quiche/platform/http2_platform_test.cc | 22 ++++++++++++------- 2 files changed, 30 insertions(+), 13 deletions(-) diff --git a/source/extensions/quic_listeners/quiche/platform/http2_macros_impl.h b/source/extensions/quic_listeners/quiche/platform/http2_macros_impl.h index 3d7df5563e9f1..4f99f2d2f42b8 100644 --- a/source/extensions/quic_listeners/quiche/platform/http2_macros_impl.h +++ b/source/extensions/quic_listeners/quiche/platform/http2_macros_impl.h @@ -1,15 +1,26 @@ #pragma once -#include "absl/base/macros.h" - // NOLINT(namespace-envoy) // This file is part of the QUICHE platform implementation, and is not to be // consumed or referenced directly by other Envoy code. It serves purely as a // porting layer for QUICHE. 
+#include + +#include "extensions/quic_listeners/quiche/platform/quic_logging_impl.h" + +#include "absl/base/macros.h" + #define HTTP2_FALLTHROUGH_IMPL ABSL_FALLTHROUGH_INTENDED -#define HTTP2_DIE_IF_NULL_IMPL(ptr) ABSL_DIE_IF_NULL(ptr) +#define HTTP2_DIE_IF_NULL_IMPL(ptr) dieIfNull(ptr) +#define HTTP2_UNREACHABLE_IMPL() DCHECK(false) + +namespace http2 { + +template inline T dieIfNull(T&& ptr) { + CHECK((ptr) != nullptr); + return std::forward(ptr); +} -// TODO: implement -#define HTTP2_UNREACHABLE_IMPL() 0 +} // namespace http2 diff --git a/test/extensions/quic_listeners/quiche/platform/http2_platform_test.cc b/test/extensions/quic_listeners/quiche/platform/http2_platform_test.cc index 10b9b38787e1e..0cd37a1512624 100644 --- a/test/extensions/quic_listeners/quiche/platform/http2_platform_test.cc +++ b/test/extensions/quic_listeners/quiche/platform/http2_platform_test.cc @@ -1,3 +1,9 @@ +// NOLINT(namespace-envoy) + +// This file is part of the QUICHE platform implementation, and is not to be +// consumed or referenced directly by other Envoy code. It serves purely as a +// porting layer for QUICHE. + #include #include "test/test_common/logging.h" @@ -8,6 +14,7 @@ #include "quiche/http2/platform/api/http2_containers.h" #include "quiche/http2/platform/api/http2_estimate_memory_usage.h" #include "quiche/http2/platform/api/http2_logging.h" +#include "quiche/http2/platform/api/http2_macros.h" #include "quiche/http2/platform/api/http2_optional.h" #include "quiche/http2/platform/api/http2_ptr_util.h" #include "quiche/http2/platform/api/http2_string.h" @@ -19,10 +26,7 @@ // minimal, and serve primarily to verify the APIs compile and link without // issue. 
-namespace Envoy { -namespace Extensions { -namespace QuicListeners { -namespace Quiche { +namespace http2 { namespace { TEST(Http2PlatformTest, Http2Arraysize) { @@ -96,8 +100,10 @@ TEST(Http2PlatformTest, Http2StringPiece) { EXPECT_EQ('b', sp[0]); } +TEST(Http2PlatformTest, Http2Macro) { + EXPECT_DEBUG_DEATH(HTTP2_UNREACHABLE(), ""); + EXPECT_DEATH(HTTP2_DIE_IF_NULL(nullptr), ""); +} + } // namespace -} // namespace Quiche -} // namespace QuicListeners -} // namespace Extensions -} // namespace Envoy +} // namespace http2 From a5a1a9965e62389f0ad8f35a4afb82010af4fde2 Mon Sep 17 00:00:00 2001 From: Joshua Marantz Date: Fri, 12 Apr 2019 23:57:23 -0400 Subject: [PATCH 115/165] test: Ensure only one thread advances time, and enable simulated-time for hds_integration_test. (#6541) Description: It is hard to reason about tests that push time forward from more than one thread. This was happening in contention-tests. This adds some asserts to ensure that we don't do this in future tests, and fixes those tests that were doing it. 
Risk Level: low Testing: //test/..., and hds_integration_test with tsan and --runs_per_test=1000 Docs Changes: n/a Release Notes: n/a Fixes: #6239 Signed-off-by: Joshua Marantz --- source/exe/main_common.h | 6 ++++ source/server/hot_restart_impl.cc | 5 ++-- test/common/common/BUILD | 1 + test/common/common/mutex_tracer_test.cc | 10 +++++-- test/exe/main_common_test.cc | 2 +- test/integration/hds_integration_test.cc | 1 + test/test_common/BUILD | 31 +++++++++++++++++++++ test/test_common/contention.cc | 25 ++++++++++++----- test/test_common/contention.h | 7 ++++- test/test_common/file_system_for_test.cc | 20 +++++++++++++ test/test_common/file_system_for_test.h | 9 ++++++ test/test_common/only_one_thread.cc | 25 +++++++++++++++++ test/test_common/only_one_thread.h | 28 +++++++++++++++++++ test/test_common/simulated_time_system.cc | 2 ++ test/test_common/simulated_time_system.h | 2 ++ test/test_common/test_time.cc | 6 +++- test/test_common/test_time.h | 2 ++ test/test_common/thread_factory_for_test.cc | 19 +++++++++++++ test/test_common/thread_factory_for_test.h | 9 ++++++ test/test_common/utility.cc | 29 ------------------- test/test_common/utility.h | 10 ++----- 21 files changed, 197 insertions(+), 52 deletions(-) create mode 100644 test/test_common/file_system_for_test.cc create mode 100644 test/test_common/file_system_for_test.h create mode 100644 test/test_common/only_one_thread.cc create mode 100644 test/test_common/only_one_thread.h create mode 100644 test/test_common/thread_factory_for_test.cc create mode 100644 test/test_common/thread_factory_for_test.h diff --git a/source/exe/main_common.h b/source/exe/main_common.h index 28e5708018a58..f88fcbb457183 100644 --- a/source/exe/main_common.h +++ b/source/exe/main_common.h @@ -106,6 +106,12 @@ class MainCommon { static std::string hotRestartVersion(uint64_t max_num_stats, uint64_t max_stat_name_len, bool hot_restart_enabled); + /** + * @return a pointer to the server instance, or nullptr if initialized into
validation mode. + */ + Server::Instance* server() { return base_.server(); } + private: #ifdef ENVOY_HANDLE_SIGNALS Envoy::SignalAction handle_sigs; diff --git a/source/server/hot_restart_impl.cc b/source/server/hot_restart_impl.cc index 1fb34c4a879c4..a2a6dc5e002a3 100644 --- a/source/server/hot_restart_impl.cc +++ b/source/server/hot_restart_impl.cc @@ -160,8 +160,9 @@ int HotRestartImpl::bindDomainSocket(uint64_t id) { Api::SysCallIntResult result = os_sys_calls.bind(fd, reinterpret_cast(&address), sizeof(address)); if (result.rc_ != 0) { - throw EnvoyException( - fmt::format("unable to bind domain socket with id={} (see --base-id option)", id)); + throw EnvoyException(fmt::format( + "unable to bind domain socket with id={}, (see --base-id option), address={}, errno={}: {}", + id, result.errno_, strerror(result.errno_))); } return fd; diff --git a/test/common/common/BUILD b/test/common/common/BUILD index dc68632fe5bb6..598ad406adc9f 100644 --- a/test/common/common/BUILD +++ b/test/common/common/BUILD @@ -119,6 +119,7 @@ envoy_cc_test( deps = [ "//source/common/common:mutex_tracer_lib", "//test/test_common:contention_lib", + "//test/test_common:utility_lib", ], ) diff --git a/test/common/common/mutex_tracer_test.cc b/test/common/common/mutex_tracer_test.cc index 042b0469f1994..e72fc3a995b94 100644 --- a/test/common/common/mutex_tracer_test.cc +++ b/test/common/common/mutex_tracer_test.cc @@ -5,6 +5,7 @@ #include "common/common/mutex_tracer_impl.h" #include "test/test_common/contention.h" +#include "test/test_common/utility.h" #include "absl/synchronization/mutex.h" #include "gtest/gtest.h" @@ -72,13 +73,16 @@ TEST_F(MutexTracerTest, TryLockNoContention) { } TEST_F(MutexTracerTest, TwoThreadsWithContention) { + Api::ApiPtr api = Api::createApiForTest(); + int64_t prev_num_contentions = tracer_.numContentions(); for (int i = 1; i <= 10; ++i) { int64_t curr_num_lifetime_wait_cycles = tracer_.lifetimeWaitCycles(); - Thread::TestUtil::ContentionGenerator 
contention_generator; - + Thread::TestUtil::ContentionGenerator contention_generator(*api); contention_generator.generateContention(tracer_); - EXPECT_EQ(tracer_.numContentions(), i); + int64_t num_contentions = tracer_.numContentions(); + EXPECT_LT(prev_num_contentions, num_contentions); + prev_num_contentions = num_contentions; EXPECT_GT(tracer_.currentWaitCycles(), 0); // This shouldn't be hardcoded. EXPECT_GT(tracer_.lifetimeWaitCycles(), 0); EXPECT_GT(tracer_.lifetimeWaitCycles(), curr_num_lifetime_wait_cycles); diff --git a/test/exe/main_common_test.cc b/test/exe/main_common_test.cc index 1e5136b96e249..2b1aee544fec3 100644 --- a/test/exe/main_common_test.cc +++ b/test/exe/main_common_test.cc @@ -308,7 +308,7 @@ TEST_P(AdminRequestTest, AdminRequestContentionEnabled) { waitForEnvoyRun(); // Induce contention to guarantee a non-zero num_contentions count. - Thread::TestUtil::ContentionGenerator contention_generator; + Thread::TestUtil::ContentionGenerator contention_generator(main_common_->server()->api()); contention_generator.generateContention(MutexTracerImpl::getOrCreateTracer()); std::string response = adminRequest("/contention", "GET"); diff --git a/test/integration/hds_integration_test.cc b/test/integration/hds_integration_test.cc index dd8fe59451747..eb4eba9ce2893 100644 --- a/test/integration/hds_integration_test.cc +++ b/test/integration/hds_integration_test.cc @@ -26,6 +26,7 @@ namespace { // TODO(jmarantz): switch this to simulated-time after debugging flakes. 
class HdsIntegrationTest : public testing::TestWithParam, + public Event::TestUsingSimulatedTime, public HttpIntegrationTest { public: HdsIntegrationTest() : HttpIntegrationTest(Http::CodecClient::Type::HTTP1, GetParam()) {} diff --git a/test/test_common/BUILD b/test/test_common/BUILD index 6044a39b67715..293640e4d3cee 100644 --- a/test/test_common/BUILD +++ b/test/test_common/BUILD @@ -92,7 +92,9 @@ envoy_cc_test_library( "abseil_strings", ], deps = [ + ":file_system_for_test_lib", ":test_time_lib", + ":thread_factory_for_test_lib", "//include/envoy/buffer:buffer_interface", "//include/envoy/http:codec_interface", "//include/envoy/network:address_interface", @@ -117,6 +119,26 @@ envoy_cc_test_library( ], ) +envoy_cc_test_library( + name = "thread_factory_for_test_lib", + srcs = ["thread_factory_for_test.cc"], + hdrs = ["thread_factory_for_test.h"], + deps = [ + "//source/common/common:thread_lib", + "//source/common/common:utility_lib", + ], +) + +envoy_cc_test_library( + name = "file_system_for_test_lib", + srcs = ["file_system_for_test.cc"], + hdrs = ["file_system_for_test.h"], + deps = [ + "//source/common/common:utility_lib", + "//source/common/filesystem:filesystem_lib", + ], +) + envoy_cc_test( name = "utility_test", srcs = ["utility_test.cc"], @@ -161,12 +183,20 @@ envoy_cc_test( ], ) +envoy_cc_test_library( + name = "only_one_thread_lib", + srcs = ["only_one_thread.cc"], + hdrs = ["only_one_thread.h"], + deps = [":thread_factory_for_test_lib"], +) + envoy_cc_test_library( name = "test_time_lib", srcs = ["test_time.cc"], hdrs = ["test_time.h"], deps = [ ":global_lib", + ":only_one_thread_lib", ":test_time_system_interface", "//source/common/event:real_time_system_lib", ], @@ -188,6 +218,7 @@ envoy_cc_test_library( srcs = ["simulated_time_system.cc"], hdrs = ["simulated_time_system.h"], deps = [ + ":only_one_thread_lib", ":test_time_system_interface", "//source/common/event:event_impl_base_lib", "//source/common/event:real_time_system_lib", diff --git 
a/test/test_common/contention.cc b/test/test_common/contention.cc index 10df692c4d8ef..9160120ba5faa 100644 --- a/test/test_common/contention.cc +++ b/test/test_common/contention.cc @@ -19,14 +19,25 @@ Envoy::Thread::ThreadPtr ContentionGenerator::launchThread(MutexTracerImpl& trac } void ContentionGenerator::holdUntilContention(MutexTracerImpl& tracer) { + Event::DispatcherPtr dispatcher = api_.allocateDispatcher(); + Event::TimerPtr timer = dispatcher->createTimer([&dispatcher]() { dispatcher->exit(); }); + auto sleep_ms = [&timer, &dispatcher](int num_ms) { + timer->enableTimer(std::chrono::milliseconds(num_ms)); + dispatcher->run(Event::Dispatcher::RunType::RunUntilExit); + }; int64_t curr_num_contentions = tracer.numContentions(); - while (tracer.numContentions() == curr_num_contentions) { - test_time_.timeSystem().sleep(std::chrono::milliseconds(1)); - LockGuard lock(mutex_); - // We hold the lock 90% of the time to ensure both contention and eventual acquisition, which - // is needed to bump numContentions(). - test_time_.timeSystem().sleep(std::chrono::milliseconds(9)); - } + do { + sleep_ms(1); + { + LockGuard lock(mutex_); + // We hold the lock 90% of the time to ensure both contention and eventual acquisition, which + // is needed to bump numContentions(). 
+ sleep_ms(9); + } + if (tracer.numContentions() > curr_num_contentions) { + found_contention_ = true; + } + } while (!found_contention_); } } // namespace TestUtil diff --git a/test/test_common/contention.h b/test/test_common/contention.h index 77ce14d4872a8..d90ab20d4eea0 100644 --- a/test/test_common/contention.h +++ b/test/test_common/contention.h @@ -3,6 +3,8 @@ #include #include +#include "envoy/api/api.h" + #include "common/common/lock_guard.h" #include "common/common/mutex_tracer_impl.h" #include "common/common/thread.h" @@ -20,6 +22,8 @@ namespace TestUtil { class ContentionGenerator { public: + ContentionGenerator(Api::Api& api) : api_(api) {} + /** * Generates at least once occurrence of mutex contention, as measured by tracer. */ @@ -30,7 +34,8 @@ class ContentionGenerator { void holdUntilContention(MutexTracerImpl& tracer); MutexBasicLockable mutex_; - DangerousDeprecatedTestTime test_time_; + Api::Api& api_; + std::atomic found_contention_{false}; }; } // namespace TestUtil diff --git a/test/test_common/file_system_for_test.cc b/test/test_common/file_system_for_test.cc new file mode 100644 index 0000000000000..68d24fd9105f9 --- /dev/null +++ b/test/test_common/file_system_for_test.cc @@ -0,0 +1,20 @@ +#include "common/filesystem/filesystem_impl.h" + +namespace Envoy { + +namespace Filesystem { + +// TODO(sesmith177) Tests should get the Filesystem::Instance from the same location as the main +// code +Instance& fileSystemForTest() { +#ifdef WIN32 + static InstanceImplWin32* file_system = new InstanceImplWin32(); +#else + static InstanceImplPosix* file_system = new InstanceImplPosix(); +#endif + return *file_system; +} + +} // namespace Filesystem + +} // namespace Envoy diff --git a/test/test_common/file_system_for_test.h b/test/test_common/file_system_for_test.h new file mode 100644 index 0000000000000..43a443f68b6f5 --- /dev/null +++ b/test/test_common/file_system_for_test.h @@ -0,0 +1,9 @@ +#include "envoy/filesystem/filesystem.h" + +namespace 
Envoy { + +namespace Filesystem { +Instance& fileSystemForTest(); +} // namespace Filesystem + +} // namespace Envoy diff --git a/test/test_common/only_one_thread.cc b/test/test_common/only_one_thread.cc new file mode 100644 index 0000000000000..a6a5869b7eda6 --- /dev/null +++ b/test/test_common/only_one_thread.cc @@ -0,0 +1,25 @@ +#include "test/test_common/only_one_thread.h" + +#include "envoy/thread/thread.h" + +#include "common/common/lock_guard.h" + +#include "test/test_common/thread_factory_for_test.h" + +namespace Envoy { +namespace Thread { + +OnlyOneThread::OnlyOneThread() : thread_factory_(threadFactoryForTest()) {} + +void OnlyOneThread::checkOneThread() { + LockGuard lock(mutex_); + if (thread_advancing_time_ == nullptr) { + thread_advancing_time_ = thread_factory_.currentThreadId(); + } else { + RELEASE_ASSERT(thread_advancing_time_->isCurrentThreadId(), + "time should only be advanced on one thread in the context of a test"); + } +} + +} // namespace Thread +} // namespace Envoy diff --git a/test/test_common/only_one_thread.h b/test/test_common/only_one_thread.h new file mode 100644 index 0000000000000..1051c632e78a0 --- /dev/null +++ b/test/test_common/only_one_thread.h @@ -0,0 +1,28 @@ +#pragma once + +#include "common/common/assert.h" +#include "common/common/thread.h" + +namespace Envoy { +namespace Thread { + +// Ensures that an operation is performed on only one thread. The first caller +// to OnlyOneThread::checkOneThread establishes the thread ID, and subsequent +// ones will assert-fail if they do not match. +class OnlyOneThread { +public: + OnlyOneThread(); + + /** + * Ensures that one thread is used in a testcase to access some resource. 
+ */ + void checkOneThread(); + +private: + ThreadFactory& thread_factory_; + ThreadIdPtr thread_advancing_time_ GUARDED_BY(mutex_); + mutable MutexBasicLockable mutex_; +}; + +} // namespace Thread +} // namespace Envoy diff --git a/test/test_common/simulated_time_system.cc b/test/test_common/simulated_time_system.cc index 204c47943a4c0..a54767114b119 100644 --- a/test/test_common/simulated_time_system.cc +++ b/test/test_common/simulated_time_system.cc @@ -207,6 +207,7 @@ MonotonicTime SimulatedTimeSystemHelper::monotonicTime() { } void SimulatedTimeSystemHelper::sleep(const Duration& duration) { + only_one_thread_.checkOneThread(); mutex_.lock(); MonotonicTime monotonic_time = monotonic_time_ + std::chrono::duration_cast(duration); @@ -216,6 +217,7 @@ void SimulatedTimeSystemHelper::sleep(const Duration& duration) { Thread::CondVar::WaitStatus SimulatedTimeSystemHelper::waitFor( Thread::MutexBasicLockable& mutex, Thread::CondVar& condvar, const Duration& duration) noexcept EXCLUSIVE_LOCKS_REQUIRED(mutex) { + only_one_thread_.checkOneThread(); const Duration real_time_poll_delay( std::min(std::chrono::duration_cast(std::chrono::milliseconds(50)), duration)); const MonotonicTime end_time = monotonicTime() + duration; diff --git a/test/test_common/simulated_time_system.h b/test/test_common/simulated_time_system.h index 11ccad33a8e87..7dfec54b012e6 100644 --- a/test/test_common/simulated_time_system.h +++ b/test/test_common/simulated_time_system.h @@ -6,6 +6,7 @@ #include "common/common/thread.h" #include "common/common/utility.h" +#include "test/test_common/only_one_thread.h" #include "test/test_common/test_time_system.h" namespace Envoy { @@ -102,6 +103,7 @@ class SimulatedTimeSystemHelper : public TestTimeSystem { uint64_t index_ GUARDED_BY(mutex_); mutable Thread::MutexBasicLockable mutex_; std::atomic pending_alarms_; + Thread::OnlyOneThread only_one_thread_; }; // Represents a simulated time system, where time is advanced by calling diff --git 
a/test/test_common/test_time.cc b/test/test_common/test_time.cc index 3b97d7f6afb36..09a5391cf128f 100644 --- a/test/test_common/test_time.cc +++ b/test/test_common/test_time.cc @@ -18,11 +18,15 @@ TestTimeSystem& GlobalTimeSystem::timeSystem() { return singleton_->timeSystem(make_real_time_system); } -void TestRealTimeSystem::sleep(const Duration& duration) { std::this_thread::sleep_for(duration); } +void TestRealTimeSystem::sleep(const Duration& duration) { + only_one_thread_.checkOneThread(); + std::this_thread::sleep_for(duration); +} Thread::CondVar::WaitStatus TestRealTimeSystem::waitFor(Thread::MutexBasicLockable& lock, Thread::CondVar& condvar, const Duration& duration) noexcept { + only_one_thread_.checkOneThread(); return condvar.waitFor(lock, duration); } diff --git a/test/test_common/test_time.h b/test/test_common/test_time.h index 4dc8bdb84cd6a..e97af83100640 100644 --- a/test/test_common/test_time.h +++ b/test/test_common/test_time.h @@ -3,6 +3,7 @@ #include "common/event/real_time_system.h" #include "test/test_common/global.h" +#include "test/test_common/only_one_thread.h" #include "test/test_common/test_time_system.h" namespace Envoy { @@ -27,6 +28,7 @@ class TestRealTimeSystem : public TestTimeSystem { private: Event::RealTimeSystem real_time_system_; + Thread::OnlyOneThread only_one_thread_; }; class GlobalTimeSystem : public DelegatingTestTimeSystemBase { diff --git a/test/test_common/thread_factory_for_test.cc b/test/test_common/thread_factory_for_test.cc new file mode 100644 index 0000000000000..0bdd93e0cdde9 --- /dev/null +++ b/test/test_common/thread_factory_for_test.cc @@ -0,0 +1,19 @@ +#include "common/common/thread_impl.h" + +namespace Envoy { + +namespace Thread { + +// TODO(sesmith177) Tests should get the ThreadFactory from the same location as the main code +ThreadFactory& threadFactoryForTest() { +#ifdef WIN32 + static ThreadFactoryImplWin32* thread_factory = new ThreadFactoryImplWin32(); +#else + static ThreadFactoryImplPosix* 
thread_factory = new ThreadFactoryImplPosix(); +#endif + return *thread_factory; +} + +} // namespace Thread + +} // namespace Envoy diff --git a/test/test_common/thread_factory_for_test.h b/test/test_common/thread_factory_for_test.h new file mode 100644 index 0000000000000..26060fca781e6 --- /dev/null +++ b/test/test_common/thread_factory_for_test.h @@ -0,0 +1,9 @@ +#include "envoy/thread/thread.h" + +namespace Envoy { + +namespace Thread { +ThreadFactory& threadFactoryForTest(); +} // namespace Thread + +} // namespace Envoy diff --git a/test/test_common/utility.cc b/test/test_common/utility.cc index 8a92397b47810..3589718497169 100644 --- a/test/test_common/utility.cc +++ b/test/test_common/utility.cc @@ -401,35 +401,6 @@ MockedTestAllocator::~MockedTestAllocator() {} } // namespace Stats -namespace Thread { - -// TODO(sesmith177) Tests should get the ThreadFactory from the same location as the main code -ThreadFactory& threadFactoryForTest() { -#ifdef WIN32 - static ThreadFactoryImplWin32* thread_factory = new ThreadFactoryImplWin32(); -#else - static ThreadFactoryImplPosix* thread_factory = new ThreadFactoryImplPosix(); -#endif - return *thread_factory; -} - -} // namespace Thread - -namespace Filesystem { - -// TODO(sesmith177) Tests should get the Filesystem::Instance from the same location as the main -// code -Instance& fileSystemForTest() { -#ifdef WIN32 - static InstanceImplWin32* file_system = new InstanceImplWin32(); -#else - static InstanceImplPosix* file_system = new InstanceImplPosix(); -#endif - return *file_system; -} - -} // namespace Filesystem - namespace Api { class TestImplProvider { diff --git a/test/test_common/utility.h b/test/test_common/utility.h index 063e6c89ae9ed..7318664e40f33 100644 --- a/test/test_common/utility.h +++ b/test/test_common/utility.h @@ -24,7 +24,9 @@ #include "common/stats/fake_symbol_table_impl.h" #include "common/stats/raw_stat_data.h" +#include "test/test_common/file_system_for_test.h" #include 
"test/test_common/printers.h" +#include "test/test_common/thread_factory_for_test.h" #include "absl/time/time.h" #include "gmock/gmock.h" @@ -534,14 +536,6 @@ class MockedTestAllocator : public TestAllocator { } // namespace Stats -namespace Thread { -ThreadFactory& threadFactoryForTest(); -} // namespace Thread - -namespace Filesystem { -Instance& fileSystemForTest(); -} // namespace Filesystem - namespace Api { ApiPtr createApiForTest(); ApiPtr createApiForTest(Stats::Store& stat_store); From 1e61a3f95f2c4d9ac1e54feae8693cee7906e2eb Mon Sep 17 00:00:00 2001 From: htuch Date: Sat, 13 Apr 2019 19:02:41 -0500 Subject: [PATCH 116/165] test/fuzz: frame replay style testing and fuzzing for HTTP/2 headers. (#6491) In the fix patch for CVE-2019-9900, we introduced some basic HTTP/2 manual fuzzing, where single bytes were corrupted in a HEADERS frame, to attempt to show that NUL/CR/LF were handled. However, testing that relies on codec_impl_test has nghttp2 as both client and server. This implies that Huffman coding may be present, and single byte corruptions of 0x00 don't imply a NUL for example. In this patch, we take a more principled approach and use artisanal HEADERS frames that have no Huffman or dynamic table compression to validate the above single byte corruption property. A nice side effect of this is that we can derive from this infrastructure stateless request/response HEADERS fuzzers that can cover uncompressed (specifically no Huffman) paths, which is more likely to provide a direct access to nghttp2 codec header sanitization logic. Risk level: Low Testing: Unit tests and ran both fuzzers under oss-fuzz Docker image. Seems reasonably fast and no crashes locally.
Signed-off-by: Harvey Tuch --- test/common/http/http2/BUILD | 44 +++ test/common/http/http2/codec_impl_test.cc | 56 ---- test/common/http/http2/frame_replay.cc | 118 +++++++ test/common/http/http2/frame_replay.h | 91 ++++++ test/common/http/http2/frame_replay_test.cc | 288 ++++++++++++++++++ .../simple_example_huffman | Bin 0 -> 27 bytes .../simple_example_plain | Bin 0 -> 74 bytes .../http/http2/request_header_fuzz_test.cc | 44 +++ .../simple_example_huffman | Bin 0 -> 24 bytes .../simple_example_plain | Bin 0 -> 40 bytes .../http/http2/response_header_fuzz_test.cc | 43 +++ tools/spelling_dictionary.txt | 3 + 12 files changed, 631 insertions(+), 56 deletions(-) create mode 100644 test/common/http/http2/frame_replay.cc create mode 100644 test/common/http/http2/frame_replay.h create mode 100644 test/common/http/http2/frame_replay_test.cc create mode 100644 test/common/http/http2/request_header_corpus/simple_example_huffman create mode 100644 test/common/http/http2/request_header_corpus/simple_example_plain create mode 100644 test/common/http/http2/request_header_fuzz_test.cc create mode 100644 test/common/http/http2/response_header_corpus/simple_example_huffman create mode 100644 test/common/http/http2/response_header_corpus/simple_example_plain create mode 100644 test/common/http/http2/response_header_fuzz_test.cc diff --git a/test/common/http/http2/BUILD b/test/common/http/http2/BUILD index 5fd65d1fff706..0c245388afb92 100644 --- a/test/common/http/http2/BUILD +++ b/test/common/http/http2/BUILD @@ -2,6 +2,7 @@ licenses(["notice"]) # Apache 2 load( "//bazel:envoy_build_system.bzl", + "envoy_cc_fuzz_test", "envoy_cc_test", "envoy_cc_test_library", "envoy_package", @@ -55,6 +56,35 @@ envoy_cc_test( ], ) +envoy_cc_test_library( + name = "frame_replay_lib", + srcs = ["frame_replay.cc"], + hdrs = ["frame_replay.h"], + deps = [ + "//source/common/common:hex_lib", + "//source/common/common:macros", + "//source/common/http/http2:codec_lib", + 
"//test/common/http:common_lib", + "//test/common/http/http2:codec_impl_test_util", + "//test/mocks/http:http_mocks", + "//test/mocks/network:network_mocks", + "//test/test_common:environment_lib", + "//test/test_common:utility_lib", + ], +) + +envoy_cc_test( + name = "frame_replay_test", + srcs = ["frame_replay_test.cc"], + data = [ + "request_header_corpus/simple_example_huffman", + "request_header_corpus/simple_example_plain", + "response_header_corpus/simple_example_huffman", + "response_header_corpus/simple_example_plain", + ], + deps = [":frame_replay_lib"], +) + envoy_cc_test( name = "metadata_encoder_decoder_test", srcs = ["metadata_encoder_decoder_test.cc"], @@ -69,3 +99,17 @@ envoy_cc_test( "//test/test_common:utility_lib", ], ) + +envoy_cc_fuzz_test( + name = "response_header_fuzz_test", + srcs = ["response_header_fuzz_test.cc"], + corpus = "response_header_corpus", + deps = [":frame_replay_lib"], +) + +envoy_cc_fuzz_test( + name = "request_header_fuzz_test", + srcs = ["request_header_fuzz_test.cc"], + corpus = "request_header_corpus", + deps = [":frame_replay_lib"], +) diff --git a/test/common/http/http2/codec_impl_test.cc b/test/common/http/http2/codec_impl_test.cc index a220413d34969..576516b1662a6 100644 --- a/test/common/http/http2/codec_impl_test.cc +++ b/test/common/http/http2/codec_impl_test.cc @@ -34,12 +34,6 @@ namespace Http2 { using Http2SettingsTuple = ::testing::tuple; using Http2SettingsTestParam = ::testing::tuple; -constexpr Http2SettingsTuple - DefaultHttp2SettingsTuple(Http2Settings::DEFAULT_HPACK_TABLE_SIZE, - Http2Settings::DEFAULT_MAX_CONCURRENT_STREAMS, - Http2Settings::DEFAULT_MAX_CONCURRENT_STREAMS, - Http2Settings::DEFAULT_INITIAL_CONNECTION_WINDOW_SIZE); - class Http2CodecImplTestFixture { public: struct ConnectionWrapper { @@ -89,9 +83,6 @@ class Http2CodecImplTestFixture { if (corrupt_metadata_frame_) { corruptMetadataFramePayload(data); } - if (corrupt_at_offset_ >= 0) { - corruptAtOffset(data, corrupt_at_offset_, 
corrupt_with_char_); - } server_wrapper_.dispatch(data, *server_); })); ON_CALL(server_connection_, write(_, _)) @@ -148,9 +139,6 @@ class Http2CodecImplTestFixture { MockStreamCallbacks server_stream_callbacks_; // Corrupt a metadata frame payload. bool corrupt_metadata_frame_ = false; - // Corrupt frame at a given offset (if positive). - ssize_t corrupt_at_offset_{-1}; - char corrupt_with_char_{'\0'}; uint32_t max_request_headers_kb_ = Http::DEFAULT_MAX_REQUEST_HEADERS_KB; }; @@ -1041,50 +1029,6 @@ TEST_P(Http2CodecImplTest, TestCodecHeaderCompression) { } } -// Validate that nghttp2 rejects NUL/CR/LF as per -// https://httpwg.org/specs/rfc7540.html#rfc.section.10.3. -// TEST_P(Http2CodecImplTest, InvalidHeaderChars) { -// TODO(htuch): Write me. Http2CodecImplMutationTest basically covers this, -// but we could be a bit more specific and add a captured H2 HEADERS frame -// here and inject it with mutation of just the header value, ensuring we get -// the expected codec exception. -// } - -class Http2CodecImplMutationTest : public ::testing::TestWithParam<::testing::tuple>, - protected Http2CodecImplTestFixture { -public: - Http2CodecImplMutationTest() - : Http2CodecImplTestFixture(DefaultHttp2SettingsTuple, DefaultHttp2SettingsTuple) {} - - void initialize() override { - corrupt_with_char_ = ::testing::get<0>(GetParam()); - corrupt_at_offset_ = ::testing::get<1>(GetParam()); - Http2CodecImplTestFixture::initialize(); - } -}; - -INSTANTIATE_TEST_SUITE_P(Http2CodecImplMutationTest, Http2CodecImplMutationTest, - ::testing::Combine(::testing::ValuesIn({'\0', '\r', '\n'}), - ::testing::Range(0, 128))); - -// Mutate an arbitrary offset in the HEADERS frame with NUL/CR/LF. This should -// either throw an exception or continue, but we shouldn't crash due to -// validHeaderString() ASSERTs. 
-TEST_P(Http2CodecImplMutationTest, HandleInvalidChars) { - initialize(); - - TestHeaderMapImpl request_headers; - request_headers.addCopy("foo", "barbaz"); - HttpTestUtility::addDefaultHeaders(request_headers); - EXPECT_CALL(request_decoder_, decodeHeaders_(_, _)).Times(AnyNumber()); - EXPECT_CALL(client_callbacks_, onGoAway()).Times(AnyNumber()); - try { - request_encoder_->encodeHeaders(request_headers, true); - } catch (const CodecProtocolException& e) { - ENVOY_LOG_MISC(trace, "CodecProtocolException: {}", e.what()); - } -} - } // namespace Http2 } // namespace Http } // namespace Envoy diff --git a/test/common/http/http2/frame_replay.cc b/test/common/http/http2/frame_replay.cc new file mode 100644 index 0000000000000..7d2fe1cc7ae61 --- /dev/null +++ b/test/common/http/http2/frame_replay.cc @@ -0,0 +1,118 @@ +#include "test/common/http/http2/frame_replay.h" + +#include "common/common/hex.h" +#include "common/common/macros.h" + +#include "test/common/http/common.h" +#include "test/test_common/environment.h" + +namespace Envoy { +namespace Http { +namespace Http2 { + +FileFrame::FileFrame(absl::string_view path) : api_(Api::createApiForTest()) { + const std::string contents = api_->fileSystem().fileReadToEnd( + TestEnvironment::runfilesPath("test/common/http/http2/" + std::string(path))); + frame_.resize(contents.size()); + contents.copy(reinterpret_cast(frame_.data()), frame_.size()); +} + +std::unique_ptr FileFrame::istream() { + const std::string frame_string{reinterpret_cast(frame_.data()), frame_.size()}; + return std::make_unique(frame_string); +} + +const Frame& WellKnownFrames::clientConnectionPrefaceFrame() { + CONSTRUCT_ON_FIRST_USE(std::vector, + {0x50, 0x52, 0x49, 0x20, 0x2a, 0x20, 0x48, 0x54, 0x54, 0x50, 0x2f, 0x32, + 0x2e, 0x30, 0x0d, 0x0a, 0x0d, 0x0a, 0x53, 0x4d, 0x0d, 0x0a, 0x0d, 0x0a}); +} + +const Frame& WellKnownFrames::defaultSettingsFrame() { + CONSTRUCT_ON_FIRST_USE(std::vector, + {0x00, 0x00, 0x0c, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x04, + 0x7f, 0xff, 0xff, 0xff, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00}); +} + +const Frame& WellKnownFrames::initialWindowUpdateFrame() { + CONSTRUCT_ON_FIRST_USE(std::vector, {0x00, 0x00, 0x04, 0x08, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x0f, 0xff, 0x00, 0x01}); +} + +void FrameUtils::fixupHeaders(Frame& frame) { + constexpr size_t frame_header_len = 9; // from RFC 7540 + while (frame.size() < frame_header_len) { + frame.emplace_back(0x00); + } + size_t headers_len = frame.size() - frame_header_len; + frame[2] = headers_len & 0xff; + headers_len >>= 8; + frame[1] = headers_len & 0xff; + headers_len >>= 8; + frame[0] = headers_len & 0xff; + // HEADERS frame with END_STREAM | END_HEADERS for stream 1. + size_t offset = 3; + for (const uint8_t b : {0x01, 0x05, 0x00, 0x00, 0x00, 0x01}) { + frame[offset++] = b; + } +} + +CodecFrameInjector::CodecFrameInjector(const std::string& injector_name) + : injector_name_(injector_name) { + settings_.hpack_table_size_ = Http2Settings::DEFAULT_HPACK_TABLE_SIZE; + settings_.max_concurrent_streams_ = Http2Settings::DEFAULT_MAX_CONCURRENT_STREAMS; + settings_.initial_stream_window_size_ = Http2Settings::DEFAULT_INITIAL_STREAM_WINDOW_SIZE; + settings_.initial_connection_window_size_ = Http2Settings::DEFAULT_INITIAL_CONNECTION_WINDOW_SIZE; + settings_.allow_metadata_ = false; +} + +ClientCodecFrameInjector::ClientCodecFrameInjector() : CodecFrameInjector("server") { + auto client = std::make_unique(client_connection_, client_callbacks_, + stats_store_, settings_, + Http::DEFAULT_MAX_REQUEST_HEADERS_KB); + request_encoder_ = &client->newStream(response_decoder_); + connection_ = std::move(client); + ON_CALL(client_connection_, write(_, _)) + .WillByDefault(Invoke([&](Buffer::Instance& data, bool) -> void { + ENVOY_LOG_MISC( + trace, "client write: {}", + Hex::encode(static_cast(data.linearize(data.length())), data.length())); + data.drain(data.length()); + })); + request_encoder_->getStream().addCallbacks(client_stream_callbacks_); + // Setup a 
single stream to inject frames as a reply to. + TestHeaderMapImpl request_headers; + HttpTestUtility::addDefaultHeaders(request_headers); + request_encoder_->encodeHeaders(request_headers, true); +} + +ServerCodecFrameInjector::ServerCodecFrameInjector() : CodecFrameInjector("client") { + connection_ = std::make_unique(server_connection_, server_callbacks_, + stats_store_, settings_, + Http::DEFAULT_MAX_REQUEST_HEADERS_KB); + EXPECT_CALL(server_callbacks_, newStream(_, _)) + .WillRepeatedly(Invoke([&](StreamEncoder& encoder, bool) -> StreamDecoder& { + encoder.getStream().addCallbacks(server_stream_callbacks_); + return request_decoder_; + })); + ON_CALL(server_connection_, write(_, _)) + .WillByDefault(Invoke([&](Buffer::Instance& data, bool) -> void { + ENVOY_LOG_MISC( + trace, "server write: {}", + Hex::encode(static_cast(data.linearize(data.length())), data.length())); + data.drain(data.length()); + })); +} + +void CodecFrameInjector::write(const Frame& frame) { + Buffer::OwnedImpl buffer; + buffer.add(frame.data(), frame.size()); + ENVOY_LOG_MISC(trace, "{} write: {}", injector_name_, Hex::encode(frame.data(), frame.size())); + while (buffer.length() > 0) { + connection_->dispatch(buffer); + } +} + +} // namespace Http2 +} // namespace Http +} // namespace Envoy diff --git a/test/common/http/http2/frame_replay.h b/test/common/http/http2/frame_replay.h new file mode 100644 index 0000000000000..fcd750225fcf9 --- /dev/null +++ b/test/common/http/http2/frame_replay.h @@ -0,0 +1,91 @@ +#include +#include +#include + +#include "common/stats/isolated_store_impl.h" + +#include "test/common/http/http2/codec_impl_test_util.h" +#include "test/mocks/http/mocks.h" +#include "test/mocks/network/mocks.h" +#include "test/test_common/utility.h" + +#include "absl/strings/string_view.h" +#include "gmock/gmock.h" + +namespace Envoy { +namespace Http { +namespace Http2 { + +// A byte vector representation of an HTTP/2 frame. 
+typedef std::vector Frame; + +// An HTTP/2 frame derived from a file location. +class FileFrame { +public: + FileFrame(absl::string_view path); + + Frame& frame() { return frame_; } + std::unique_ptr istream(); + + Frame frame_; + Api::ApiPtr api_; +}; + +// Some standards HTTP/2 frames for setting up a connection. The contents for these and the seed +// corpus were captured via logging the hex bytes in codec_impl_test's write() connection mocks in +// setupDefaultConnectionMocks(). +class WellKnownFrames { +public: + static const Frame& clientConnectionPrefaceFrame(); + static const Frame& defaultSettingsFrame(); + static const Frame& initialWindowUpdateFrame(); +}; + +class FrameUtils { +public: + // Modify a given frame so that it has the HTTP/2 frame header for a valid + // HEADERS frame. + static void fixupHeaders(Frame& frame); +}; + +class CodecFrameInjector { +public: + CodecFrameInjector(const std::string& injector_name); + + void write(const Frame& frame); + + Http2Settings settings_; + std::unique_ptr connection_; + Stats::IsolatedStoreImpl stats_store_; + const std::string injector_name_; +}; + +// Wrapper for HTTP/2 client codec supporting injection of frames and expecting on +// the behaviors of callbacks and the request decoder. +class ClientCodecFrameInjector : public CodecFrameInjector { +public: + ClientCodecFrameInjector(); + + ::testing::NiceMock client_connection_; + MockConnectionCallbacks client_callbacks_; + MockStreamDecoder response_decoder_; + StreamEncoder* request_encoder_; + MockStreamCallbacks client_stream_callbacks_; +}; + +// Wrapper for HTTP/2 server codec supporting injection of frames and expecting on +// the behaviors of callbacks and the request decoder. 
+class ServerCodecFrameInjector : public CodecFrameInjector { +public: + ServerCodecFrameInjector(); + + ::testing::NiceMock server_connection_; + MockServerConnectionCallbacks server_callbacks_; + std::unique_ptr server_; + MockStreamDecoder request_decoder_; + MockStreamCallbacks server_stream_callbacks_; +}; + +} // namespace Http2 +} // namespace Http +} // namespace Envoy diff --git a/test/common/http/http2/frame_replay_test.cc b/test/common/http/http2/frame_replay_test.cc new file mode 100644 index 0000000000000..de886feaacb97 --- /dev/null +++ b/test/common/http/http2/frame_replay_test.cc @@ -0,0 +1,288 @@ +#include "common/http/exception.h" + +#include "test/common/http/common.h" +#include "test/common/http/http2/frame_replay.h" + +#include "gtest/gtest.h" + +#define EXPECT_NEXT_BYTES(istream, bs...) \ + do { \ + std::vector expected_bytes{bs}; \ + std::vector actual_bytes(expected_bytes.size()); \ + istream->read(reinterpret_cast(actual_bytes.data()), expected_bytes.size()); \ + EXPECT_EQ(actual_bytes, expected_bytes); \ + } while (0) + +using testing::AnyNumber; +using testing::InvokeWithoutArgs; + +namespace Envoy { +namespace Http { +namespace Http2 { +namespace { + +// For organizational purposes only. +class RequestFrameCommentTest : public ::testing::Test {}; +class ResponseFrameCommentTest : public ::testing::Test {}; + +// Validate that a simple Huffman encoded request HEADERS frame can be decoded. +TEST_F(RequestFrameCommentTest, SimpleExampleHuffman) { + FileFrame header{"request_header_corpus/simple_example_huffman"}; + + // Validate HEADERS content matches intent. + auto header_bytes = header.istream(); + // Payload size is 18 bytes. + EXPECT_NEXT_BYTES(header_bytes, 0x00, 0x00, 0x12); + // HEADERS frame with END_STREAM | END_HEADERS for stream 1. 
+ EXPECT_NEXT_BYTES(header_bytes, 0x01, 0x05, 0x00, 0x00, 0x00, 0x01); + // Static table :scheme: http, :method: GET + EXPECT_NEXT_BYTES(header_bytes, 0x86, 0x82); + // Static table :authority, Huffman 'host' + EXPECT_NEXT_BYTES(header_bytes, 0x41, 0x83, 0x9c, 0xe8, 0x4f); + // Static table :path: / + EXPECT_NEXT_BYTES(header_bytes, 0x84); + // Huffman foo: barbaz + EXPECT_NEXT_BYTES(header_bytes, 0x40, 0x82, 0x94, 0xe7, 0x85, 0x8c, 0x76, 0x46, 0x3f, 0x7f); + + // Validate HEADERS decode. + ServerCodecFrameInjector codec; + codec.write(WellKnownFrames::clientConnectionPrefaceFrame()); + codec.write(WellKnownFrames::defaultSettingsFrame()); + codec.write(WellKnownFrames::initialWindowUpdateFrame()); + TestHeaderMapImpl expected_headers; + HttpTestUtility::addDefaultHeaders(expected_headers); + expected_headers.addCopy("foo", "barbaz"); + EXPECT_CALL(codec.request_decoder_, decodeHeaders_(HeaderMapEqual(&expected_headers), true)); + codec.write(header.frame()); +} + +// Validate that a simple Huffman encoded response HEADERS frame can be decoded. +TEST_F(ResponseFrameCommentTest, SimpleExampleHuffman) { + FileFrame header{"response_header_corpus/simple_example_huffman"}; + + // Validate HEADERS content matches intent. + auto header_bytes = header.istream(); + + // Payload size is 15 bytes. + EXPECT_NEXT_BYTES(header_bytes, 0x00, 0x00, 0x0f); + // HEADERS frame with END_STREAM | END_HEADERS for stream 1. + EXPECT_NEXT_BYTES(header_bytes, 0x01, 0x05, 0x00, 0x00, 0x00, 0x01); + // Static table :status: 200 + EXPECT_NEXT_BYTES(header_bytes, 0x88); + // Huffman compression: test + EXPECT_NEXT_BYTES(header_bytes, 0x40, 0x88, 0x21, 0xe9, 0xae, 0xc2, 0xa1, 0x06, 0x3d, 0x5f, 0x83, + 0x49, 0x50, 0x9f); + + // Validate HEADERS decode. 
+ ClientCodecFrameInjector codec; + codec.write(WellKnownFrames::defaultSettingsFrame()); + codec.write(WellKnownFrames::initialWindowUpdateFrame()); + TestHeaderMapImpl expected_headers; + expected_headers.addCopy(":status", "200"); + expected_headers.addCopy("compression", "test"); + EXPECT_CALL(codec.response_decoder_, decodeHeaders_(HeaderMapEqual(&expected_headers), true)); + codec.write(header.frame()); +} + +// Validate that a simple non-Huffman request HEADERS frame with no static table user either can be +// decoded. +TEST_F(RequestFrameCommentTest, SimpleExamplePlain) { + FileFrame header{"request_header_corpus/simple_example_plain"}; + + // Validate HEADERS content matches intent. + auto header_bytes = header.istream(); + // Payload size is 65 bytes. + EXPECT_NEXT_BYTES(header_bytes, 0x00, 0x00, 0x41); + // HEADERS frame with END_STREAM | END_HEADERS for stream 1. + EXPECT_NEXT_BYTES(header_bytes, 0x01, 0x05, 0x00, 0x00, 0x00, 0x01); + // Literal unindexed :scheme: http + EXPECT_NEXT_BYTES(header_bytes, 0x00, 0x07, 0x3A, 0x73, 0x63, 0x68, 0x65, 0x6D, 0x65); + EXPECT_NEXT_BYTES(header_bytes, 0x04, 0x68, 0x74, 0x74, 0x70); + // Literal unindexed :method: GET + EXPECT_NEXT_BYTES(header_bytes, 0x00, 0x07, 0x3A, 0x6D, 0x65, 0x74, 0x68, 0x6F, 0x64); + EXPECT_NEXT_BYTES(header_bytes, 0x03, 0x47, 0x45, 0x54); + // Literal unindexed :authority: host + EXPECT_NEXT_BYTES(header_bytes, 0x00, 0x0A, 0x3A, 0x61, 0x75, 0x74, 0x68, 0x6F, 0x72, 0x69, 0x74, + 0x79); + EXPECT_NEXT_BYTES(header_bytes, 0x04, 0x68, 0x6F, 0x73, 0x74); + // Literal unindexed :path: / + EXPECT_NEXT_BYTES(header_bytes, 0x00, 0x05, 0x3A, 0x70, 0x61, 0x74, 0x68); + EXPECT_NEXT_BYTES(header_bytes, 0x01, 0x2F); + // Literal unindexed foo: barbaz + EXPECT_NEXT_BYTES(header_bytes, 0x00, 0x03, 0x66, 0x6F, 0x6F); + EXPECT_NEXT_BYTES(header_bytes, 0x06, 0x62, 0x61, 0x72, 0x62, 0x61, 0x7A); + + // Validate HEADERS decode. 
+ ServerCodecFrameInjector codec; + codec.write(WellKnownFrames::clientConnectionPrefaceFrame()); + codec.write(WellKnownFrames::defaultSettingsFrame()); + codec.write(WellKnownFrames::initialWindowUpdateFrame()); + TestHeaderMapImpl expected_headers; + HttpTestUtility::addDefaultHeaders(expected_headers); + expected_headers.addCopy("foo", "barbaz"); + EXPECT_CALL(codec.request_decoder_, decodeHeaders_(HeaderMapEqual(&expected_headers), true)); + codec.write(header.frame()); +} + +// Validate that a simple non-Huffman response HEADERS frame with no static table user either can be +// decoded. +TEST_F(ResponseFrameCommentTest, SimpleExamplePlain) { + FileFrame header{"response_header_corpus/simple_example_plain"}; + + // Validate HEADERS content matches intent. + auto header_bytes = header.istream(); + + // Payload size is 15 bytes. + EXPECT_NEXT_BYTES(header_bytes, 0x00, 0x00, 0x1F); + // HEADERS frame with END_STREAM | END_HEADERS for stream 1. + EXPECT_NEXT_BYTES(header_bytes, 0x01, 0x05, 0x00, 0x00, 0x00, 0x01); + // Literal unindexed :status: 200 + EXPECT_NEXT_BYTES(header_bytes, 0x00, 0x07, 0x3A, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x03, 0x32, + 0x30, 0x30); + // Literal unindexed compression: test + EXPECT_NEXT_BYTES(header_bytes, 0x00, 0x0B, 0x63, 0x6F, 0x6D, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, + 0x6F, 0x6E, 0x04, 0x74, 0x65, 0x73, 0x74); + + // Validate HEADERS decode. + ClientCodecFrameInjector codec; + codec.write(WellKnownFrames::defaultSettingsFrame()); + codec.write(WellKnownFrames::initialWindowUpdateFrame()); + TestHeaderMapImpl expected_headers; + expected_headers.addCopy(":status", "200"); + expected_headers.addCopy("compression", "test"); + EXPECT_CALL(codec.response_decoder_, decodeHeaders_(HeaderMapEqual(&expected_headers), true)); + codec.write(header.frame()); +} + +// Validate that corrupting any single byte with {NUL, CR, LF} in a HEADERS frame doesn't crash or +// trigger ASSERTs. 
This is a litmus test for the HTTP/2 codec (nghttp2) to demonstrate that it +// doesn't suffer from the issue reported for http-parser in CVE-2019-9900. See also +// https://httpwg.org/specs/rfc7540.html#rfc.section.10.3. We use a non-compressed frame with no +// Huffman encoding to simplify. +TEST_F(RequestFrameCommentTest, SingleByteNulCrLfInHeaderFrame) { + FileFrame header{"request_header_corpus/simple_example_plain"}; + + for (size_t offset = 0; offset < header.frame().size(); ++offset) { + for (const char c : {'\0', '\n', '\r'}) { + // Corrupt a single byte in the HEADERS. + const char original = header.frame()[offset]; + header.frame()[offset] = c; + // Play the frames back. + ServerCodecFrameInjector codec; + codec.write(WellKnownFrames::clientConnectionPrefaceFrame()); + codec.write(WellKnownFrames::defaultSettingsFrame()); + codec.write(WellKnownFrames::initialWindowUpdateFrame()); + try { + EXPECT_CALL(codec.request_decoder_, decodeHeaders_(_, _)).Times(AnyNumber()); + EXPECT_CALL(codec.server_stream_callbacks_, onResetStream(_, _)).Times(AnyNumber()); + codec.write(header.frame()); + } catch (const CodecProtocolException& e) { + ENVOY_LOG_MISC(trace, "CodecProtocolException: {}", e.what()); + } + header.frame()[offset] = original; + } + } +} + +// Validate that corrupting any single byte with {NUL, CR, LF} in a HEADERS frame doesn't crash or +// trigger ASSERTs. This is a litmus test for the HTTP/2 codec (nghttp2) to demonsrate that it +// doesn't suffer from the issue reported for http-parser in CVE-2019-9900. See also +// https://httpwg.org/specs/rfc7540.html#rfc.section.10.3. We use a non-compressed frame with no +// Huffman encoding to simplify. +TEST_F(ResponseFrameCommentTest, SingleByteNulCrLfInHeaderFrame) { + FileFrame header{"response_header_corpus/simple_example_plain"}; + + for (size_t offset = 0; offset < header.frame().size(); ++offset) { + for (const char c : {'\0', '\n', '\r'}) { + // Corrupt a single byte in the HEADERS. 
+ const char original = header.frame()[offset]; + header.frame()[offset] = c; + // Play the frames back. + ClientCodecFrameInjector codec; + codec.write(WellKnownFrames::defaultSettingsFrame()); + codec.write(WellKnownFrames::initialWindowUpdateFrame()); + try { + EXPECT_CALL(codec.response_decoder_, decodeHeaders_(_, _)).Times(AnyNumber()); + EXPECT_CALL(codec.client_stream_callbacks_, onResetStream(_, _)).Times(AnyNumber()); + codec.write(header.frame()); + } catch (const CodecProtocolException& e) { + ENVOY_LOG_MISC(trace, "CodecProtocolException: {}", e.what()); + } + header.frame()[offset] = original; + } + } +} + +// Validate that corrupting any single byte with {NUL, CR, LF} in a HEADERS field name or value +// yields a CodecProtocolException or stream reset. This is a litmus test for the HTTP/2 codec +// (nghttp2) to demonsrate that it doesn't suffer from the issue reported for http-parser in +// CVE-2019-9900. See also https://httpwg.org/specs/rfc7540.html#rfc.section.10.3. We use a +// non-compressed frame with no Huffman encoding to simplify. +TEST_F(RequestFrameCommentTest, SingleByteNulCrLfInHeaderField) { + FileFrame header{"request_header_corpus/simple_example_plain"}; + + for (size_t offset = header.frame().size() - 11 /* foo: offset */; offset < header.frame().size(); + ++offset) { + for (const char c : {'\0', '\n', '\r'}) { + // Corrupt a single byte in the HEADERS. + const char original = header.frame()[offset]; + header.frame()[offset] = c; + // Play the frames back. 
+ ServerCodecFrameInjector codec; + codec.write(WellKnownFrames::clientConnectionPrefaceFrame()); + codec.write(WellKnownFrames::defaultSettingsFrame()); + codec.write(WellKnownFrames::initialWindowUpdateFrame()); + bool stream_reset = false; + EXPECT_CALL(codec.request_decoder_, decodeHeaders_(_, _)).Times(0); + EXPECT_CALL(codec.server_stream_callbacks_, onResetStream(_, _)) + .WillRepeatedly(InvokeWithoutArgs([&stream_reset] { stream_reset = true; })); + bool codec_exception = false; + try { + codec.write(header.frame()); + } catch (const CodecProtocolException& e) { + codec_exception = true; + } + EXPECT_TRUE(stream_reset || codec_exception); + header.frame()[offset] = original; + } + } +} + +// Validate that corrupting any single byte with {NUL, CR, LF} in a HEADERS field name or value +// yields a CodecProtocolException or stream reset. This is a litmus test for the HTTP/2 codec +// (nghttp2) to demonsrate that it doesn't suffer from the issue reported for http-parser in +// CVE-2019-9900. See also https://httpwg.org/specs/rfc7540.html#rfc.section.10.3. We use a +// non-compressed frame with no Huffman encoding to simplify. +TEST_F(ResponseFrameCommentTest, SingleByteNulCrLfInHeaderField) { + FileFrame header{"response_header_corpus/simple_example_plain"}; + + for (size_t offset = header.frame().size() - 17 /* test: offset */; + offset < header.frame().size(); ++offset) { + for (const char c : {'\0', '\n', '\r'}) { + // Corrupt a single byte in the HEADERS. + const char original = header.frame()[offset]; + header.frame()[offset] = c; + // Play the frames back. 
+ ClientCodecFrameInjector codec; + codec.write(WellKnownFrames::defaultSettingsFrame()); + codec.write(WellKnownFrames::initialWindowUpdateFrame()); + bool stream_reset = false; + EXPECT_CALL(codec.response_decoder_, decodeHeaders_(_, _)).Times(0); + EXPECT_CALL(codec.client_stream_callbacks_, onResetStream(_, _)) + .WillRepeatedly(InvokeWithoutArgs([&stream_reset] { stream_reset = true; })); + bool codec_exception = false; + try { + codec.write(header.frame()); + } catch (const CodecProtocolException& e) { + codec_exception = true; + } + EXPECT_TRUE(stream_reset || codec_exception); + header.frame()[offset] = original; + } + } +} + +} // namespace +} // namespace Http2 +} // namespace Http +} // namespace Envoy diff --git a/test/common/http/http2/request_header_corpus/simple_example_huffman b/test/common/http/http2/request_header_corpus/simple_example_huffman new file mode 100644 index 0000000000000000000000000000000000000000..90496d83da7beacac205245831347d0320df4ab7 GIT binary patch literal 27 icmZQz5MpFyU|?WuYjSL!^TNNyp=rwV)}Asq`+5LbdI<#p literal 0 HcmV?d00001 diff --git a/test/common/http/http2/request_header_corpus/simple_example_plain b/test/common/http/http2/request_header_corpus/simple_example_plain new file mode 100644 index 0000000000000000000000000000000000000000..0645c04067f608bfc10b0a4e001a1f9f51afb321 GIT binary patch literal 74 zcmZQzaAag Date: Sat, 13 Apr 2019 18:01:38 -0700 Subject: [PATCH 117/165] redis: fix asking output for ask redirection support (#6543) - issue separate, preceding "asking" command instead of prefixing "asking" to the redirected command. - combined all derived requests' onChildRedirection() methods into a single method. - fixed affected unit and integration tests. 
Signed-off-by: Mitch Sukalski --- .../redis_proxy/command_splitter_impl.cc | 152 ++++++------- .../redis_proxy/command_splitter_impl.h | 31 +-- .../redis_proxy/command_splitter_impl_test.cc | 201 ++++++++++++++++-- .../redis_proxy_integration_test.cc | 104 +++++++-- 4 files changed, 357 insertions(+), 131 deletions(-) diff --git a/source/extensions/filters/network/redis_proxy/command_splitter_impl.cc b/source/extensions/filters/network/redis_proxy/command_splitter_impl.cc index 136ad69169cc8..9fc6189a393e7 100644 --- a/source/extensions/filters/network/redis_proxy/command_splitter_impl.cc +++ b/source/extensions/filters/network/redis_proxy/command_splitter_impl.cc @@ -27,6 +27,26 @@ Common::Redis::RespValuePtr Utility::makeError(const std::string& error) { namespace { +// null_pool_callbacks is used for requests that must be filtered and not redirected such as +// "asking". +DoNothingPoolCallbacks null_pool_callbacks; + +// Create an asking command request. +const Common::Redis::RespValue& askingRequest() { + static Common::Redis::RespValue request; + static bool initialized = false; + + if (!initialized) { + Common::Redis::RespValue asking_cmd; + asking_cmd.type(Common::Redis::RespType::BulkString); + asking_cmd.asString() = "asking"; + request.type(Common::Redis::RespType::Array); + request.asArray().push_back(asking_cmd); + initialized = true; + } + return request; +} + /** * Validate the received moved/ask redirection error and the original redis request. 
* @param[in] original_request supplies the incoming request associated with the command splitter @@ -91,22 +111,6 @@ void SingleServerRequest::onFailure() { callbacks_.onResponse(Utility::makeError(Response::get().UpstreamFailure)); } -void SingleServerRequest::recreate(Common::Redis::RespValue& request, bool prepend_asking) { - if (!prepend_asking) { - request = *incoming_request_; - return; - } - - Common::Redis::RespValue asking_cmd; - asking_cmd.type(Common::Redis::RespType::BulkString); - asking_cmd.asString() = "asking"; - - request.type(Common::Redis::RespType::Array); - request.asArray().push_back(asking_cmd); - request.asArray().insert(request.asArray().end(), incoming_request_->asArray().begin(), - incoming_request_->asArray().end()); -} - bool SingleServerRequest::onRedirection(const Common::Redis::RespValue& value) { std::vector err; bool ask_redirection = false; @@ -114,11 +118,20 @@ bool SingleServerRequest::onRedirection(const Common::Redis::RespValue& value) { return false; } - Common::Redis::RespValue request; - recreate(request, ask_redirection); + // MOVED and ASK redirection errors have the following substrings: MOVED or ASK (err[0]), hash key + // slot (err[1]), and IP address and TCP port separated by a colon (err[2]). + const std::string host_address = std::string(err[2]); - const std::string host_address = std::string(err[2]); // ip:port - handle_ = conn_pool_->makeRequestToHost(host_address, request, *this); + // Prepend request with an asking command if redirected via an ASK error. The returned handle is + // not important since there is no point in being able to cancel the request. The use of + // null_pool_callbacks ensures the transparent filtering of the Redis server's response to the + // "asking" command; this is fine since the server either responds with an OK or an error message + // if cluster support is not enabled (in which case we should not get an ASK redirection error). 
+ if (ask_redirection && + !conn_pool_->makeRequestToHost(host_address, askingRequest(), null_pool_callbacks)) { + return false; + } + handle_ = conn_pool_->makeRequestToHost(host_address, *incoming_request_, *this); return (handle_ != nullptr); } @@ -240,6 +253,35 @@ SplitRequestPtr MGETRequest::create(ConnPool::Instance& conn_pool, return nullptr; } +bool FragmentedRequest::onChildRedirection(const Common::Redis::RespValue& value, uint32_t index, + ConnPool::Instance* conn_pool) { + std::vector err; + bool ask_redirection = false; + if (redirectionArgsInvalid(incoming_request_.get(), value, err, ask_redirection) || !conn_pool) { + return false; + } + + // MOVED and ASK redirection errors have the following substrings: MOVED or ASK (err[0]), hash key + // slot (err[1]), and IP address and TCP port separated by a colon (err[2]). + std::string host_address = std::string(err[2]); + Common::Redis::RespValue request; + recreate(request, index); + + // Prepend request with an asking command if redirected via an ASK error. The returned handle is + // not important since there is no point in being able to cancel the request. The use of + // null_pool_callbacks ensures the transparent filtering of the Redis server's response to the + // "asking" command; this is fine since the server either responds with an OK or an error message + // if cluster support is not enabled (in which case we should not get an ASK redirection error). 
+ if (ask_redirection && + !conn_pool->makeRequestToHost(host_address, askingRequest(), null_pool_callbacks)) { + return false; + } + + this->pending_requests_[index].handle_ = + conn_pool->makeRequestToHost(host_address, request, this->pending_requests_[index]); + return (this->pending_requests_[index].handle_ != nullptr); +} + void MGETRequest::onChildResponse(Common::Redis::RespValuePtr&& value, uint32_t index) { pending_requests_[index].handle_ = nullptr; @@ -273,9 +315,9 @@ void MGETRequest::onChildResponse(Common::Redis::RespValuePtr&& value, uint32_t } } -void MGETRequest::recreate(Common::Redis::RespValue& request, uint32_t index, bool prepend_asking) { +void MGETRequest::recreate(Common::Redis::RespValue& request, uint32_t index) { static const uint32_t GET_COMMAND_SUBSTRINGS = 2; - uint32_t num_values = prepend_asking ? (GET_COMMAND_SUBSTRINGS + 1) : GET_COMMAND_SUBSTRINGS; + uint32_t num_values = GET_COMMAND_SUBSTRINGS; std::vector values(num_values); for (uint32_t i = 0; i < num_values; i++) { @@ -283,30 +325,11 @@ void MGETRequest::recreate(Common::Redis::RespValue& request, uint32_t index, bo } values[--num_values].asString() = incoming_request_->asArray()[index + 1].asString(); values[--num_values].asString() = "get"; - if (prepend_asking) { - values[--num_values].asString() = "asking"; - } request.type(Common::Redis::RespType::Array); request.asArray().swap(values); } -bool MGETRequest::onChildRedirection(const Common::Redis::RespValue& value, uint32_t index, - ConnPool::Instance* conn_pool) { - std::vector err; - bool ask_redirection = false; - if (redirectionArgsInvalid(incoming_request_.get(), value, err, ask_redirection) || !conn_pool) { - return false; - } - - Common::Redis::RespValue request; - recreate(request, index, ask_redirection); - - this->pending_requests_[index].handle_ = - conn_pool->makeRequestToHost(std::string(err[2]), request, this->pending_requests_[index]); - return (this->pending_requests_[index].handle_ != nullptr); -} - 
SplitRequestPtr MSETRequest::create(ConnPool::Instance& conn_pool, Common::Redis::RespValuePtr&& incoming_request, SplitCallbacks& callbacks, CommandStats& command_stats, @@ -388,9 +411,9 @@ void MSETRequest::onChildResponse(Common::Redis::RespValuePtr&& value, uint32_t } } -void MSETRequest::recreate(Common::Redis::RespValue& request, uint32_t index, bool prepend_asking) { +void MSETRequest::recreate(Common::Redis::RespValue& request, uint32_t index) { static const uint32_t SET_COMMAND_SUBSTRINGS = 3; - uint32_t num_values = prepend_asking ? (SET_COMMAND_SUBSTRINGS + 1) : SET_COMMAND_SUBSTRINGS; + uint32_t num_values = SET_COMMAND_SUBSTRINGS; std::vector values(num_values); for (uint32_t i = 0; i < num_values; i++) { @@ -399,30 +422,11 @@ void MSETRequest::recreate(Common::Redis::RespValue& request, uint32_t index, bo values[--num_values].asString() = incoming_request_->asArray()[(index * 2) + 2].asString(); values[--num_values].asString() = incoming_request_->asArray()[(index * 2) + 1].asString(); values[--num_values].asString() = "set"; - if (prepend_asking) { - values[--num_values].asString() = "asking"; - } request.type(Common::Redis::RespType::Array); request.asArray().swap(values); } -bool MSETRequest::onChildRedirection(const Common::Redis::RespValue& value, uint32_t index, - ConnPool::Instance* conn_pool) { - std::vector err; - bool ask_redirection = false; - if (redirectionArgsInvalid(incoming_request_.get(), value, err, ask_redirection) || !conn_pool) { - return false; - } - - Common::Redis::RespValue request; - recreate(request, index, ask_redirection); - - this->pending_requests_[index].handle_ = - conn_pool->makeRequestToHost(std::string(err[2]), request, this->pending_requests_[index]); - return (this->pending_requests_[index].handle_ != nullptr); -} - SplitRequestPtr SplitKeysSumResultRequest::create(ConnPool::Instance& conn_pool, Common::Redis::RespValuePtr&& incoming_request, SplitCallbacks& callbacks, @@ -496,10 +500,9 @@ void 
SplitKeysSumResultRequest::onChildResponse(Common::Redis::RespValuePtr&& va } } -void SplitKeysSumResultRequest::recreate(Common::Redis::RespValue& request, uint32_t index, - bool prepend_asking) { +void SplitKeysSumResultRequest::recreate(Common::Redis::RespValue& request, uint32_t index) { static const uint32_t BASE_COMMAND_SUBSTRINGS = 2; - uint32_t num_values = prepend_asking ? (BASE_COMMAND_SUBSTRINGS + 1) : BASE_COMMAND_SUBSTRINGS; + uint32_t num_values = BASE_COMMAND_SUBSTRINGS; std::vector values(num_values); for (uint32_t i = 0; i < num_values; i++) { @@ -507,30 +510,11 @@ void SplitKeysSumResultRequest::recreate(Common::Redis::RespValue& request, uint } values[--num_values].asString() = incoming_request_->asArray()[index + 1].asString(); values[--num_values].asString() = incoming_request_->asArray()[0].asString(); - if (prepend_asking) { - values[--num_values].asString() = "asking"; - } request.type(Common::Redis::RespType::Array); request.asArray().swap(values); } -bool SplitKeysSumResultRequest::onChildRedirection(const Common::Redis::RespValue& value, - uint32_t index, ConnPool::Instance* conn_pool) { - std::vector err; - bool ask_redirection = false; - if (redirectionArgsInvalid(incoming_request_.get(), value, err, ask_redirection) || !conn_pool) { - return false; - } - - Common::Redis::RespValue request; - recreate(request, index, ask_redirection); - - this->pending_requests_[index].handle_ = - conn_pool->makeRequestToHost(std::string(err[2]), request, this->pending_requests_[index]); - return (this->pending_requests_[index].handle_ != nullptr); -} - InstanceImpl::InstanceImpl(ConnPool::InstancePtr&& conn_pool, Stats::Scope& scope, const std::string& stat_prefix, TimeSource& time_source, bool latency_in_micros) diff --git a/source/extensions/filters/network/redis_proxy/command_splitter_impl.h b/source/extensions/filters/network/redis_proxy/command_splitter_impl.h index e6b1d475464e3..21eb847c73cf0 100644 --- 
a/source/extensions/filters/network/redis_proxy/command_splitter_impl.h +++ b/source/extensions/filters/network/redis_proxy/command_splitter_impl.h @@ -113,8 +113,6 @@ class SingleServerRequest : public SplitRequestBase, public Common::Redis::Clien TimeSource& time_source, bool latency_in_micros) : SplitRequestBase(command_stats, time_source, latency_in_micros), callbacks_(callbacks) {} - void recreate(Common::Redis::RespValue& request, bool prepend_asking); - SplitCallbacks& callbacks_; ConnPool::Instance* conn_pool_{}; Common::Redis::Client::PoolRequest* handle_{}; @@ -191,8 +189,9 @@ class FragmentedRequest : public SplitRequestBase { virtual void onChildResponse(Common::Redis::RespValuePtr&& value, uint32_t index) PURE; void onChildFailure(uint32_t index); - virtual bool onChildRedirection(const Common::Redis::RespValue& value, uint32_t index, - ConnPool::Instance* conn_pool) PURE; + bool onChildRedirection(const Common::Redis::RespValue& value, uint32_t index, + ConnPool::Instance* conn_pool); + virtual void recreate(Common::Redis::RespValue& request, uint32_t index) PURE; SplitCallbacks& callbacks_; @@ -221,9 +220,7 @@ class MGETRequest : public FragmentedRequest, Logger::Loggable { TimeSource& time_source_; }; +/** + * DoNothingPoolCallbacks is used for internally generated commands whose response is + * transparently filtered, and redirection never occurs (e.g., "asking", etc.). 
+ */ +class DoNothingPoolCallbacks : public Common::Redis::Client::PoolCallbacks { +public: + // Common::Redis::Client::PoolCallbacks + void onResponse(Common::Redis::RespValuePtr&&) override {} + void onFailure() override {} + bool onRedirection(const Common::Redis::RespValue&) override { return false; } +}; + } // namespace CommandSplitter } // namespace RedisProxy } // namespace NetworkFilters diff --git a/test/extensions/filters/network/redis_proxy/command_splitter_impl_test.cc b/test/extensions/filters/network/redis_proxy/command_splitter_impl_test.cc index b4f2c8fb70110..ae7a9839147b6 100644 --- a/test/extensions/filters/network/redis_proxy/command_splitter_impl_test.cc +++ b/test/extensions/filters/network/redis_proxy/command_splitter_impl_test.cc @@ -385,6 +385,14 @@ TEST_F(RedisSingleServerRequestTest, MovedRedirectionFailure) { moved_response.asInteger() = 1; EXPECT_FALSE(pool_callbacks_->onRedirection(moved_response)); + // Test an upstream error preventing the request from being sent. 
+ moved_response.type(Common::Redis::RespType::Error); + moved_response.asString() = "MOVED 1111 10.1.2.3:4000"; + std::string host_address; + Common::Redis::RespValue request_copy; + EXPECT_CALL(*conn_pool_, makeRequestToHost(_, _, _)).WillOnce(Return(nullptr)); + EXPECT_FALSE(pool_callbacks_->onRedirection(moved_response)); + respond(); }; @@ -412,7 +420,7 @@ TEST_F(RedisSingleServerRequestTest, RedirectionFailure) { TEST_F(RedisSingleServerRequestTest, AskRedirectionSuccess) { InSequence s; - Common::Redis::Client::MockPoolRequest pool_request2; + Common::Redis::Client::MockPoolRequest pool_request2, pool_request3; Common::Redis::RespValuePtr request{new Common::Redis::RespValue()}; makeBulkStringArray(*request, {"get", "foo"}); makeRequest("foo", std::move(request)); @@ -421,12 +429,12 @@ TEST_F(RedisSingleServerRequestTest, AskRedirectionSuccess) { Common::Redis::RespValue ask_response; ask_response.type(Common::Redis::RespType::Error); ask_response.asString() = "ASK 1111 10.1.2.3:4000"; - EXPECT_CALL(*conn_pool_, makeRequestToHost(_, _, Ref(*pool_callbacks_))) + EXPECT_CALL(*conn_pool_, makeRequestToHost(_, _, _)) .WillOnce( Invoke([&](const std::string& host_address, const Common::Redis::RespValue& request, Common::Redis::Client::PoolCallbacks&) -> Common::Redis::Client::PoolRequest* { - // Verify that the request has been properly modified in place with an "asking" prefix. - std::vector commands = {"asking", "get", "foo"}; + // Verify that the request has been properly prepended with an "asking" command. 
+ std::vector commands = {"asking"}; EXPECT_EQ(host_address, "10.1.2.3:4000"); EXPECT_TRUE(request.type() == Common::Redis::RespType::Array); EXPECT_EQ(request.asArray().size(), commands.size()); @@ -436,6 +444,20 @@ TEST_F(RedisSingleServerRequestTest, AskRedirectionSuccess) { } return &pool_request2; })); + EXPECT_CALL(*conn_pool_, makeRequestToHost(_, _, Ref(*pool_callbacks_))) + .WillOnce( + Invoke([&](const std::string& host_address, const Common::Redis::RespValue& request, + Common::Redis::Client::PoolCallbacks&) -> Common::Redis::Client::PoolRequest* { + std::vector commands = {"get", "foo"}; + EXPECT_EQ(host_address, "10.1.2.3:4000"); + EXPECT_TRUE(request.type() == Common::Redis::RespType::Array); + EXPECT_EQ(request.asArray().size(), commands.size()); + for (unsigned int i = 0; i < commands.size(); i++) { + EXPECT_TRUE(request.asArray()[i].type() == Common::Redis::RespType::BulkString); + EXPECT_EQ(request.asArray()[i].asString(), commands[i]); + } + return &pool_request3; + })); EXPECT_TRUE(pool_callbacks_->onRedirection(ask_response)); respond(); }; @@ -458,6 +480,60 @@ TEST_F(RedisSingleServerRequestTest, AskRedirectionFailure) { ask_response.asInteger() = 1; EXPECT_FALSE(pool_callbacks_->onRedirection(ask_response)); + // Test an upstream error from trying to send an "asking" command upstream. + ask_response.type(Common::Redis::RespType::Error); + ask_response.asString() = "ASK 1111 10.1.2.3:4000"; + EXPECT_CALL(*conn_pool_, makeRequestToHost(_, _, _)) + .WillOnce( + Invoke([&](const std::string& host_address, const Common::Redis::RespValue& request, + Common::Redis::Client::PoolCallbacks&) -> Common::Redis::Client::PoolRequest* { + // Verify that the request has been properly prepended with an "asking" command. 
+ std::vector commands = {"asking"}; + EXPECT_EQ(host_address, "10.1.2.3:4000"); + EXPECT_TRUE(request.type() == Common::Redis::RespType::Array); + EXPECT_EQ(request.asArray().size(), commands.size()); + for (unsigned int i = 0; i < commands.size(); i++) { + EXPECT_TRUE(request.asArray()[i].type() == Common::Redis::RespType::BulkString); + EXPECT_EQ(request.asArray()[i].asString(), commands[i]); + } + return nullptr; + })); + EXPECT_FALSE(pool_callbacks_->onRedirection(ask_response)); + + // Test an upstream error from trying to send the original request after the "asking" command is + // sent successfully. + Common::Redis::Client::MockPoolRequest pool_request; + EXPECT_CALL(*conn_pool_, makeRequestToHost(_, _, _)) + .WillOnce( + Invoke([&](const std::string& host_address, const Common::Redis::RespValue& request, + Common::Redis::Client::PoolCallbacks&) -> Common::Redis::Client::PoolRequest* { + // Verify that the request has been properly prepended with an "asking" command. + std::vector commands = {"asking"}; + EXPECT_EQ(host_address, "10.1.2.3:4000"); + EXPECT_TRUE(request.type() == Common::Redis::RespType::Array); + EXPECT_EQ(request.asArray().size(), commands.size()); + for (unsigned int i = 0; i < commands.size(); i++) { + EXPECT_TRUE(request.asArray()[i].type() == Common::Redis::RespType::BulkString); + EXPECT_EQ(request.asArray()[i].asString(), commands[i]); + } + return &pool_request; + })); + EXPECT_CALL(*conn_pool_, makeRequestToHost(_, _, Ref(*pool_callbacks_))) + .WillOnce( + Invoke([&](const std::string& host_address, const Common::Redis::RespValue& request, + Common::Redis::Client::PoolCallbacks&) -> Common::Redis::Client::PoolRequest* { + std::vector commands = {"get", "foo"}; + EXPECT_EQ(host_address, "10.1.2.3:4000"); + EXPECT_TRUE(request.type() == Common::Redis::RespType::Array); + EXPECT_EQ(request.asArray().size(), commands.size()); + for (unsigned int i = 0; i < commands.size(); i++) { + EXPECT_TRUE(request.asArray()[i].type() == 
Common::Redis::RespType::BulkString); + EXPECT_EQ(request.asArray()[i].asString(), commands[i]); + } + return nullptr; + })); + EXPECT_FALSE(pool_callbacks_->onRedirection(ask_response)); + respond(); }; @@ -677,6 +753,28 @@ TEST_F(RedisMGETCommandHandlerTest, NormalWithMovedRedirection) { Common::Redis::RespValue moved_response; moved_response.type(Common::Redis::RespType::Error); moved_response.asString() = "MOVED 1234 192.168.0.1:5000"; // Exact values are not important. + + // Test with simulated upstream failures. This exercises code in + // FragmentedRequest::onChildRedirection() common to MGET, MSET, and SplitKeysSumResult commands. + for (unsigned int i = 0; i < 2; i++) { + EXPECT_CALL(*conn_pool_, makeRequestToHost(_, _, Ref(*pool_callbacks_[i]))) + .WillOnce(Invoke( + [&](const std::string& host_address, const Common::Redis::RespValue& request, + Common::Redis::Client::PoolCallbacks&) -> Common::Redis::Client::PoolRequest* { + EXPECT_EQ(host_address, "192.168.0.1:5000"); + EXPECT_TRUE(request.type() == Common::Redis::RespType::Array); + EXPECT_EQ(request.asArray().size(), 2); + EXPECT_TRUE(request.asArray()[0].type() == Common::Redis::RespType::BulkString); + EXPECT_EQ(request.asArray()[0].asString(), "get"); + EXPECT_TRUE(request.asArray()[1].type() == Common::Redis::RespType::BulkString); + EXPECT_EQ(request.asArray()[1].asString(), std::to_string(i)); + EXPECT_NE(&pool_requests_[i], nullptr); + return nullptr; + })); + EXPECT_FALSE(pool_callbacks_[i]->onRedirection(moved_response)); + } + + // Test "successful" redirection. for (unsigned int i = 0; i < 2; i++) { EXPECT_CALL(*conn_pool_, makeRequestToHost(_, _, Ref(*pool_callbacks_[i]))) .WillOnce(Invoke( @@ -738,20 +836,65 @@ TEST_F(RedisMGETCommandHandlerTest, NormalWithAskRedirection) { Common::Redis::RespValue ask_response; ask_response.type(Common::Redis::RespType::Error); ask_response.asString() = "ASK 1234 192.168.0.1:5000"; // Exact values are not important. 
+ Common::Redis::Client::MockPoolRequest dummy_poolrequest; + + // Test redirection with simulated upstream failures. This exercises code in + // FragmentedRequest::onChildRedirection() common to MGET, MSET, and SplitKeysSumResult commands. for (unsigned int i = 0; i < 2; i++) { - EXPECT_CALL(*conn_pool_, makeRequestToHost(_, _, Ref(*pool_callbacks_[i]))) + EXPECT_CALL(*conn_pool_, makeRequestToHost(_, _, _)) .WillOnce(Invoke( [&](const std::string& host_address, const Common::Redis::RespValue& request, Common::Redis::Client::PoolCallbacks&) -> Common::Redis::Client::PoolRequest* { EXPECT_EQ(host_address, "192.168.0.1:5000"); EXPECT_TRUE(request.type() == Common::Redis::RespType::Array); - EXPECT_EQ(request.asArray().size(), 3); + EXPECT_EQ(request.asArray().size(), 1); EXPECT_TRUE(request.asArray()[0].type() == Common::Redis::RespType::BulkString); EXPECT_EQ(request.asArray()[0].asString(), "asking"); + return (i == 0 ? nullptr : &dummy_poolrequest); + })); + if (i == 1) { + EXPECT_CALL(*conn_pool_, makeRequestToHost(_, _, Ref(*pool_callbacks_[i]))) + .WillOnce(Invoke( + [&](const std::string& host_address, const Common::Redis::RespValue& request, + Common::Redis::Client::PoolCallbacks&) -> Common::Redis::Client::PoolRequest* { + EXPECT_EQ(host_address, "192.168.0.1:5000"); + EXPECT_TRUE(request.type() == Common::Redis::RespType::Array); + EXPECT_EQ(request.asArray().size(), 2); + EXPECT_TRUE(request.asArray()[0].type() == Common::Redis::RespType::BulkString); + EXPECT_EQ(request.asArray()[0].asString(), "get"); + EXPECT_TRUE(request.asArray()[1].type() == Common::Redis::RespType::BulkString); + EXPECT_EQ(request.asArray()[1].asString(), std::to_string(i)); + EXPECT_NE(&pool_requests_[i], nullptr); + return (i == 1 ? nullptr : &pool_requests_[i]); + })); + } + EXPECT_FALSE(pool_callbacks_[i]->onRedirection(ask_response)); + } + + // Test "successful" redirection. 
+ for (unsigned int i = 0; i < 2; i++) { + EXPECT_CALL(*conn_pool_, makeRequestToHost(_, _, _)) + .WillOnce(Invoke( + [&](const std::string& host_address, const Common::Redis::RespValue& request, + Common::Redis::Client::PoolCallbacks&) -> Common::Redis::Client::PoolRequest* { + EXPECT_EQ(host_address, "192.168.0.1:5000"); + EXPECT_TRUE(request.type() == Common::Redis::RespType::Array); + EXPECT_EQ(request.asArray().size(), 1); + EXPECT_TRUE(request.asArray()[0].type() == Common::Redis::RespType::BulkString); + EXPECT_EQ(request.asArray()[0].asString(), "asking"); + return &dummy_poolrequest; + })); + EXPECT_CALL(*conn_pool_, makeRequestToHost(_, _, Ref(*pool_callbacks_[i]))) + .WillOnce(Invoke( + [&](const std::string& host_address, const Common::Redis::RespValue& request, + Common::Redis::Client::PoolCallbacks&) -> Common::Redis::Client::PoolRequest* { + EXPECT_EQ(host_address, "192.168.0.1:5000"); + EXPECT_TRUE(request.type() == Common::Redis::RespType::Array); + EXPECT_EQ(request.asArray().size(), 2); + EXPECT_TRUE(request.asArray()[0].type() == Common::Redis::RespType::BulkString); + EXPECT_EQ(request.asArray()[0].asString(), "get"); EXPECT_TRUE(request.asArray()[1].type() == Common::Redis::RespType::BulkString); - EXPECT_EQ(request.asArray()[1].asString(), "get"); - EXPECT_TRUE(request.asArray()[2].type() == Common::Redis::RespType::BulkString); - EXPECT_EQ(request.asArray()[2].asString(), std::to_string(i)); + EXPECT_EQ(request.asArray()[1].asString(), std::to_string(i)); EXPECT_NE(&pool_requests_[i], nullptr); return &pool_requests_[i]; })); @@ -985,22 +1128,32 @@ TEST_F(RedisMSETCommandHandlerTest, NormalWithAskRedirection) { Common::Redis::RespValue ask_response; ask_response.type(Common::Redis::RespType::Error); ask_response.asString() = "ASK 1234 192.168.0.1:5000"; // Exact values are not important. 
+ Common::Redis::Client::MockPoolRequest dummy_poolrequest; for (unsigned int i = 0; i < 2; i++) { - EXPECT_CALL(*conn_pool_, makeRequestToHost(_, _, Ref(*pool_callbacks_[i]))) + EXPECT_CALL(*conn_pool_, makeRequestToHost(_, _, _)) .WillOnce(Invoke( [&](const std::string& host_address, const Common::Redis::RespValue& request, Common::Redis::Client::PoolCallbacks&) -> Common::Redis::Client::PoolRequest* { EXPECT_EQ(host_address, "192.168.0.1:5000"); EXPECT_TRUE(request.type() == Common::Redis::RespType::Array); - EXPECT_EQ(request.asArray().size(), 4); + EXPECT_EQ(request.asArray().size(), 1); EXPECT_TRUE(request.asArray()[0].type() == Common::Redis::RespType::BulkString); EXPECT_EQ(request.asArray()[0].asString(), "asking"); + return &dummy_poolrequest; + })); + EXPECT_CALL(*conn_pool_, makeRequestToHost(_, _, Ref(*pool_callbacks_[i]))) + .WillOnce(Invoke( + [&](const std::string& host_address, const Common::Redis::RespValue& request, + Common::Redis::Client::PoolCallbacks&) -> Common::Redis::Client::PoolRequest* { + EXPECT_EQ(host_address, "192.168.0.1:5000"); + EXPECT_TRUE(request.type() == Common::Redis::RespType::Array); + EXPECT_EQ(request.asArray().size(), 3); + EXPECT_TRUE(request.asArray()[0].type() == Common::Redis::RespType::BulkString); + EXPECT_EQ(request.asArray()[0].asString(), "set"); EXPECT_TRUE(request.asArray()[1].type() == Common::Redis::RespType::BulkString); - EXPECT_EQ(request.asArray()[1].asString(), "set"); + EXPECT_EQ(request.asArray()[1].asString(), std::to_string(i)); EXPECT_TRUE(request.asArray()[2].type() == Common::Redis::RespType::BulkString); EXPECT_EQ(request.asArray()[2].asString(), std::to_string(i)); - EXPECT_TRUE(request.asArray()[3].type() == Common::Redis::RespType::BulkString); - EXPECT_EQ(request.asArray()[3].asString(), std::to_string(i)); EXPECT_NE(&pool_requests_[i], nullptr); return &pool_requests_[i]; })); @@ -1209,20 +1362,30 @@ TEST_P(RedisSplitKeysSumResultHandlerTest, NormalWithAskRedirection) { 
Common::Redis::RespValue ask_response; ask_response.type(Common::Redis::RespType::Error); ask_response.asString() = "ASK 1234 192.168.0.1:5000"; // Exact values are not important. + Common::Redis::Client::MockPoolRequest dummy_poolrequest; for (unsigned int i = 0; i < 2; i++) { - EXPECT_CALL(*conn_pool_, makeRequestToHost(_, _, Ref(*pool_callbacks_[i]))) + EXPECT_CALL(*conn_pool_, makeRequestToHost(_, _, _)) .WillOnce(Invoke( [&](const std::string& host_address, const Common::Redis::RespValue& request, Common::Redis::Client::PoolCallbacks&) -> Common::Redis::Client::PoolRequest* { EXPECT_EQ(host_address, "192.168.0.1:5000"); EXPECT_TRUE(request.type() == Common::Redis::RespType::Array); - EXPECT_EQ(request.asArray().size(), 3); + EXPECT_EQ(request.asArray().size(), 1); EXPECT_TRUE(request.asArray()[0].type() == Common::Redis::RespType::BulkString); EXPECT_EQ(request.asArray()[0].asString(), "asking"); + return &dummy_poolrequest; + })); + EXPECT_CALL(*conn_pool_, makeRequestToHost(_, _, Ref(*pool_callbacks_[i]))) + .WillOnce(Invoke( + [&](const std::string& host_address, const Common::Redis::RespValue& request, + Common::Redis::Client::PoolCallbacks&) -> Common::Redis::Client::PoolRequest* { + EXPECT_EQ(host_address, "192.168.0.1:5000"); + EXPECT_TRUE(request.type() == Common::Redis::RespType::Array); + EXPECT_EQ(request.asArray().size(), 2); + EXPECT_TRUE(request.asArray()[0].type() == Common::Redis::RespType::BulkString); + EXPECT_EQ(request.asArray()[0].asString(), GetParam()); EXPECT_TRUE(request.asArray()[1].type() == Common::Redis::RespType::BulkString); - EXPECT_EQ(request.asArray()[1].asString(), GetParam()); - EXPECT_TRUE(request.asArray()[2].type() == Common::Redis::RespType::BulkString); - EXPECT_EQ(request.asArray()[2].asString(), std::to_string(i)); + EXPECT_EQ(request.asArray()[1].asString(), std::to_string(i)); EXPECT_NE(&pool_requests_[i], nullptr); return &pool_requests_[i]; })); diff --git 
a/test/extensions/filters/network/redis_proxy/redis_proxy_integration_test.cc b/test/extensions/filters/network/redis_proxy/redis_proxy_integration_test.cc index ab727c0f846c6..d23c2b2649514 100644 --- a/test/extensions/filters/network/redis_proxy/redis_proxy_integration_test.cc +++ b/test/extensions/filters/network/redis_proxy/redis_proxy_integration_test.cc @@ -139,12 +139,13 @@ class RedisProxyWithRedirectionIntegrationTest : public RedisProxyIntegrationTes * @param target_server a handle to the second server that will respond to the request. * @param request supplies client data to transmit to the first upstream server. * @param redirection_response supplies the moved or ask redirection error from the first server. - * @param received_request suplies data received by the second server from the proxy. * @param response supplies data sent by the second server back to the fake Redis client. + * @param asking_response supplies the target_server's response to an "asking" command, if + * appropriate. 
*/ void simpleRedirection(FakeUpstreamPtr& target_server, const std::string& request, - const std::string& redirection_response, - const std::string& received_request, const std::string& response); + const std::string& redirection_response, const std::string& response, + const std::string& asking_response = "+OK\r\n"); }; INSTANTIATE_TEST_SUITE_P(IpVersions, RedisProxyIntegrationTest, @@ -201,8 +202,10 @@ void RedisProxyIntegrationTest::simpleProxyResponse(const std::string& request, void RedisProxyWithRedirectionIntegrationTest::simpleRedirection( FakeUpstreamPtr& target_server, const std::string& request, - const std::string& redirection_response, const std::string& received_request, - const std::string& response) { + const std::string& redirection_response, const std::string& response, + const std::string& asking_response) { + + bool asking = (redirection_response.find("-ASK") != std::string::npos); std::string proxy_to_server; IntegrationTcpClientPtr redis_client = makeTcpConnection(lookupPort("redis_proxy")); redis_client->write(request); @@ -221,10 +224,20 @@ void RedisProxyWithRedirectionIntegrationTest::simpleRedirection( // The proxy should initiate a new connection to the fake redis server, target_server, in // response. EXPECT_TRUE(target_server->waitForRawConnection(fake_upstream_connection_2)); - // The server, target_server, should receive received_request which may or may not be the same as - // the original request. - EXPECT_TRUE(fake_upstream_connection_2->waitForData(received_request.size(), &proxy_to_server)); - EXPECT_EQ(received_request, proxy_to_server); + + if (asking) { + // The server, target_server, should receive an "asking" command before the original request. + std::string asking_request = makeBulkStringArray({"asking"}); + EXPECT_TRUE(fake_upstream_connection_2->waitForData(asking_request.size() + request.size(), + &proxy_to_server)); + EXPECT_EQ(asking_request + request, proxy_to_server); + // Respond to the "asking" command. 
+ EXPECT_TRUE(fake_upstream_connection_2->write(asking_response)); + } else { + // The server, target_server, should receive request unchanged. + EXPECT_TRUE(fake_upstream_connection_2->waitForData(request.size(), &proxy_to_server)); + EXPECT_EQ(request, proxy_to_server); + } // Send response from the second fake Redis server, target_server, to the client. EXPECT_TRUE(fake_upstream_connection_2->write(response)); @@ -287,12 +300,11 @@ TEST_P(RedisProxyWithRedirectionIntegrationTest, RedirectToKnownServer) { initialize(); std::stringstream redirection_error; redirection_error << "-MOVED 1111 " << redisAddressAndPort(fake_upstreams_[1]) << "\r\n"; - simpleRedirection(fake_upstreams_[1], request, redirection_error.str(), request, "$3\r\nbar\r\n"); + simpleRedirection(fake_upstreams_[1], request, redirection_error.str(), "$3\r\nbar\r\n"); redirection_error.str(""); redirection_error << "-ASK 1111 " << redisAddressAndPort(fake_upstreams_[1]) << "\r\n"; - simpleRedirection(fake_upstreams_[1], request, redirection_error.str(), - makeBulkStringArray({"asking", "get", "foo"}), "$3\r\nbar\r\n"); + simpleRedirection(fake_upstreams_[1], request, redirection_error.str(), "$3\r\nbar\r\n"); } // This test sends a simple Redis commands to a sequence of fake upstream @@ -312,12 +324,11 @@ TEST_P(RedisProxyWithRedirectionIntegrationTest, RedirectToUnknownServer) { std::stringstream redirection_error; redirection_error << "-MOVED 1111 " << redisAddressAndPort(target_server) << "\r\n"; - simpleRedirection(target_server, request, redirection_error.str(), request, "$3\r\nbar\r\n"); + simpleRedirection(target_server, request, redirection_error.str(), "$3\r\nbar\r\n"); redirection_error.str(""); redirection_error << "-ASK 1111 " << redisAddressAndPort(target_server) << "\r\n"; - simpleRedirection(target_server, request, redirection_error.str(), - makeBulkStringArray({"asking", "get", "foo"}), "$3\r\nbar\r\n"); + simpleRedirection(target_server, request, redirection_error.str(), 
"$3\r\nbar\r\n"); } // This test verifies that various forms of bad MOVED/ASK redirection errors @@ -371,5 +382,68 @@ TEST_P(RedisProxyWithRedirectionIntegrationTest, BadRedirectStrings) { } } +// This test verifies that an upstream connection failure during ask redirection processing is +// handled correctly. In this case the "asking" command and original client request have been sent +// to the target server, and then the connection is closed. The fake Redis client should receive an +// upstream failure error in response to its request. + +TEST_P(RedisProxyWithRedirectionIntegrationTest, ConnectionFailureBeforeAskingResponse) { + initialize(); + + std::string request = makeBulkStringArray({"get", "foo"}); + std::stringstream redirection_error; + redirection_error << "-ASK 1111 " << redisAddressAndPort(fake_upstreams_[1]) << "\r\n"; + + std::string proxy_to_server; + IntegrationTcpClientPtr redis_client = makeTcpConnection(lookupPort("redis_proxy")); + redis_client->write(request); + + FakeRawConnectionPtr fake_upstream_connection_1, fake_upstream_connection_2; + + // Data from the client should always be routed to fake_upstreams_[0] by the load balancer. + EXPECT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection_1)); + EXPECT_TRUE(fake_upstream_connection_1->waitForData(request.size(), &proxy_to_server)); + // The data in request should be received by the first server, fake_upstreams_[0]. + EXPECT_EQ(request, proxy_to_server); + proxy_to_server.clear(); + + // Send the redirection_response from the first fake Redis server back to the proxy. + EXPECT_TRUE(fake_upstream_connection_1->write(redirection_error.str())); + // The proxy should initiate a new connection to the fake redis server, target_server, in + // response. + EXPECT_TRUE(fake_upstreams_[1]->waitForRawConnection(fake_upstream_connection_2)); + + // The server, fake_upstreams_[1], should receive an "asking" command before the original request. 
+ std::string asking_request = makeBulkStringArray({"asking"}); + EXPECT_TRUE(fake_upstream_connection_2->waitForData(asking_request.size() + request.size(), + &proxy_to_server)); + EXPECT_EQ(asking_request + request, proxy_to_server); + // Close the upstream connection before responding to the "asking" command. + EXPECT_TRUE(fake_upstream_connection_2->close()); + + // The fake Redis client should receive an upstream failure error from the proxy. + std::stringstream error_response; + error_response << "-" << RedisCmdSplitter::Response::get().UpstreamFailure << "\r\n"; + redis_client->waitForData(error_response.str()); + EXPECT_EQ(error_response.str(), redis_client->data()); + + redis_client->close(); + EXPECT_TRUE(fake_upstream_connection_1->close()); +} + +// This test verifies that an ASK redirection error as a response to an "asking" command is ignored. +// This is a negative test scenario that should never happen since a Redis server will reply to an +// "asking" command with either a "cluster support not enabled" error or "OK".
+ +TEST_P(RedisProxyWithRedirectionIntegrationTest, IgnoreRedirectionForAsking) { + initialize(); + std::string request = makeBulkStringArray({"get", "foo"}); + std::stringstream redirection_error, asking_response; + redirection_error << "-ASK 1111 " << redisAddressAndPort(fake_upstreams_[1]) << "\r\n"; + asking_response << "-ASK 1111 " << redisAddressAndPort(fake_upstreams_[0]) << "\r\n"; + simpleRedirection(fake_upstreams_[1], request, redirection_error.str(), "$3\r\nbar\r\n", + asking_response.str()); +} + } // namespace } // namespace Envoy From 02ed57d648d82c5e5ef72070b145d2a28c9b404c Mon Sep 17 00:00:00 2001 From: Yaroslav Skopets Date: Mon, 15 Apr 2019 05:52:47 +0200 Subject: [PATCH 118/165] docs: correct a package of AggregatedDiscoveryService (#6572) Signed-off-by: Yaroslav Skopets --- docs/root/configuration/overview/v2_overview.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/root/configuration/overview/v2_overview.rst b/docs/root/configuration/overview/v2_overview.rst index 6066f0d359331..30b6066a98206 100644 --- a/docs/root/configuration/overview/v2_overview.rst +++ b/docs/root/configuration/overview/v2_overview.rst @@ -528,7 +528,7 @@ in :repo:`this ` document. The gRPC endpoint is: -.. http:post:: /envoy.api.v2.AggregatedDiscoveryService/StreamAggregatedResources +.. http:post:: /envoy.service.discovery.v2.AggregatedDiscoveryService/StreamAggregatedResources See :repo:`discovery.proto ` From b82dc5d25712c91fb38954b49acb3a1692bdf921 Mon Sep 17 00:00:00 2001 From: Matt Klein Date: Mon, 15 Apr 2019 07:24:38 -0700 Subject: [PATCH 119/165] cluster manager: destroy warming clusters during shutdown() (#6577) Fixes https://github.com/envoyproxy/envoy/issues/6513. This issue has existed for quite some time, so I'm unclear why we just started seeing this. It's possible it's chance and it's also possible it's in some way related to the init changes, but either way, this is the correct fix. 
Risk Level: Low Testing: New UT Signed-off-by: Matt Klein --- source/common/upstream/cluster_manager_impl.h | 3 ++ .../upstream/cluster_manager_impl_test.cc | 28 +++++++++++++++++++ 2 files changed, 31 insertions(+) diff --git a/source/common/upstream/cluster_manager_impl.h b/source/common/upstream/cluster_manager_impl.h index 9586357bc4aa1..4cb0914ee20c4 100644 --- a/source/common/upstream/cluster_manager_impl.h +++ b/source/common/upstream/cluster_manager_impl.h @@ -203,9 +203,12 @@ class ClusterManagerImpl : public ClusterManager, Logger::LoggablesetInitializedCb([&]() -> void { initialized.ready(); }); + + std::shared_ptr cluster1(new NiceMock()); + EXPECT_CALL(factory_, clusterFromProto_(_, _, _, _)).WillOnce(Return(cluster1)); + EXPECT_CALL(*cluster1, initializePhase()).Times(0); + EXPECT_CALL(*cluster1, initialize(_)); + EXPECT_TRUE(cluster_manager_->addOrUpdateCluster(defaultStaticCluster("fake_cluster"), "version1", + dummyWarmingCb)); + checkStats(1 /*added*/, 0 /*modified*/, 0 /*removed*/, 0 /*active*/, 1 /*warming*/); + cluster_manager_->shutdown(); + checkStats(1 /*added*/, 0 /*modified*/, 0 /*removed*/, 0 /*active*/, 0 /*warming*/); + + EXPECT_TRUE(Mock::VerifyAndClearExpectations(cluster1.get())); +} + TEST_F(ClusterManagerImplTest, DynamicAddRemove) { const std::string json = R"EOF( { From c1e29e51b4a7362c6c05dc632c12576ce1ea7aef Mon Sep 17 00:00:00 2001 From: Joshua Marantz Date: Mon, 15 Apr 2019 11:12:33 -0400 Subject: [PATCH 120/165] Don't use simulated time yet -- still flaky. 
(#6579) Flakes noticed in CI: backing this out for now to keep CI healthy Risk Level: low Testing:just the one test Signed-off-by: Joshua Marantz --- test/integration/hds_integration_test.cc | 1 - 1 file changed, 1 deletion(-) diff --git a/test/integration/hds_integration_test.cc b/test/integration/hds_integration_test.cc index eb4eba9ce2893..dd8fe59451747 100644 --- a/test/integration/hds_integration_test.cc +++ b/test/integration/hds_integration_test.cc @@ -26,7 +26,6 @@ namespace { // TODO(jmarantz): switch this to simulated-time after debugging flakes. class HdsIntegrationTest : public testing::TestWithParam, - public Event::TestUsingSimulatedTime, public HttpIntegrationTest { public: HdsIntegrationTest() : HttpIntegrationTest(Http::CodecClient::Type::HTTP1, GetParam()) {} From a368b4c35a7ae1353d9878e65f05c9a7037f2ea0 Mon Sep 17 00:00:00 2001 From: Christopher Date: Mon, 15 Apr 2019 13:41:23 -0400 Subject: [PATCH 121/165] Upgrading docker-compose version for front proxy example (#6574) Signed-off-by: Chris Paika --- examples/front-proxy/docker-compose.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/front-proxy/docker-compose.yml b/examples/front-proxy/docker-compose.yml index 2c121d598b73c..3d371f889c694 100644 --- a/examples/front-proxy/docker-compose.yml +++ b/examples/front-proxy/docker-compose.yml @@ -1,4 +1,4 @@ -version: '2' +version: '3.7' services: front-envoy: From a801993676586cb3af40593f1dbbd4e89722493e Mon Sep 17 00:00:00 2001 From: soya3129 <43042079+soya3129@users.noreply.github.com> Date: Mon, 15 Apr 2019 14:06:18 -0400 Subject: [PATCH 122/165] Use StopAllIterationAndWatermark in ext_auth filter (#6565) Description: Use StopAllIteration status for ext_authz filter. Risk Level: high. Testing: unit testing. Docs Changes: No behavior change expected. 
Release Notes: n/a Signed-off-by: Yang Song --- .../filters/http/ext_authz/ext_authz.cc | 25 +++++---- .../filters/http/ext_authz/ext_authz_test.cc | 51 +++++++++---------- 2 files changed, 40 insertions(+), 36 deletions(-) diff --git a/source/extensions/filters/http/ext_authz/ext_authz.cc b/source/extensions/filters/http/ext_authz/ext_authz.cc index 6a719bac889c3..cb93de3af6386 100644 --- a/source/extensions/filters/http/ext_authz/ext_authz.cc +++ b/source/extensions/filters/http/ext_authz/ext_authz.cc @@ -81,8 +81,9 @@ Http::FilterHeadersStatus Filter::decodeHeaders(Http::HeaderMap& headers, bool e } initiateCall(headers); - return filter_return_ == FilterReturn::StopDecoding ? Http::FilterHeadersStatus::StopIteration - : Http::FilterHeadersStatus::Continue; + return filter_return_ == FilterReturn::StopDecoding + ? Http::FilterHeadersStatus::StopAllIterationAndWatermark + : Http::FilterHeadersStatus::Continue; } Http::FilterDataStatus Filter::decodeData(Buffer::Instance&, bool end_stream) { @@ -90,24 +91,28 @@ Http::FilterDataStatus Filter::decodeData(Buffer::Instance&, bool end_stream) { if (end_stream || isBufferFull()) { ENVOY_STREAM_LOG(debug, "ext_authz filter finished buffering the request", *callbacks_); initiateCall(*request_headers_); + return filter_return_ == FilterReturn::StopDecoding + ? Http::FilterDataStatus::StopIterationAndWatermark + : Http::FilterDataStatus::Continue; } else { return Http::FilterDataStatus::StopIterationAndBuffer; } } - return filter_return_ == FilterReturn::StopDecoding - ? 
Http::FilterDataStatus::StopIterationAndWatermark - : Http::FilterDataStatus::Continue; + return Http::FilterDataStatus::Continue; } Http::FilterTrailersStatus Filter::decodeTrailers(Http::HeaderMap&) { - if (buffer_data_ && filter_return_ != FilterReturn::StopDecoding) { - ENVOY_STREAM_LOG(debug, "ext_authz filter finished buffering the request", *callbacks_); - initiateCall(*request_headers_); + if (buffer_data_) { + if (filter_return_ != FilterReturn::StopDecoding) { + ENVOY_STREAM_LOG(debug, "ext_authz filter finished buffering the request", *callbacks_); + initiateCall(*request_headers_); + } + return filter_return_ == FilterReturn::StopDecoding ? Http::FilterTrailersStatus::StopIteration + : Http::FilterTrailersStatus::Continue; } - return filter_return_ == FilterReturn::StopDecoding ? Http::FilterTrailersStatus::StopIteration - : Http::FilterTrailersStatus::Continue; + return Http::FilterTrailersStatus::Continue; } void Filter::setDecoderFilterCallbacks(Http::StreamDecoderFilterCallbacks& callbacks) { diff --git a/test/extensions/filters/http/ext_authz/ext_authz_test.cc b/test/extensions/filters/http/ext_authz/ext_authz_test.cc index 64a2fbeffda0d..6221e95dda3c4 100644 --- a/test/extensions/filters/http/ext_authz/ext_authz_test.cc +++ b/test/extensions/filters/http/ext_authz/ext_authz_test.cc @@ -174,7 +174,7 @@ TEST_F(HttpFilterTest, ErrorFailClose) { WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { request_callbacks_ = &callbacks; }))); - EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, + EXPECT_EQ(Http::FilterHeadersStatus::StopAllIterationAndWatermark, filter_->decodeHeaders(request_headers_, false)); EXPECT_CALL(filter_callbacks_, continueDecoding()).Times(0); @@ -204,7 +204,7 @@ TEST_F(HttpFilterTest, ErrorOpen) { WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { request_callbacks_ = &callbacks; }))); - EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, + 
EXPECT_EQ(Http::FilterHeadersStatus::StopAllIterationAndWatermark, filter_->decodeHeaders(request_headers_, false)); EXPECT_CALL(filter_callbacks_, continueDecoding()); @@ -408,10 +408,9 @@ TEST_F(HttpFilterTest, HeaderOnlyRequest) { WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { request_callbacks_ = &callbacks; }))); - EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, + EXPECT_EQ(Http::FilterHeadersStatus::StopAllIterationAndWatermark, filter_->decodeHeaders(request_headers_, true)); - EXPECT_EQ(Http::FilterDataStatus::StopIterationAndWatermark, filter_->decodeData(data_, true)); - EXPECT_EQ(Http::FilterTrailersStatus::StopIteration, filter_->decodeTrailers(request_headers_)); + // decodeData() and decodeTrailers() will not be called since request is header only. } // Checks that filter does not buffer data on upgrade WebSocket request. @@ -438,10 +437,9 @@ TEST_F(HttpFilterTest, UpgradeWebsocketRequest) { WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { request_callbacks_ = &callbacks; }))); - EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, + EXPECT_EQ(Http::FilterHeadersStatus::StopAllIterationAndWatermark, filter_->decodeHeaders(request_headers_, false)); - EXPECT_EQ(Http::FilterDataStatus::StopIterationAndWatermark, filter_->decodeData(data_, false)); - EXPECT_EQ(Http::FilterTrailersStatus::StopIteration, filter_->decodeTrailers(request_headers_)); + // decodeData() and decodeTrailers() will not be called until continueDecoding() is called. } // Checks that filter does not buffer data on upgrade H2 WebSocket request. 
@@ -467,10 +465,9 @@ TEST_F(HttpFilterTest, H2UpgradeRequest) { WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { request_callbacks_ = &callbacks; }))); - EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, + EXPECT_EQ(Http::FilterHeadersStatus::StopAllIterationAndWatermark, filter_->decodeHeaders(request_headers_, false)); - EXPECT_EQ(Http::FilterDataStatus::StopIterationAndWatermark, filter_->decodeData(data_, false)); - EXPECT_EQ(Http::FilterTrailersStatus::StopIteration, filter_->decodeTrailers(request_headers_)); + // decodeData() and decodeTrailers() will not be called until continueDecoding() is called. } // Checks that filter does not buffer data when is not the end of the stream, but header-only @@ -568,7 +565,7 @@ TEST_F(HttpFilterTestParam, DisabledOnRoute) { test_disable(false); EXPECT_CALL(*client_, check(_, _, _)).Times(1); // Engage the filter. - EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, + EXPECT_EQ(Http::FilterHeadersStatus::StopAllIterationAndWatermark, filter_->decodeHeaders(request_headers_, false)); // test that disabling works @@ -606,10 +603,8 @@ TEST_F(HttpFilterTestParam, OkResponse) { WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { request_callbacks_ = &callbacks; }))); - EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, + EXPECT_EQ(Http::FilterHeadersStatus::StopAllIterationAndWatermark, filter_->decodeHeaders(request_headers_, false)); - EXPECT_EQ(Http::FilterDataStatus::StopIterationAndWatermark, filter_->decodeData(data_, false)); - EXPECT_EQ(Http::FilterTrailersStatus::StopIteration, filter_->decodeTrailers(request_headers_)); EXPECT_CALL(filter_callbacks_, continueDecoding()); EXPECT_CALL(filter_callbacks_.stream_info_, setResponseFlag(Envoy::StreamInfo::ResponseFlag::UnauthorizedExternalService)) @@ -619,6 +614,10 @@ TEST_F(HttpFilterTestParam, OkResponse) { response.status = Filters::Common::ExtAuthz::CheckStatus::OK; 
request_callbacks_->onComplete(std::make_unique(response)); EXPECT_EQ(1U, filter_callbacks_.clusterInfo()->statsScope().counter("ext_authz.ok").value()); + + // decodeData() and decodeTrailers() are called after continueDecoding(). + EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false)); + EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_headers_)); } // Test that an synchronous OK response from the authorization service, on the call stack, results @@ -664,11 +663,11 @@ TEST_F(HttpFilterTestParam, ImmediateDeniedResponseWithHttpAttributes) { callbacks.onComplete(std::move(response_ptr)); }))); EXPECT_CALL(filter_callbacks_, continueDecoding()).Times(0); - EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, + EXPECT_EQ(Http::FilterHeadersStatus::StopAllIterationAndWatermark, filter_->decodeHeaders(request_headers_, false)); - EXPECT_EQ(Http::FilterDataStatus::StopIterationAndWatermark, filter_->decodeData(data_, false)); - EXPECT_EQ(Http::FilterTrailersStatus::StopIteration, filter_->decodeTrailers(request_headers_)); EXPECT_EQ(1U, filter_callbacks_.clusterInfo()->statsScope().counter("ext_authz.denied").value()); + // When request is denied, no call to continueDecoding(). As a result, decodeData() and + // decodeTrailer() will not be called. 
} // Test that an synchronous ok response from the authorization service passing additional HTTP @@ -726,11 +725,11 @@ TEST_F(HttpFilterTestParam, ImmediateDeniedResponse) { callbacks.onComplete(std::make_unique(response)); }))); EXPECT_CALL(filter_callbacks_, continueDecoding()).Times(0); - EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, + EXPECT_EQ(Http::FilterHeadersStatus::StopAllIterationAndWatermark, filter_->decodeHeaders(request_headers_, false)); - EXPECT_EQ(Http::FilterDataStatus::StopIterationAndWatermark, filter_->decodeData(data_, false)); - EXPECT_EQ(Http::FilterTrailersStatus::StopIteration, filter_->decodeTrailers(request_headers_)); EXPECT_EQ(1U, filter_callbacks_.clusterInfo()->statsScope().counter("ext_authz.denied").value()); + // When request is denied, no call to continueDecoding(). As a result, decodeData() and + // decodeTrailer() will not be called. } // Test that a denied response results in the connection closing with a 401 response to the client. @@ -743,7 +742,7 @@ TEST_F(HttpFilterTestParam, DeniedResponseWith401) { WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { request_callbacks_ = &callbacks; }))); - EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, + EXPECT_EQ(Http::FilterHeadersStatus::StopAllIterationAndWatermark, filter_->decodeHeaders(request_headers_, false)); Http::TestHeaderMapImpl response_headers{{":status", "401"}}; @@ -770,7 +769,7 @@ TEST_F(HttpFilterTestParam, DeniedResponseWith403) { WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { request_callbacks_ = &callbacks; }))); - EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, + EXPECT_EQ(Http::FilterHeadersStatus::StopAllIterationAndWatermark, filter_->decodeHeaders(request_headers_, false)); Http::TestHeaderMapImpl response_headers{{":status", "403"}}; @@ -807,7 +806,7 @@ TEST_F(HttpFilterTestParam, DestroyResponseBeforeSendLocalReply) { 
WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { request_callbacks_ = &callbacks; }))); - EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, + EXPECT_EQ(Http::FilterHeadersStatus::StopAllIterationAndWatermark, filter_->decodeHeaders(request_headers_, false)); Http::TestHeaderMapImpl response_headers{{":status", "403"}, @@ -852,7 +851,7 @@ TEST_F(HttpFilterTestParam, OverrideEncodingHeaders) { WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { request_callbacks_ = &callbacks; }))); - EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, + EXPECT_EQ(Http::FilterHeadersStatus::StopAllIterationAndWatermark, filter_->decodeHeaders(request_headers_, false)); Http::TestHeaderMapImpl response_headers{{":status", "403"}, @@ -894,7 +893,7 @@ TEST_F(HttpFilterTestParam, ResetDuringCall) { WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { request_callbacks_ = &callbacks; }))); - EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, + EXPECT_EQ(Http::FilterHeadersStatus::StopAllIterationAndWatermark, filter_->decodeHeaders(request_headers_, false)); EXPECT_CALL(*client_, cancel()); filter_->onDestroy(); From d0f992c8fe3f8bcb3ad2ad26f033ceb841d3cb04 Mon Sep 17 00:00:00 2001 From: Elisha Ziskind Date: Mon, 15 Apr 2019 16:51:35 -0400 Subject: [PATCH 123/165] add response code details to stream info (#6530) Signed-off-by: Elisha Ziskind --- include/envoy/stream_info/BUILD | 1 + include/envoy/stream_info/stream_info.h | 29 +++++++++++++++++++ source/common/http/async_client_impl.h | 2 ++ source/common/http/conn_manager_impl.h | 2 ++ source/common/router/router.cc | 2 ++ source/common/stream_info/stream_info_impl.h | 9 ++++++ test/common/http/conn_manager_impl_test.cc | 3 ++ test/common/router/router_test.cc | 23 +++++++++++++++ .../stream_info/stream_info_impl_test.cc | 5 ++++ test/common/stream_info/test_util.h | 7 +++++ test/mocks/stream_info/mocks.cc | 1 + 
test/mocks/stream_info/mocks.h | 3 ++ 12 files changed, 87 insertions(+) diff --git a/include/envoy/stream_info/BUILD b/include/envoy/stream_info/BUILD index defe80c196a95..68a28a8ac6c61 100644 --- a/include/envoy/stream_info/BUILD +++ b/include/envoy/stream_info/BUILD @@ -20,6 +20,7 @@ envoy_cc_library( "//include/envoy/upstream:host_description_interface", "//source/common/common:assert_lib", "//source/common/protobuf", + "//source/common/singleton:const_singleton", ], ) diff --git a/include/envoy/stream_info/stream_info.h b/include/envoy/stream_info/stream_info.h index e30218e317554..3bdb8a6e69e75 100644 --- a/include/envoy/stream_info/stream_info.h +++ b/include/envoy/stream_info/stream_info.h @@ -14,6 +14,7 @@ #include "common/common/assert.h" #include "common/protobuf/protobuf.h" +#include "common/singleton/const_singleton.h" #include "absl/types/optional.h" @@ -64,6 +65,23 @@ enum ResponseFlag { LastFlag = StreamIdleTimeout }; +/** + * Constants for the response code details field of StreamInfo. + * + * These provide details about the stream state such as whether the + * response is from the upstream or from envoy (in case of a local reply). + * Custom extensions can define additional values provided they are appropriately + * scoped to avoid collisions. + */ +struct ResponseCodeDetailValues { + // Response code was set by the upstream. + const std::string ViaUpstream = "via_upstream"; + + // TODO(#6542): add values for sendLocalReply use-cases +}; + +typedef ConstSingleton ResponseCodeDetails; + struct UpstreamTiming { /** * Sets the time when the first byte of the request was sent upstream. @@ -116,6 +134,12 @@ class StreamInfo { */ virtual void setResponseFlag(ResponseFlag response_flag) PURE; + /** + * @param rc_details the response code details string to set for this request. + * See ResponseCodeDetailValues above for well-known constants. 
+ */ + virtual void setResponseCodeDetails(absl::string_view rc_details) PURE; + /** * @param response_flags the response_flags to intersect with. * @return true if the intersection of the response_flags argument and the currently set response @@ -153,6 +177,11 @@ class StreamInfo { */ virtual absl::optional responseCode() const PURE; + /** + * @return the response code details. + */ + virtual const absl::optional& responseCodeDetails() const PURE; + /** * @return the time that the first byte of the request was received. */ diff --git a/source/common/http/async_client_impl.h b/source/common/http/async_client_impl.h index 35fffefc2c473..7f48d5234e14a 100644 --- a/source/common/http/async_client_impl.h +++ b/source/common/http/async_client_impl.h @@ -321,6 +321,8 @@ class AsyncStreamImpl : public AsyncClient::Stream, void sendLocalReply(Code code, absl::string_view body, std::function modify_headers, const absl::optional grpc_status) override { + // TODO(#6542): add an extra parameter for setting rc details + stream_info_.setResponseCodeDetails(""); Utility::sendLocalReply( is_grpc_request_, [this, modify_headers](HeaderMapPtr&& headers, bool end_stream) -> void { diff --git a/source/common/http/conn_manager_impl.h b/source/common/http/conn_manager_impl.h index a968d1109f7a4..2ba321902cf6f 100644 --- a/source/common/http/conn_manager_impl.h +++ b/source/common/http/conn_manager_impl.h @@ -222,6 +222,8 @@ class ConnectionManagerImpl : Logger::Loggable, void sendLocalReply(Code code, absl::string_view body, std::function modify_headers, const absl::optional grpc_status) override { + // TODO(#6542): add an extra parameter for setting rc details + parent_.stream_info_.setResponseCodeDetails(""); parent_.sendLocalReply(is_grpc_request_, code, body, modify_headers, parent_.is_head_request_, grpc_status); } diff --git a/source/common/router/router.cc b/source/common/router/router.cc index e4b18f231de17..f267e98229214 100644 --- a/source/common/router/router.cc +++ 
b/source/common/router/router.cc @@ -792,6 +792,8 @@ void Filter::onUpstreamHeaders(uint64_t response_code, Http::HeaderMapPtr&& head onUpstreamComplete(); } + callbacks_->streamInfo().setResponseCodeDetails( + StreamInfo::ResponseCodeDetails::get().ViaUpstream); callbacks_->encodeHeaders(std::move(headers), end_stream); } diff --git a/source/common/stream_info/stream_info_impl.h b/source/common/stream_info/stream_info_impl.h index d2cc842eeeffa..fcdb953a3cc7c 100644 --- a/source/common/stream_info/stream_info_impl.h +++ b/source/common/stream_info/stream_info_impl.h @@ -100,6 +100,14 @@ struct StreamInfoImpl : public StreamInfo { absl::optional responseCode() const override { return response_code_; } + const absl::optional& responseCodeDetails() const override { + return response_code_details_; + } + + void setResponseCodeDetails(absl::string_view rc_details) override { + response_code_details_.emplace(rc_details); + } + void addBytesSent(uint64_t bytes_sent) override { bytes_sent_ += bytes_sent; } uint64_t bytesSent() const override { return bytes_sent_; } @@ -205,6 +213,7 @@ struct StreamInfoImpl : public StreamInfo { absl::optional protocol_; absl::optional response_code_; + absl::optional response_code_details_; uint64_t response_flags_{}; Upstream::HostDescriptionConstSharedPtr upstream_host_{}; bool health_check_request_{}; diff --git a/test/common/http/conn_manager_impl_test.cc b/test/common/http/conn_manager_impl_test.cc index a282fc88cda50..656efa37e44da 100644 --- a/test/common/http/conn_manager_impl_test.cc +++ b/test/common/http/conn_manager_impl_test.cc @@ -3119,6 +3119,9 @@ TEST_F(HttpConnectionManagerImplTest, HitRequestBufferLimits) { EXPECT_CALL(*encoder_filters_[1], encodeComplete()); Buffer::OwnedImpl data("A longer string"); decoder_filters_[0]->callbacks_->addDecodedData(data, false); + const auto rc_details = encoder_filters_[1]->callbacks_->streamInfo().responseCodeDetails(); + EXPECT_TRUE(rc_details.has_value()); + EXPECT_EQ("", 
rc_details.value()); } // Return 413 from an intermediate filter and make sure we don't continue the filter chain. diff --git a/test/common/router/router_test.cc b/test/common/router/router_test.cc index 0fdbb9ec404aa..b51a005c111bc 100644 --- a/test/common/router/router_test.cc +++ b/test/common/router/router_test.cc @@ -738,6 +738,29 @@ TEST_F(RouterTestSuppressEnvoyHeaders, MaintenanceMode) { router_.decodeHeaders(headers, true); } +TEST_F(RouterTest, ResponseCodeDetailsSetByUpstream) { + NiceMock encoder1; + Http::StreamDecoder* response_decoder = nullptr; + EXPECT_CALL(cm_.conn_pool_, newStream(_, _)) + .WillOnce(Invoke([&](Http::StreamDecoder& decoder, Http::ConnectionPool::Callbacks& callbacks) + -> Http::ConnectionPool::Cancellable* { + response_decoder = &decoder; + callbacks.onPoolReady(encoder1, cm_.conn_pool_.host_); + return nullptr; + })); + expectResponseTimerCreate(); + + Http::TestHeaderMapImpl headers; + HttpTestUtility::addDefaultHeaders(headers); + router_.decodeHeaders(headers, true); + + Http::HeaderMapPtr response_headers(new Http::TestHeaderMapImpl{{":status", "200"}}); + absl::string_view rc_details = StreamInfo::ResponseCodeDetails::get().ViaUpstream; + EXPECT_CALL(callbacks_.stream_info_, setResponseCodeDetails(rc_details)); + response_decoder->decodeHeaders(std::move(response_headers), true); + EXPECT_TRUE(verifyHostUpstreamStats(1, 0)); +} + // Validate that x-envoy-upstream-service-time is added on a regular // request/response path. 
TEST_F(RouterTest, EnvoyUpstreamServiceTime) { diff --git a/test/common/stream_info/stream_info_impl_test.cc b/test/common/stream_info/stream_info_impl_test.cc index 1476ce9aaf244..df6c786116f33 100644 --- a/test/common/stream_info/stream_info_impl_test.cc +++ b/test/common/stream_info/stream_info_impl_test.cc @@ -142,6 +142,11 @@ TEST_F(StreamInfoImplTest, MiscSettersAndGetters) { ASSERT_TRUE(stream_info.responseCode()); EXPECT_EQ(200, stream_info.responseCode().value()); + EXPECT_FALSE(stream_info.responseCodeDetails().has_value()); + stream_info.setResponseCodeDetails(ResponseCodeDetails::get().ViaUpstream); + ASSERT_TRUE(stream_info.responseCodeDetails().has_value()); + EXPECT_EQ(ResponseCodeDetails::get().ViaUpstream, stream_info.responseCodeDetails().value()); + EXPECT_EQ(nullptr, stream_info.upstreamHost()); Upstream::HostDescriptionConstSharedPtr host(new NiceMock()); stream_info.onUpstreamHostSelected(host); diff --git a/test/common/stream_info/test_util.h b/test/common/stream_info/test_util.h index f86acf63f2165..011cfd7b27892 100644 --- a/test/common/stream_info/test_util.h +++ b/test/common/stream_info/test_util.h @@ -31,6 +31,12 @@ class TestStreamInfo : public StreamInfo::StreamInfo { absl::optional protocol() const override { return protocol_; } void protocol(Http::Protocol protocol) override { protocol_ = protocol; } absl::optional responseCode() const override { return response_code_; } + const absl::optional& responseCodeDetails() const override { + return response_code_details_; + } + void setResponseCodeDetails(absl::string_view rc_details) override { + response_code_details_.emplace(rc_details); + } void addBytesSent(uint64_t) override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } uint64_t bytesSent() const override { return 2; } bool intersectResponseFlags(uint64_t response_flags) const override { @@ -188,6 +194,7 @@ class TestStreamInfo : public StreamInfo::StreamInfo { absl::optional protocol_{Http::Protocol::Http11}; absl::optional response_code_; + 
absl::optional response_code_details_; uint64_t response_flags_{}; Upstream::HostDescriptionConstSharedPtr upstream_host_{}; bool health_check_request_{}; diff --git a/test/mocks/stream_info/mocks.cc b/test/mocks/stream_info/mocks.cc index 08c0bd556a09f..ec2820957dd8d 100644 --- a/test/mocks/stream_info/mocks.cc +++ b/test/mocks/stream_info/mocks.cc @@ -70,6 +70,7 @@ MockStreamInfo::MockStreamInfo() })); ON_CALL(*this, protocol()).WillByDefault(ReturnPointee(&protocol_)); ON_CALL(*this, responseCode()).WillByDefault(ReturnPointee(&response_code_)); + ON_CALL(*this, responseCodeDetails()).WillByDefault(ReturnPointee(&response_code_details_)); ON_CALL(*this, addBytesReceived(_)).WillByDefault(Invoke([this](uint64_t bytes_received) { bytes_received_ += bytes_received; })); diff --git a/test/mocks/stream_info/mocks.h b/test/mocks/stream_info/mocks.h index a28db9bd865fe..88da794daf9fd 100644 --- a/test/mocks/stream_info/mocks.h +++ b/test/mocks/stream_info/mocks.h @@ -18,6 +18,7 @@ class MockStreamInfo : public StreamInfo { // StreamInfo::StreamInfo MOCK_METHOD1(setResponseFlag, void(ResponseFlag response_flag)); + MOCK_METHOD1(setResponseCodeDetails, void(absl::string_view)); MOCK_CONST_METHOD1(intersectResponseFlags, bool(uint64_t)); MOCK_METHOD1(onUpstreamHostSelected, void(Upstream::HostDescriptionConstSharedPtr host)); MOCK_CONST_METHOD0(startTime, SystemTime()); @@ -44,6 +45,7 @@ class MockStreamInfo : public StreamInfo { MOCK_CONST_METHOD0(protocol, absl::optional()); MOCK_METHOD1(protocol, void(Http::Protocol protocol)); MOCK_CONST_METHOD0(responseCode, absl::optional()); + MOCK_CONST_METHOD0(responseCodeDetails, const absl::optional&()); MOCK_METHOD1(addBytesSent, void(uint64_t)); MOCK_CONST_METHOD0(bytesSent, uint64_t()); MOCK_CONST_METHOD1(hasResponseFlag, bool(ResponseFlag)); @@ -90,6 +92,7 @@ class MockStreamInfo : public StreamInfo { absl::optional end_time_; absl::optional protocol_; absl::optional response_code_; + absl::optional response_code_details_; 
envoy::api::v2::core::Metadata metadata_; FilterStateImpl filter_state_; uint64_t bytes_received_{}; From 6054b73bf893bed3d25db45cbbbbea0fbc5739e2 Mon Sep 17 00:00:00 2001 From: Matt Klein Date: Mon, 15 Apr 2019 15:33:03 -0700 Subject: [PATCH 124/165] http1: enable reads when final pipeline response received (#6578) Previously we were doing this when we create a new stream, but on a reused connection this can lead to us missing an upstream disconnection when the connection is placed back in the pool. Fixes https://github.com/envoyproxy/envoy/issues/6190 Signed-off-by: Matt Klein --- source/common/http/http1/codec_impl.cc | 24 ++++++++++++------ test/common/http/http1/codec_impl_test.cc | 30 ++++++++++++++++------- 2 files changed, 38 insertions(+), 16 deletions(-) diff --git a/source/common/http/http1/codec_impl.cc b/source/common/http/http1/codec_impl.cc index 9adef3d928c35..eda67fa94b5a0 100644 --- a/source/common/http/http1/codec_impl.cc +++ b/source/common/http/http1/codec_impl.cc @@ -677,13 +677,11 @@ StreamEncoder& ClientConnectionImpl::newStream(StreamDecoder& response_decoder) if (resetStreamCalled()) { throw CodecClientException("cannot create new streams after calling reset"); } - // Streams are responsible for unwinding any outstanding readDisable(true) - // calls done on the underlying connection as they are destroyed. As this is - // the only place a HTTP/1 stream is destroyed where the Network::Connection is - // reused, unwind any outstanding readDisable() calls here. - while (!connection_.readEnabled()) { - connection_.readDisable(false); - } + + // If reads were disabled due to flow control, we expect reads to always be enabled again before + // reusing this connection. This is done when the final pipeline response is received. 
+ ASSERT(connection_.readEnabled()); + request_encoder_ = std::make_unique(*this); pending_responses_.emplace_back(&response_decoder); return *request_encoder_; @@ -741,6 +739,18 @@ void ClientConnectionImpl::onMessageComplete() { PendingResponse response = pending_responses_.front(); pending_responses_.pop_front(); + // Streams are responsible for unwinding any outstanding readDisable(true) + // calls done on the underlying connection as they are destroyed. As this is + // the only place a HTTP/1 stream is destroyed where the Network::Connection is + // reused, unwind any outstanding readDisable() calls here. Only do this if there are no + // pipelined responses remaining. Also do this before we dispatch end_stream in case the caller + // immediately reuses the connection. + if (pending_responses_.empty()) { + while (!connection_.readEnabled()) { + connection_.readDisable(false); + } + } + if (deferred_end_stream_headers_) { response.decoder_->decodeHeaders(std::move(deferred_end_stream_headers_), true); deferred_end_stream_headers_.reset(); diff --git a/test/common/http/http1/codec_impl_test.cc b/test/common/http/http1/codec_impl_test.cc index 82e5c188788b9..4d59984d6be74 100644 --- a/test/common/http/http1/codec_impl_test.cc +++ b/test/common/http/http1/codec_impl_test.cc @@ -787,7 +787,9 @@ TEST_F(Http1ClientConnectionImplTest, Reset) { request_encoder.getStream().resetStream(StreamResetReason::LocalReset); } -TEST_F(Http1ClientConnectionImplTest, MultipleHeaderOnlyThenNoContentLength) { +// Verify that we correctly enable reads on the connection when the final pipeline response is +// received. +TEST_F(Http1ClientConnectionImplTest, FlowControlReadDisabledReenable) { initialize(); Http::MockStreamDecoder response_decoder; @@ -796,26 +798,36 @@ TEST_F(Http1ClientConnectionImplTest, MultipleHeaderOnlyThenNoContentLength) { std::string output; ON_CALL(connection_, write(_, _)).WillByDefault(AddBufferToString(&output)); + // 1st pipeline request. 
TestHeaderMapImpl headers{{":method", "GET"}, {":path", "/"}, {":authority", "host"}}; request_encoder->encodeHeaders(headers, true); - EXPECT_EQ("GET / HTTP/1.1\r\nhost: host\r\ncontent-length: 0\r\n\r\n", output); output.clear(); + // 2nd pipeline request. + request_encoder = &codec_->newStream(response_decoder); + request_encoder->encodeHeaders(headers, false); + Buffer::OwnedImpl empty; + request_encoder->encodeData(empty, true); + EXPECT_EQ("GET / HTTP/1.1\r\nhost: host\r\ntransfer-encoding: chunked\r\n\r\n0\r\n\r\n", output); + + // 1st response. + EXPECT_CALL(response_decoder, decodeHeaders_(_, true)); + Buffer::OwnedImpl response("HTTP/1.1 503 Service Unavailable\r\nContent-Length: 0\r\n\r\n"); + codec_->dispatch(response); + // Simulate the underlying connection being backed up. Ensure that it is - // read-enabled as the new stream is created. + // read-enabled when the final response completes. EXPECT_CALL(connection_, readEnabled()) .Times(2) .WillOnce(Return(false)) .WillRepeatedly(Return(true)); EXPECT_CALL(connection_, readDisable(false)); - request_encoder = &codec_->newStream(response_decoder); - request_encoder->encodeHeaders(headers, false); - Buffer::OwnedImpl empty; - request_encoder->encodeData(empty, true); - - EXPECT_EQ("GET / HTTP/1.1\r\nhost: host\r\ntransfer-encoding: chunked\r\n\r\n0\r\n\r\n", output); + // 2nd response. + EXPECT_CALL(response_decoder, decodeHeaders_(_, true)); + Buffer::OwnedImpl response2("HTTP/1.1 503 Service Unavailable\r\nContent-Length: 0\r\n\r\n"); + codec_->dispatch(response2); } TEST_F(Http1ClientConnectionImplTest, PrematureResponse) { From 77748b20c5157a26afd63dcf28ceb8d8cf1e3823 Mon Sep 17 00:00:00 2001 From: Dan Rosen Date: Mon, 15 Apr 2019 19:12:56 -0400 Subject: [PATCH 125/165] event: add stats for loop duration and poll delay (#6517) Add per-thread dispatcher statistics for loop duration and poll delay, based on new "prepare" and "check" watchers added in libevent (libevent/libevent#793). 
See discussion in #4952. Risk Level: medium Testing: Added unit test, all existing tests pass, and running locally yields sane results. Docs Changes: Added a new page on "performance" discussing event loop. Release Notes: Added an entry noting new stats. Signed-off-by: Dan Rosen --- bazel/repository_locations.bzl | 8 ++-- docs/root/intro/version_history.rst | 1 + docs/root/operations/operations.rst | 2 + docs/root/operations/performance.rst | 40 ++++++++++++++++ include/envoy/event/dispatcher.h | 27 +++++++++++ include/envoy/server/worker.h | 6 ++- source/common/event/dispatcher_impl.cc | 10 ++++ source/common/event/dispatcher_impl.h | 5 ++ source/common/event/libevent_scheduler.cc | 58 +++++++++++++++++++++++ source/common/event/libevent_scheduler.h | 15 ++++++ source/server/listener_manager_impl.cc | 10 ++-- source/server/listener_manager_impl.h | 1 + source/server/server.cc | 3 ++ source/server/worker_impl.cc | 3 +- source/server/worker_impl.h | 2 +- test/common/event/BUILD | 1 + test/common/event/dispatcher_impl_test.cc | 13 +++++ test/mocks/event/mocks.cc | 1 + test/mocks/event/mocks.h | 1 + test/mocks/server/mocks.h | 2 +- test/mocks/thread_local/mocks.cc | 1 + test/server/listener_manager_impl_test.cc | 16 +++---- test/server/worker_impl_test.cc | 6 ++- tools/spelling_dictionary.txt | 1 + 24 files changed, 210 insertions(+), 23 deletions(-) create mode 100644 docs/root/operations/performance.rst diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index f2f8671998833..cbe18bc5dd46c 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -129,9 +129,11 @@ REPOSITORY_LOCATIONS = dict( urls = ["https://github.com/google/benchmark/archive/505be96ab23056580a3a2315abba048f4428b04e.tar.gz"], ), com_github_libevent_libevent = dict( - sha256 = "53d4bb49b837944893b7caf9ae8eb43e94690ee5babea6469cc4a928722f99b1", - strip_prefix = "libevent-c4fbae3ae6166dddfa126734edd63213afa14dce", - urls = 
["https://github.com/libevent/libevent/archive/c4fbae3ae6166dddfa126734edd63213afa14dce.tar.gz"], + sha256 = "217d7282d41faabac8c74d8ea0f215d8fa065691fb4b1f9205cbe16a2a65c1cc", + # This SHA is when "prepare" and "check" watchers were added to libevent (see + # https://github.com/libevent/libevent/pull/793). Update to v2.2 when it is released. + strip_prefix = "libevent-2f184f8bbf23377bddc8daa1a2c7b40735ee7e2a", + urls = ["https://github.com/libevent/libevent/archive/2f184f8bbf23377bddc8daa1a2c7b40735ee7e2a.tar.gz"], ), com_github_madler_zlib = dict( sha256 = "629380c90a77b964d896ed37163f5c3a34f6e6d897311f1df2a7016355c45eff", diff --git a/docs/root/intro/version_history.rst b/docs/root/intro/version_history.rst index 35fba003bbae6..ffe16eda68412 100644 --- a/docs/root/intro/version_history.rst +++ b/docs/root/intro/version_history.rst @@ -4,6 +4,7 @@ Version history 1.11.0 (Pending) ================ * dubbo_proxy: support the :ref:`Dubbo proxy filter `. +* event: added :ref:`loop duration and poll delay statistics `. * http: mitigated a race condition with the :ref:`delayed_close_timeout` where it could trigger while actively flushing a pending write buffer for a downstream connection. * redis: add support for zpopmax and zpopmin commands. * upstream: added :ref:`upstream_cx_pool_overflow ` for the connection pool circuit breaker. diff --git a/docs/root/operations/operations.rst b/docs/root/operations/operations.rst index 98005b9977ba6..54f9c4a89e0be 100644 --- a/docs/root/operations/operations.rst +++ b/docs/root/operations/operations.rst @@ -13,3 +13,5 @@ Operations and administration runtime fs_flags traffic_tapping + performance + diff --git a/docs/root/operations/performance.rst b/docs/root/operations/performance.rst new file mode 100644 index 0000000000000..fb92de1d3cbb4 --- /dev/null +++ b/docs/root/operations/performance.rst @@ -0,0 +1,40 @@ +.. 
_operations_performance: + +Performance +=========== + +Envoy is architected to optimize scalability and resource utilization by running an event loop on a +:ref:`small number of threads `. The "main" thread is responsible for +control plane processing, and each "worker" thread handles a portion of the data plane processing. +Envoy exposes two statistics to monitor performance of the event loops on all these threads. + +* **Loop duration:** Some amount of processing is done on each iteration of the event loop. This + amount will naturally vary with changes in load. However, if one or more threads have an unusually + long-tailed loop duration, it may indicate a performance issue. For example, work might not be + distributed fairly across the worker threads, or there may be a long blocking operation in an + extension that's impeding progress. + +* **Poll delay:** On each iteration of the event loop, the event dispatcher polls for I/O events + and "wakes up" either when some I/O events are ready to be processed or when a timeout fires, + whichever occurs first. In the case of a timeout, we can measure the difference between the expected + wakeup time and the actual wakeup time after polling; this difference is called the "poll delay." + It's normal to see some small poll delay, usually equal to the kernel scheduler's "time slice" or + "quantum"---this depends on the specific operating system on which Envoy is running---but if this + number elevates substantially above its normal observed baseline, it likely indicates kernel + scheduler delays. + +Statistics +---------- + +The event dispatcher for the main thread has a statistics tree rooted at *server.dispatcher.*, and +the event dispatcher for each worker thread has a statistics tree rooted at +*listener_manager.worker_.dispatcher.*, each with the following statistics: + +.. 
csv-table:: + :header: Name, Type, Description + :widths: 1, 1, 2 + + loop_duration_us, Histogram, Event loop durations in microseconds + poll_delay_us, Histogram, Polling delays in microseconds + +Note that any auxiliary threads are not included here. diff --git a/include/envoy/event/dispatcher.h b/include/envoy/event/dispatcher.h index 1e0b52a10f270..0024b3a2e7795 100644 --- a/include/envoy/event/dispatcher.h +++ b/include/envoy/event/dispatcher.h @@ -17,11 +17,29 @@ #include "envoy/network/listen_socket.h" #include "envoy/network/listener.h" #include "envoy/network/transport_socket.h" +#include "envoy/stats/scope.h" +#include "envoy/stats/stats_macros.h" #include "envoy/thread/thread.h" namespace Envoy { namespace Event { +/** + * All dispatcher stats. @see stats_macros.h + */ +// clang-format off +#define ALL_DISPATCHER_STATS(HISTOGRAM) \ + HISTOGRAM(loop_duration_us) \ + HISTOGRAM(poll_delay_us) +// clang-format on + +/** + * Struct definition for all dispatcher stats. @see stats_macros.h + */ +struct DispatcherStats { + ALL_DISPATCHER_STATS(GENERATE_HISTOGRAM_STRUCT) +}; + /** * Callback invoked when a dispatcher post() runs. */ @@ -39,6 +57,15 @@ class Dispatcher { */ virtual TimeSource& timeSource() PURE; + /** + * Initialize stats for this dispatcher. Note that this can't generally be done at construction + * time, since the main and worker thread dispatchers are constructed before + * ThreadLocalStoreImpl::initializeThreading. + * @param scope the scope to contain the new per-dispatcher stats created here. + * @param prefix the stats prefix to identify this dispatcher. + */ + virtual void initializeStats(Stats::Scope& scope, const std::string& prefix) PURE; + /** * Clear any items in the deferred deletion queue. 
*/ diff --git a/include/envoy/server/worker.h b/include/envoy/server/worker.h index cb845b318158c..b7af5aae7d333 100644 --- a/include/envoy/server/worker.h +++ b/include/envoy/server/worker.h @@ -38,10 +38,12 @@ class Worker { virtual uint64_t numConnections() PURE; /** - * Start the worker thread. + * Start the worker thread. The worker will output thread-specific stats under the given scope. * @param guard_dog supplies the guard dog to use for thread watching. + * @param scope the scope to add the new worker stats to. + * @param prefix the prefix for the new stats, identifying this worker. */ - virtual void start(GuardDog& guard_dog) PURE; + virtual void start(GuardDog& guard_dog, Stats::Scope& scope, const std::string& prefix) PURE; /** * Stop the worker thread. diff --git a/source/common/event/dispatcher_impl.cc b/source/common/event/dispatcher_impl.cc index 8e737de4de17f..9d7badc88f4c0 100644 --- a/source/common/event/dispatcher_impl.cc +++ b/source/common/event/dispatcher_impl.cc @@ -41,6 +41,13 @@ DispatcherImpl::DispatcherImpl(Buffer::WatermarkFactoryPtr&& factory, Api::Api& DispatcherImpl::~DispatcherImpl() {} +void DispatcherImpl::initializeStats(Stats::Scope& scope, const std::string& prefix) { + stats_prefix_ = prefix + "dispatcher"; + stats_ = std::make_unique( + DispatcherStats{ALL_DISPATCHER_STATS(POOL_HISTOGRAM_PREFIX(scope, stats_prefix_ + "."))}); + base_scheduler_.initializeStats(stats_.get()); +} + void DispatcherImpl::clearDeferredDeleteList() { ASSERT(isThreadSafe()); std::vector* to_delete = current_to_delete_; @@ -158,6 +165,9 @@ void DispatcherImpl::post(std::function callback) { void DispatcherImpl::run(RunType type) { run_tid_ = api_.threadFactory().currentThreadId(); + if (!stats_prefix_.empty()) { + ENVOY_LOG(debug, "running {} on thread {}", stats_prefix_, run_tid_->debugString()); + } // Flush all post callbacks before we run the event loop. 
We do this because there are post // callbacks that have to get run before the initial event loop starts running. libevent does diff --git a/source/common/event/dispatcher_impl.h b/source/common/event/dispatcher_impl.h index a51dce3e6aaff..b712f22d879e4 100644 --- a/source/common/event/dispatcher_impl.h +++ b/source/common/event/dispatcher_impl.h @@ -3,6 +3,7 @@ #include #include #include +#include #include #include "envoy/api/api.h" @@ -10,6 +11,7 @@ #include "envoy/event/deferred_deletable.h" #include "envoy/event/dispatcher.h" #include "envoy/network/connection_handler.h" +#include "envoy/stats/scope.h" #include "common/common/logger.h" #include "common/common/thread.h" @@ -36,6 +38,7 @@ class DispatcherImpl : Logger::Loggable, public Dispatcher { // Event::Dispatcher TimeSource& timeSource() override { return api_.timeSource(); } + void initializeStats(Stats::Scope& scope, const std::string& prefix) override; void clearDeferredDeleteList() override; Network::ConnectionPtr createServerConnection(Network::ConnectionSocketPtr&& socket, @@ -72,6 +75,8 @@ class DispatcherImpl : Logger::Loggable, public Dispatcher { bool isThreadSafe() const { return run_tid_ == nullptr || run_tid_->isCurrentThreadId(); } Api::Api& api_; + std::string stats_prefix_; + std::unique_ptr stats_; Thread::ThreadIdPtr run_tid_; Buffer::WatermarkFactoryPtr buffer_factory_; LibeventScheduler base_scheduler_; diff --git a/source/common/event/libevent_scheduler.cc b/source/common/event/libevent_scheduler.cc index 5b35ffd18447e..df22b45ba7371 100644 --- a/source/common/event/libevent_scheduler.cc +++ b/source/common/event/libevent_scheduler.cc @@ -3,9 +3,17 @@ #include "common/common/assert.h" #include "common/event/timer_impl.h" +#include "event2/util.h" + namespace Envoy { namespace Event { +namespace { +void recordTimeval(Stats::Histogram& histogram, const timeval& tv) { + histogram.recordValue(tv.tv_sec * 1000000 + tv.tv_usec); +} +} // namespace + LibeventScheduler::LibeventScheduler() : 
libevent_(event_base_new()) { // The dispatcher won't work as expected if libevent hasn't been configured to use threads. RELEASE_ASSERT(Libevent::Global::initialized(), ""); @@ -41,5 +49,55 @@ void LibeventScheduler::run(Dispatcher::RunType mode) { void LibeventScheduler::loopExit() { event_base_loopexit(libevent_.get(), nullptr); } +void LibeventScheduler::initializeStats(DispatcherStats* stats) { + stats_ = stats; + // These are thread safe. + evwatch_prepare_new(libevent_.get(), &onPrepare, this); + evwatch_check_new(libevent_.get(), &onCheck, this); +} + +void LibeventScheduler::onPrepare(evwatch*, const evwatch_prepare_cb_info* info, void* arg) { + // `self` is `this`, passed in from evwatch_prepare_new. + auto self = static_cast(arg); + + // Record poll timeout and prepare time for this iteration of the event loop. The timeout is the + // expected polling duration, whereas the actual polling duration will be the difference measured + // between the prepare time and the check time immediately after polling. These are compared in + // onCheck to compute the poll_delay stat. + self->timeout_set_ = evwatch_prepare_get_timeout(info, &self->timeout_); + evutil_gettimeofday(&self->prepare_time_, nullptr); + + // If we have a check time available from a previous iteration of the event loop (that is, all but + // the first), compute the loop_duration stat. + if (self->check_time_.tv_sec != 0) { + timeval delta; + evutil_timersub(&self->prepare_time_, &self->check_time_, &delta); + recordTimeval(self->stats_->loop_duration_us_, delta); + } +} + +void LibeventScheduler::onCheck(evwatch*, const evwatch_check_cb_info*, void* arg) { + // `self` is `this`, passed in from evwatch_check_new. + auto self = static_cast(arg); + + // Record check time for this iteration of the event loop. Use this together with prepare time + // from above to compute the actual polling duration, and store it for the next iteration of the + // event loop to compute the loop duration. 
+ evutil_gettimeofday(&self->check_time_, nullptr); + if (self->timeout_set_) { + timeval delta, delay; + evutil_timersub(&self->check_time_, &self->prepare_time_, &delta); + evutil_timersub(&delta, &self->timeout_, &delay); + + // Delay can be negative, meaning polling completed early. This happens in normal operation, + // either because I/O was ready before we hit the timeout, or just because the kernel was + // feeling saucy. Disregard negative delays in stats, since they don't indicate anything + // particularly useful. + if (delay.tv_sec >= 0) { + recordTimeval(self->stats_->poll_delay_us_, delay); + } + } +} + } // namespace Event } // namespace Envoy diff --git a/source/common/event/libevent_scheduler.h b/source/common/event/libevent_scheduler.h index 5a41e1ccf6c4f..b9157bf4059b5 100644 --- a/source/common/event/libevent_scheduler.h +++ b/source/common/event/libevent_scheduler.h @@ -6,6 +6,7 @@ #include "common/event/libevent.h" #include "event2/event.h" +#include "event2/watch.h" namespace Envoy { namespace Event { @@ -40,8 +41,22 @@ class LibeventScheduler : public Scheduler { */ event_base& base() { return *libevent_; } + /** + * Start writing stats once thread-local storage is ready to receive them (see + * ThreadLocalStoreImpl::initializeThreading). 
+ */ + void initializeStats(DispatcherStats* stats_); + private: + static void onPrepare(evwatch*, const evwatch_prepare_cb_info* info, void* arg); + static void onCheck(evwatch*, const evwatch_check_cb_info*, void* arg); + Libevent::BasePtr libevent_; + DispatcherStats* stats_{}; // stats owned by the containing DispatcherImpl + bool timeout_set_{}; // whether there is a poll timeout in the current event loop iteration + timeval timeout_{}; // the poll timeout for the current event loop iteration, if available + timeval prepare_time_{}; // timestamp immediately before polling + timeval check_time_{}; // timestamp immediately after polling }; } // namespace Event diff --git a/source/server/listener_manager_impl.cc b/source/server/listener_manager_impl.cc index fc64828f82a7b..b8bc718037e2b 100644 --- a/source/server/listener_manager_impl.cc +++ b/source/server/listener_manager_impl.cc @@ -672,7 +672,8 @@ void ListenerImpl::setSocket(const Network::SocketSharedPtr& socket) { ListenerManagerImpl::ListenerManagerImpl(Instance& server, ListenerComponentFactory& listener_factory, WorkerFactory& worker_factory) - : server_(server), factory_(listener_factory), stats_(generateStats(server.stats())), + : server_(server), factory_(listener_factory), + scope_(server.stats().createScope("listener_manager.")), stats_(generateStats(*scope_)), config_tracker_entry_(server.admin().getConfigTracker().add( "listeners", [this] { return dumpListenerConfigs(); })) { for (uint32_t i = 0; i < server.options().concurrency(); i++) { @@ -718,9 +719,7 @@ ProtobufTypes::MessagePtr ListenerManagerImpl::dumpListenerConfigs() { } ListenerManagerStats ListenerManagerImpl::generateStats(Stats::Scope& scope) { - const std::string final_prefix = "listener_manager."; - return {ALL_LISTENER_MANAGER_STATS(POOL_COUNTER_PREFIX(scope, final_prefix), - POOL_GAUGE_PREFIX(scope, final_prefix))}; + return {ALL_LISTENER_MANAGER_STATS(POOL_COUNTER(scope), POOL_GAUGE(scope))}; } bool 
ListenerManagerImpl::addOrUpdateListener(const envoy::api::v2::Listener& config, @@ -1006,12 +1005,13 @@ void ListenerManagerImpl::startWorkers(GuardDog& guard_dog) { ENVOY_LOG(info, "all dependencies initialized. starting workers"); ASSERT(!workers_started_); workers_started_ = true; + uint32_t i = 0; for (const auto& worker : workers_) { ASSERT(warming_listeners_.empty()); for (const auto& listener : active_listeners_) { addListenerToWorker(*worker, *listener); } - worker->start(guard_dog); + worker->start(guard_dog, *scope_, fmt::format("worker_{}.", i++)); } } diff --git a/source/server/listener_manager_impl.h b/source/server/listener_manager_impl.h index 41da1135d4443..b27780fb486c8 100644 --- a/source/server/listener_manager_impl.h +++ b/source/server/listener_manager_impl.h @@ -177,6 +177,7 @@ class ListenerManagerImpl : public ListenerManager, Logger::Loggable draining_listeners_; std::list workers_; bool workers_started_{}; + Stats::ScopePtr scope_; ListenerManagerStats stats_; ConfigTracker::EntryOwnerPtr config_tracker_entry_; LdsApiPtr lds_api_; diff --git a/source/server/server.cc b/source/server/server.cc index a75d0d291ba47..38703d0d43a53 100644 --- a/source/server/server.cc +++ b/source/server/server.cc @@ -311,6 +311,9 @@ void InstanceImpl::initialize(const Options& options, // We can now initialize stats for threading. stats_store_.initializeThreading(*dispatcher_, thread_local_); + // It's now safe to start writing stats from the main thread's dispatcher. + dispatcher_->initializeStats(stats_store_, "server."); + // Runtime gets initialized before the main configuration since during main configuration // load things may grab a reference to the loader for later use. 
runtime_singleton_ = std::make_unique( diff --git a/source/server/worker_impl.cc b/source/server/worker_impl.cc index 660d1ac1cf739..40cf1a64674d3 100644 --- a/source/server/worker_impl.cc +++ b/source/server/worker_impl.cc @@ -67,10 +67,11 @@ void WorkerImpl::removeListener(Network::ListenerConfig& listener, }); } -void WorkerImpl::start(GuardDog& guard_dog) { +void WorkerImpl::start(GuardDog& guard_dog, Stats::Scope& scope, const std::string& prefix) { ASSERT(!thread_); thread_ = api_.threadFactory().createThread([this, &guard_dog]() -> void { threadRoutine(guard_dog); }); + dispatcher_->initializeStats(scope, prefix); } void WorkerImpl::stop() { diff --git a/source/server/worker_impl.h b/source/server/worker_impl.h index b59c7356f2134..1b9c6bb96c09b 100644 --- a/source/server/worker_impl.h +++ b/source/server/worker_impl.h @@ -44,7 +44,7 @@ class WorkerImpl : public Worker, Logger::Loggable { void addListener(Network::ListenerConfig& listener, AddListenerCompletion completion) override; uint64_t numConnections() override; void removeListener(Network::ListenerConfig& listener, std::function completion) override; - void start(GuardDog& guard_dog) override; + void start(GuardDog& guard_dog, Stats::Scope& scope, const std::string& prefix) override; void stop() override; void stopListener(Network::ListenerConfig& listener) override; void stopListeners() override; diff --git a/test/common/event/BUILD b/test/common/event/BUILD index 0a5ad582e66b2..4e432fb57625e 100644 --- a/test/common/event/BUILD +++ b/test/common/event/BUILD @@ -17,6 +17,7 @@ envoy_cc_test( "//source/common/event:dispatcher_lib", "//source/common/stats:isolated_store_lib", "//test/mocks:common_lib", + "//test/mocks/stats:stats_mocks", "//test/test_common:utility_lib", ], ) diff --git a/test/common/event/dispatcher_impl_test.cc b/test/common/event/dispatcher_impl_test.cc index af0cd7e7579c0..25185a30364e2 100644 --- a/test/common/event/dispatcher_impl_test.cc +++ 
b/test/common/event/dispatcher_impl_test.cc @@ -8,12 +8,17 @@ #include "common/stats/isolated_store_impl.h" #include "test/mocks/common.h" +#include "test/mocks/stats/mocks.h" #include "test/test_common/utility.h" #include "gmock/gmock.h" #include "gtest/gtest.h" +using testing::_; using testing::InSequence; +using testing::NiceMock; +using testing::Return; +using testing::StartsWith; namespace Envoy { namespace Event { @@ -90,6 +95,14 @@ class DispatcherImplTest : public testing::Test { TimerPtr keepalive_timer_; }; +TEST_F(DispatcherImplTest, InitializeStats) { + // NiceMock because deliverHistogramToSinks may or may not be called, depending on timing. + NiceMock scope; + EXPECT_CALL(scope, histogram("test.dispatcher.loop_duration_us")); + EXPECT_CALL(scope, histogram("test.dispatcher.poll_delay_us")); + dispatcher_->initializeStats(scope, "test."); +} + TEST_F(DispatcherImplTest, Post) { dispatcher_->post([this]() { { diff --git a/test/mocks/event/mocks.cc b/test/mocks/event/mocks.cc index d79fe9db839cb..f1d5cdcadb19c 100644 --- a/test/mocks/event/mocks.cc +++ b/test/mocks/event/mocks.cc @@ -16,6 +16,7 @@ namespace Envoy { namespace Event { MockDispatcher::MockDispatcher() { + ON_CALL(*this, initializeStats(_, _)).WillByDefault(Return()); ON_CALL(*this, clearDeferredDeleteList()).WillByDefault(Invoke([this]() -> void { to_delete_.clear(); })); diff --git a/test/mocks/event/mocks.h b/test/mocks/event/mocks.h index a71b0e60998c9..4237a3b57ca0b 100644 --- a/test/mocks/event/mocks.h +++ b/test/mocks/event/mocks.h @@ -84,6 +84,7 @@ class MockDispatcher : public Dispatcher { } // Event::Dispatcher + MOCK_METHOD2(initializeStats, void(Stats::Scope&, const std::string&)); MOCK_METHOD0(clearDeferredDeleteList, void()); MOCK_METHOD2(createServerConnection_, Network::Connection*(Network::ConnectionSocket* socket, diff --git a/test/mocks/server/mocks.h b/test/mocks/server/mocks.h index 3374d8b5f8b85..cc99817ff7064 100644 --- a/test/mocks/server/mocks.h +++ 
b/test/mocks/server/mocks.h @@ -307,7 +307,7 @@ class MockWorker : public Worker { MOCK_METHOD0(numConnections, uint64_t()); MOCK_METHOD2(removeListener, void(Network::ListenerConfig& listener, std::function completion)); - MOCK_METHOD1(start, void(GuardDog& guard_dog)); + MOCK_METHOD3(start, void(GuardDog& guard_dog, Stats::Scope& scope, const std::string& prefix)); MOCK_METHOD0(stop, void()); MOCK_METHOD1(stopListener, void(Network::ListenerConfig& listener)); MOCK_METHOD0(stopListeners, void()); diff --git a/test/mocks/thread_local/mocks.cc b/test/mocks/thread_local/mocks.cc index cfabd7a7f52f0..5ccc69fcf21d9 100644 --- a/test/mocks/thread_local/mocks.cc +++ b/test/mocks/thread_local/mocks.cc @@ -13,6 +13,7 @@ MockInstance::MockInstance() { ON_CALL(*this, allocateSlot()).WillByDefault(Invoke(this, &MockInstance::allocateSlot_)); ON_CALL(*this, runOnAllThreads(_)).WillByDefault(Invoke(this, &MockInstance::runOnAllThreads_)); ON_CALL(*this, shutdownThread()).WillByDefault(Invoke(this, &MockInstance::shutdownThread_)); + ON_CALL(*this, dispatcher()).WillByDefault(ReturnRef(dispatcher_)); } MockInstance::~MockInstance() { shutdownThread_(); } diff --git a/test/server/listener_manager_impl_test.cc b/test/server/listener_manager_impl_test.cc index 53f9e086ba7ee..87399c190bb8c 100644 --- a/test/server/listener_manager_impl_test.cc +++ b/test/server/listener_manager_impl_test.cc @@ -771,7 +771,7 @@ version_info: version2 // Start workers. EXPECT_CALL(*worker_, addListener(_, _)); - EXPECT_CALL(*worker_, start(_)); + EXPECT_CALL(*worker_, start(_, _, _)); manager_->startWorkers(guard_dog_); worker_->callAddCompletion(true); @@ -954,7 +954,7 @@ version_info: version5 TEST_F(ListenerManagerImplTest, AddDrainingListener) { InSequence s; - EXPECT_CALL(*worker_, start(_)); + EXPECT_CALL(*worker_, start(_, _, _)); manager_->startWorkers(guard_dog_); // Add foo listener directly into active. 
@@ -1003,7 +1003,7 @@ TEST_F(ListenerManagerImplTest, AddDrainingListener) { TEST_F(ListenerManagerImplTest, CantBindSocket) { InSequence s; - EXPECT_CALL(*worker_, start(_)); + EXPECT_CALL(*worker_, start(_, _, _)); manager_->startWorkers(guard_dog_); const std::string listener_foo_json = R"EOF( @@ -1025,7 +1025,7 @@ TEST_F(ListenerManagerImplTest, CantBindSocket) { TEST_F(ListenerManagerImplTest, ListenerDraining) { InSequence s; - EXPECT_CALL(*worker_, start(_)); + EXPECT_CALL(*worker_, start(_, _, _)); manager_->startWorkers(guard_dog_); const std::string listener_foo_json = R"EOF( @@ -1073,7 +1073,7 @@ TEST_F(ListenerManagerImplTest, ListenerDraining) { TEST_F(ListenerManagerImplTest, RemoveListener) { InSequence s; - EXPECT_CALL(*worker_, start(_)); + EXPECT_CALL(*worker_, start(_, _, _)); manager_->startWorkers(guard_dog_); // Remove an unknown listener. @@ -1149,7 +1149,7 @@ TEST_F(ListenerManagerImplTest, RemoveListener) { TEST_F(ListenerManagerImplTest, AddListenerFailure) { InSequence s; - EXPECT_CALL(*worker_, start(_)); + EXPECT_CALL(*worker_, start(_, _, _)); manager_->startWorkers(guard_dog_); // Add foo listener into active. @@ -1197,7 +1197,7 @@ TEST_F(ListenerManagerImplTest, StatsNameValidCharacterTest) { TEST_F(ListenerManagerImplTest, DuplicateAddressDontBind) { InSequence s; - EXPECT_CALL(*worker_, start(_)); + EXPECT_CALL(*worker_, start(_, _, _)); manager_->startWorkers(guard_dog_); // Add foo listener into warming. @@ -1250,7 +1250,7 @@ TEST_F(ListenerManagerImplTest, DuplicateAddressDontBind) { TEST_F(ListenerManagerImplTest, EarlyShutdown) { // If stopWorkers is called before the workers are started, it should be a no-op: they should be // neither started nor stopped. 
- EXPECT_CALL(*worker_, start(_)).Times(0); + EXPECT_CALL(*worker_, start(_, _, _)).Times(0); EXPECT_CALL(*worker_, stop()).Times(0); manager_->stopWorkers(); } diff --git a/test/server/worker_impl_test.cc b/test/server/worker_impl_test.cc index c59063e3e3315..6e669107f12d6 100644 --- a/test/server/worker_impl_test.cc +++ b/test/server/worker_impl_test.cc @@ -71,7 +71,8 @@ TEST_F(WorkerImplTest, BasicFlow) { ci.setReady(); }); - worker_.start(guard_dog_); + NiceMock store; + worker_.start(guard_dog_, store, "test"); ci.waitReady(); // After a worker is started adding/stopping/removing a listener happens on the worker thread. @@ -140,7 +141,8 @@ TEST_F(WorkerImplTest, ListenerException) { .WillOnce(Throw(Network::CreateListenerException("failed"))); worker_.addListener(listener, [](bool success) -> void { EXPECT_FALSE(success); }); - worker_.start(guard_dog_); + NiceMock store; + worker_.start(guard_dog_, store, "test"); worker_.stop(); } diff --git a/tools/spelling_dictionary.txt b/tools/spelling_dictionary.txt index 1240f39f89a5f..efe5cab515853 100644 --- a/tools/spelling_dictionary.txt +++ b/tools/spelling_dictionary.txt @@ -423,6 +423,7 @@ evbuffer evbuffers evconnlistener evthread +evwatch exe execlp facto From e031911d3c1d357991e0d84062394773e0bbb75b Mon Sep 17 00:00:00 2001 From: Snow Pettersen Date: Mon, 15 Apr 2019 16:41:05 -0700 Subject: [PATCH 126/165] network: expose unix socket peer credentials on connection (#6460) This allows retrieving the pid/uid/gid from the connection if the connection is made using a unix socket. 
Signed-off-by: Snow Pettersen --- include/envoy/network/connection.h | 24 +++++++++++++++++++++++ source/common/network/connection_impl.cc | 17 ++++++++++++++++ source/common/network/connection_impl.h | 1 + test/integration/uds_integration_test.cc | 25 ++++++++++++++++++++++++ test/mocks/network/connection.h | 4 ++++ 5 files changed, 71 insertions(+) diff --git a/include/envoy/network/connection.h b/include/envoy/network/connection.h index 20cf091dcae94..3630fd34a400b 100644 --- a/include/envoy/network/connection.h +++ b/include/envoy/network/connection.h @@ -166,6 +166,30 @@ class Connection : public Event::DeferredDeletable, public FilterManager { */ virtual const Network::Address::InstanceConstSharedPtr& remoteAddress() const PURE; + /** + * Credentials of the peer of a socket as decided by SO_PEERCRED. + */ + struct UnixDomainSocketPeerCredentials { + /** + * The process id of the peer. + */ + int32_t pid; + /** + * The user id of the peer. + */ + uint32_t uid; + /** + * The group id of the peer. + */ + uint32_t gid; + }; + + /** + * @return The unix socket peer credentials of the the remote client. Note that this is only + * supported for unix socket connections. + */ + virtual absl::optional unixSocketPeerCredentials() const PURE; + /** * @return the local address of the connection. For client connections, this is the origin * address. For server connections, this is the local destination address. For server connections diff --git a/source/common/network/connection_impl.cc b/source/common/network/connection_impl.cc index 871ac6e35433c..c32f47c90013b 100644 --- a/source/common/network/connection_impl.cc +++ b/source/common/network/connection_impl.cc @@ -513,6 +513,23 @@ void ConnectionImpl::onReadReady() { } } +absl::optional +ConnectionImpl::unixSocketPeerCredentials() const { + // TODO(snowp): Support non-linux platforms. 
+#ifndef SO_PEERCRED + return absl::nullopt; +#else + struct ucred ucred; + socklen_t ucred_size = sizeof(ucred); + int rc = getsockopt(ioHandle().fd(), SOL_SOCKET, SO_PEERCRED, &ucred, &ucred_size); + if (rc == -1) { + return absl::nullopt; + } + + return {{ucred.pid, ucred.uid, ucred.gid}}; +#endif +} + void ConnectionImpl::onWriteReady() { ENVOY_CONN_LOG(trace, "write ready", *this); diff --git a/source/common/network/connection_impl.h b/source/common/network/connection_impl.h index 97e0e21c7600c..dc72e208b15b4 100644 --- a/source/common/network/connection_impl.h +++ b/source/common/network/connection_impl.h @@ -80,6 +80,7 @@ class ConnectionImpl : public virtual Connection, const Address::InstanceConstSharedPtr& localAddress() const override { return socket_->localAddress(); } + absl::optional unixSocketPeerCredentials() const override; void setConnectionStats(const ConnectionStats& stats) override; const Ssl::ConnectionInfo* ssl() const override { return transport_socket_->ssl(); } State state() const override; diff --git a/test/integration/uds_integration_test.cc b/test/integration/uds_integration_test.cc index 8c93f738ea05a..caa7c93b9c953 100644 --- a/test/integration/uds_integration_test.cc +++ b/test/integration/uds_integration_test.cc @@ -78,6 +78,31 @@ HttpIntegrationTest::ConnectionCreationFunction UdsListenerIntegrationTest::crea }; } +TEST_P(UdsListenerIntegrationTest, TestPeerCredentials) { + fake_upstreams_count_ = 1; + initialize(); + auto client_connection = createConnectionFn()(); + codec_client_ = makeHttpConnection(std::move(client_connection)); + Http::TestHeaderMapImpl request_headers{ + {":method", "POST"}, {":path", "/test/long/url"}, {":scheme", "http"}, + {":authority", "host"}, {"x-lyft-user-id", "123"}, {"x-forwarded-for", "10.0.0.1"}}; + auto response = codec_client_->makeHeaderOnlyRequest(request_headers); + waitForNextUpstreamRequest(0); + + auto credentials = codec_client_->connection()->unixSocketPeerCredentials(); +#ifndef 
SO_PEERCRED + EXPECT_EQ(credentials, absl::nullopt); +#else + EXPECT_EQ(credentials->pid, getpid()); + EXPECT_EQ(credentials->uid, getuid()); + EXPECT_EQ(credentials->gid, getgid()); +#endif + + upstream_request_->encodeHeaders(Http::TestHeaderMapImpl{{":status", "200"}}, true); + + response->waitForEndStream(); +} + TEST_P(UdsListenerIntegrationTest, RouterRequestAndResponseWithBodyNoBuffer) { ConnectionCreationFunction creator = createConnectionFn(); testRouterRequestAndResponseWithBody(1024, 512, false, &creator); diff --git a/test/mocks/network/connection.h b/test/mocks/network/connection.h index 672986a77ebb0..3430fcf14d0a2 100644 --- a/test/mocks/network/connection.h +++ b/test/mocks/network/connection.h @@ -65,6 +65,8 @@ class MockConnection : public Connection, public MockConnectionBase { MOCK_METHOD1(detectEarlyCloseWhenReadDisabled, void(bool)); MOCK_CONST_METHOD0(readEnabled, bool()); MOCK_CONST_METHOD0(remoteAddress, const Address::InstanceConstSharedPtr&()); + MOCK_CONST_METHOD0(unixSocketPeerCredentials, + absl::optional()); MOCK_CONST_METHOD0(localAddress, const Address::InstanceConstSharedPtr&()); MOCK_METHOD1(setConnectionStats, void(const ConnectionStats& stats)); MOCK_CONST_METHOD0(ssl, const Ssl::ConnectionInfo*()); @@ -109,6 +111,8 @@ class MockClientConnection : public ClientConnection, public MockConnectionBase MOCK_METHOD1(detectEarlyCloseWhenReadDisabled, void(bool)); MOCK_CONST_METHOD0(readEnabled, bool()); MOCK_CONST_METHOD0(remoteAddress, const Address::InstanceConstSharedPtr&()); + MOCK_CONST_METHOD0(unixSocketPeerCredentials, + absl::optional()); MOCK_CONST_METHOD0(localAddress, const Address::InstanceConstSharedPtr&()); MOCK_METHOD1(setConnectionStats, void(const ConnectionStats& stats)); MOCK_CONST_METHOD0(ssl, const Ssl::ConnectionInfo*()); From 61e4fea7e2a0db270fa52a3bea95be45e19e223a Mon Sep 17 00:00:00 2001 From: Matt Klein Date: Mon, 15 Apr 2019 20:27:08 -0700 Subject: [PATCH 127/165] tools: allow hooks/format to be run from 
owning directory (#6585) Risk Level: Low Testing: Manual Signed-off-by: Matt Klein --- support/bootstrap | 6 +++--- tools/format_python_tools.sh | 5 ++--- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/support/bootstrap b/support/bootstrap index 0b1c52c007059..d53a7a7ca29e2 100755 --- a/support/bootstrap +++ b/support/bootstrap @@ -45,14 +45,14 @@ if test ! -d "${DOT_GIT_DIR}"; then fi HOOKS_DIR="${DOT_GIT_DIR}/hooks" -HOOKS_DIR_RELPATH=$(relpath "${HOOKS_DIR}" "${PWD}") +HOOKS_DIR_RELPATH=$(relpath "${HOOKS_DIR}" "$(dirname $0)") if [ ! -e "${HOOKS_DIR}/prepare-commit-msg" ]; then echo "Installing hook 'prepare-commit-msg'" - ln -s "${HOOKS_DIR_RELPATH}/support/hooks/prepare-commit-msg" "${HOOKS_DIR}/prepare-commit-msg" + ln -sf "${HOOKS_DIR_RELPATH}/hooks/prepare-commit-msg" "${HOOKS_DIR}/prepare-commit-msg" fi if [ ! -e "${HOOKS_DIR}/pre-push" ]; then echo "Installing hook 'pre-push'" - ln -s "${HOOKS_DIR_RELPATH}/support/hooks/pre-push" "${HOOKS_DIR}/pre-push" + ln -sf "${HOOKS_DIR_RELPATH}/hooks/pre-push" "${HOOKS_DIR}/pre-push" fi diff --git a/tools/format_python_tools.sh b/tools/format_python_tools.sh index ad8e1db4e5f45..c58dccfb763e0 100755 --- a/tools/format_python_tools.sh +++ b/tools/format_python_tools.sh @@ -1,11 +1,10 @@ #!/bin/bash -. tools/shell_utils.sh - set -e VENV_DIR="pyformat" SCRIPTPATH=$(realpath "$(dirname $0)") +. $SCRIPTPATH/shell_utils.sh cd "$SCRIPTPATH" source_venv "$VENV_DIR" @@ -16,4 +15,4 @@ echo "Running Python format check..." python format_python_tools.py $1 echo "Running Python3 flake8 check..." -flake8 . --exclude=*/venv/* --count --select=E901,E999,F821,F822,F823 --show-source --statistics \ No newline at end of file +flake8 . 
--exclude=*/venv/* --count --select=E901,E999,F821,F822,F823 --show-source --statistics From 21fd1197716bdeca7e259d25f446ea1552a78246 Mon Sep 17 00:00:00 2001 From: Michael Puncel Date: Tue, 16 Apr 2019 11:52:19 -0400 Subject: [PATCH 128/165] refactor router filter to store upstream requests in a list. (#6540) This is in preparation for implementing #5841 which will introduce request racing. As of this commit there is no situation where there will be more than one upstream request in flight, however it organizes the code in such a way that doing so will cause less code churn. Signed-off-by: Michael Puncel --- source/common/http/conn_manager_impl.cc | 23 +-- source/common/http/conn_manager_impl.h | 2 +- source/common/router/BUILD | 1 + source/common/router/router.cc | 191 +++++++++++---------- source/common/router/router.h | 31 ++-- test/common/http/conn_manager_impl_test.cc | 10 ++ 6 files changed, 144 insertions(+), 114 deletions(-) diff --git a/source/common/http/conn_manager_impl.cc b/source/common/http/conn_manager_impl.cc index 329758d835e1f..764dc0e179dcb 100644 --- a/source/common/http/conn_manager_impl.cc +++ b/source/common/http/conn_manager_impl.cc @@ -1547,16 +1547,16 @@ bool ConnectionManagerImpl::ActiveStream::verbose() const { void ConnectionManagerImpl::ActiveStream::callHighWatermarkCallbacks() { ++high_watermark_count_; - if (watermark_callbacks_) { - watermark_callbacks_->onAboveWriteBufferHighWatermark(); + for (auto watermark_callbacks : watermark_callbacks_) { + watermark_callbacks->onAboveWriteBufferHighWatermark(); } } void ConnectionManagerImpl::ActiveStream::callLowWatermarkCallbacks() { ASSERT(high_watermark_count_ > 0); --high_watermark_count_; - if (watermark_callbacks_) { - watermark_callbacks_->onBelowWriteBufferLowWatermark(); + for (auto watermark_callbacks : watermark_callbacks_) { + watermark_callbacks->onBelowWriteBufferLowWatermark(); } } @@ -1891,19 +1891,20 @@ void ConnectionManagerImpl::ActiveStreamDecoderFilter:: void 
ConnectionManagerImpl::ActiveStreamDecoderFilter::addDownstreamWatermarkCallbacks( DownstreamWatermarkCallbacks& watermark_callbacks) { - // This is called exactly once per stream, by the router filter. - // If there's ever a need for another filter to subscribe to watermark callbacks this can be - // turned into a vector. - ASSERT(parent_.watermark_callbacks_ == nullptr); - parent_.watermark_callbacks_ = &watermark_callbacks; + // This is called exactly once per upstream-stream, by the router filter. Therefore, we + // expect the same callbacks to not be registered twice. + ASSERT(std::find(parent_.watermark_callbacks_.begin(), parent_.watermark_callbacks_.end(), + &watermark_callbacks) == parent_.watermark_callbacks_.end()); + parent_.watermark_callbacks_.emplace(parent_.watermark_callbacks_.end(), &watermark_callbacks); for (uint32_t i = 0; i < parent_.high_watermark_count_; ++i) { watermark_callbacks.onAboveWriteBufferHighWatermark(); } } void ConnectionManagerImpl::ActiveStreamDecoderFilter::removeDownstreamWatermarkCallbacks( DownstreamWatermarkCallbacks& watermark_callbacks) { - ASSERT(parent_.watermark_callbacks_ == &watermark_callbacks); - parent_.watermark_callbacks_ = nullptr; + ASSERT(std::find(parent_.watermark_callbacks_.begin(), parent_.watermark_callbacks_.end(), + &watermark_callbacks) != parent_.watermark_callbacks_.end()); + parent_.watermark_callbacks_.remove(&watermark_callbacks); } bool ConnectionManagerImpl::ActiveStreamDecoderFilter::recreateStream() { diff --git a/source/common/http/conn_manager_impl.h b/source/common/http/conn_manager_impl.h index 2ba321902cf6f..3d61de6622912 100644 --- a/source/common/http/conn_manager_impl.h +++ b/source/common/http/conn_manager_impl.h @@ -503,7 +503,7 @@ class ConnectionManagerImpl : Logger::Loggable, StreamInfo::StreamInfoImpl stream_info_; absl::optional cached_route_; absl::optional cached_cluster_info_; - DownstreamWatermarkCallbacks* watermark_callbacks_{nullptr}; + std::list 
watermark_callbacks_{}; uint32_t buffer_limit_{0}; uint32_t high_watermark_count_{0}; const std::string* decorated_operation_{nullptr}; diff --git a/source/common/router/BUILD b/source/common/router/BUILD index 498babb17239e..39c941bc3f06d 100644 --- a/source/common/router/BUILD +++ b/source/common/router/BUILD @@ -145,6 +145,7 @@ envoy_cc_library( "//source/common/common:enum_to_int", "//source/common/common:hash_lib", "//source/common/common:hex_lib", + "//source/common/common:linked_object", "//source/common/common:minimal_logger_lib", "//source/common/common:utility_lib", "//source/common/grpc:common_lib", diff --git a/source/common/router/router.cc b/source/common/router/router.cc index f267e98229214..72c9576a43d9f 100644 --- a/source/common/router/router.cc +++ b/source/common/router/router.cc @@ -181,7 +181,7 @@ FilterUtility::finalTimeout(const RouteEntry& route, Http::HeaderMap& request_he Filter::~Filter() { // Upstream resources should already have been cleaned. - ASSERT(!upstream_request_); + ASSERT(upstream_requests_.empty()); ASSERT(!retry_state_); } @@ -380,8 +380,9 @@ Http::FilterHeadersStatus Filter::decodeHeaders(Http::HeaderMap& headers, bool e ENVOY_STREAM_LOG(debug, "router decoding headers:\n{}", *callbacks_, headers); - upstream_request_ = std::make_unique(*this, *conn_pool); - upstream_request_->encodeHeaders(end_stream); + UpstreamRequestPtr upstream_request = std::make_unique(*this, *conn_pool); + upstream_request->moveIntoList(std::move(upstream_request), upstream_requests_); + upstream_requests_.front()->encodeHeaders(end_stream); if (end_stream) { onRequestComplete(); } @@ -413,6 +414,7 @@ void Filter::sendNoHealthyUpstreamResponse() { } Http::FilterDataStatus Filter::decodeData(Buffer::Instance& data, bool end_stream) { + ASSERT(upstream_requests_.size() == 1); bool buffering = (retry_state_ && retry_state_->enabled()) || do_shadowing_; if (buffering && buffer_limit_ > 0 && getLength(callbacks_->decodingBuffer()) + data.length() > 
buffer_limit_) { @@ -427,7 +429,7 @@ Http::FilterDataStatus Filter::decodeData(Buffer::Instance& data, bool end_strea // If we are going to buffer for retries or shadowing, we need to make a copy before encoding // since it's all moves from here on. Buffer::OwnedImpl copy(data); - upstream_request_->encodeData(copy, end_stream); + upstream_requests_.front()->encodeData(copy, end_stream); // If we are potentially going to retry or shadow this request we need to buffer. // This will not cause the connection manager to 413 because before we hit the @@ -436,7 +438,7 @@ Http::FilterDataStatus Filter::decodeData(Buffer::Instance& data, bool end_strea // potentially shadow. callbacks_->addDecodedData(data, true); } else { - upstream_request_->encodeData(data, end_stream); + upstream_requests_.front()->encodeData(data, end_stream); } if (end_stream) { @@ -449,7 +451,8 @@ Http::FilterDataStatus Filter::decodeData(Buffer::Instance& data, bool end_strea Http::FilterTrailersStatus Filter::decodeTrailers(Http::HeaderMap& trailers) { ENVOY_STREAM_LOG(debug, "router decoding trailers:\n{}", *callbacks_, trailers); downstream_trailers_ = &trailers; - upstream_request_->encodeTrailers(trailers); + ASSERT(upstream_requests_.size() == 1); + upstream_requests_.front()->encodeTrailers(trailers); onRequestComplete(); return Http::FilterTrailersStatus::StopIteration; } @@ -463,13 +466,15 @@ void Filter::setDecoderFilterCallbacks(Http::StreamDecoderFilterCallbacks& callb } void Filter::cleanup() { - // upstream_request_ is only destroyed in this method (cleanup()) or when we + ASSERT(upstream_requests_.size() <= 1); + // UpstreamRequests are only destroyed in this method (cleanup()) or when we // do a retry (setupRetry()). In the latter case we don't want to save the // upstream timings to the downstream info. 
- if (upstream_request_) { - callbacks_->streamInfo().setUpstreamTiming(upstream_request_->upstream_timing_); + if (upstream_requests_.size() == 1) { + UpstreamRequestPtr upstream_request = + upstream_requests_.back()->removeFromList(upstream_requests_); + callbacks_->streamInfo().setUpstreamTiming(upstream_request->upstream_timing_); } - upstream_request_.reset(); retry_state_.reset(); if (response_timeout_) { response_timeout_->disableTimer(); @@ -502,7 +507,7 @@ void Filter::onRequestComplete() { downstream_request_complete_time_ = dispatcher.timeSource().monotonicTime(); // Possible that we got an immediate reset. - if (upstream_request_) { + if (upstream_requests_.size() == 1) { // Even if we got an immediate reset, we could still shadow, but that is a riskier change and // seems unnecessary right now. maybeDoShadowing(); @@ -515,8 +520,8 @@ void Filter::onRequestComplete() { } void Filter::onDestroy() { - if (upstream_request_ && !attempting_internal_redirect_with_complete_stream_) { - upstream_request_->resetStream(); + if (upstream_requests_.size() == 1 && !attempting_internal_redirect_with_complete_stream_) { + upstream_requests_.front()->resetStream(); } cleanup(); } @@ -525,35 +530,32 @@ void Filter::onResponseTimeout() { ENVOY_STREAM_LOG(debug, "upstream timeout", *callbacks_); cluster_->stats().upstream_rq_timeout_.inc(); - // It's possible to timeout during a retry backoff delay when we have no upstream request. 
- if (upstream_request_) { - if (upstream_request_->upstream_host_) { - upstream_request_->upstream_host_->stats().rq_timeout_.inc(); + ASSERT(upstream_requests_.size() <= 1); + if (upstream_requests_.size() == 1) { + if (upstream_requests_.front()->upstream_host_) { + upstream_requests_.front()->upstream_host_->stats().rq_timeout_.inc(); } - upstream_request_->resetStream(); + + updateOutlierDetection(timeout_response_code_, *upstream_requests_.front().get()); + upstream_requests_.front()->resetStream(); } - updateOutlierDetection(timeout_response_code_); onUpstreamTimeoutAbort(StreamInfo::ResponseFlag::UpstreamRequestTimeout); } -void Filter::onPerTryTimeout() { - updateOutlierDetection(timeout_response_code_); +void Filter::onPerTryTimeout(UpstreamRequest& upstream_request) { + updateOutlierDetection(timeout_response_code_, upstream_request); - if (maybeRetryReset(Http::StreamResetReason::LocalReset)) { + if (maybeRetryReset(Http::StreamResetReason::LocalReset, upstream_request)) { return; } onUpstreamTimeoutAbort(StreamInfo::ResponseFlag::UpstreamRequestTimeout); } -void Filter::updateOutlierDetection(Http::Code code) { - Upstream::HostDescriptionConstSharedPtr upstream_host; - if (upstream_request_) { - upstream_host = upstream_request_->upstream_host_; - if (upstream_host) { - upstream_host->outlierDetector().putHttpResponseCode(enumToInt(code)); - } +void Filter::updateOutlierDetection(Http::Code code, UpstreamRequest& upstream_request) { + if (upstream_request.upstream_host_) { + upstream_request.upstream_host_->outlierDetector().putHttpResponseCode(enumToInt(code)); } } @@ -565,11 +567,12 @@ void Filter::onUpstreamTimeoutAbort(StreamInfo::ResponseFlag response_flags) { void Filter::onUpstreamAbort(Http::Code code, StreamInfo::ResponseFlag response_flags, absl::string_view body, bool dropped) { + ASSERT(upstream_requests_.size() <= 1); // If we have not yet sent anything downstream, send a response with an appropriate status code. 
// Otherwise just reset the ongoing response. if (downstream_response_started_) { - if (upstream_request_ != nullptr && upstream_request_->grpc_rq_success_deferred_) { - upstream_request_->upstream_host_->stats().rq_error_.inc(); + if (upstream_requests_.size() == 1 && upstream_requests_.front()->grpc_rq_success_deferred_) { + upstream_requests_.front()->upstream_host_->stats().rq_error_.inc(); config_.stats_.rq_reset_after_downstream_response_started_.inc(); } // This will destroy any created retry timers. @@ -577,8 +580,8 @@ void Filter::onUpstreamAbort(Http::Code code, StreamInfo::ResponseFlag response_ callbacks_->resetStream(); } else { Upstream::HostDescriptionConstSharedPtr upstream_host; - if (upstream_request_) { - upstream_host = upstream_request_->upstream_host_; + if (upstream_requests_.size() == 1) { + upstream_host = upstream_requests_.front()->upstream_host_; } // This will destroy any created retry timers. @@ -605,16 +608,15 @@ void Filter::onUpstreamAbort(Http::Code code, StreamInfo::ResponseFlag response_ } } -bool Filter::maybeRetryReset(Http::StreamResetReason reset_reason) { +bool Filter::maybeRetryReset(Http::StreamResetReason reset_reason, + UpstreamRequest& upstream_request) { // We don't retry if we already started the response. if (downstream_response_started_ || !retry_state_) { return false; } Upstream::HostDescriptionConstSharedPtr upstream_host; - if (upstream_request_) { - upstream_host = upstream_request_->upstream_host_; - } + upstream_host = upstream_request.upstream_host_; // Notify retry modifiers about the attempted host. 
if (upstream_host != nullptr) { @@ -638,14 +640,14 @@ bool Filter::maybeRetryReset(Http::StreamResetReason reset_reason) { } void Filter::onUpstreamReset(Http::StreamResetReason reset_reason, - absl::string_view transport_failure_reason) { - ASSERT(upstream_request_); + absl::string_view transport_failure_reason, + UpstreamRequest& upstream_request) { ENVOY_STREAM_LOG(debug, "upstream reset: reset reason {}", *callbacks_, Http::Utility::resetReasonToString(reset_reason)); - updateOutlierDetection(Http::Code::ServiceUnavailable); + updateOutlierDetection(Http::Code::ServiceUnavailable, upstream_request); - if (maybeRetryReset(reset_reason)) { + if (maybeRetryReset(reset_reason, upstream_request)) { return; } @@ -679,7 +681,8 @@ Filter::streamResetReasonToResponseFlag(Http::StreamResetReason reset_reason) { NOT_REACHED_GCOVR_EXCL_LINE; } -void Filter::handleNon5xxResponseHeaders(const Http::HeaderMap& headers, bool end_stream) { +void Filter::handleNon5xxResponseHeaders(const Http::HeaderMap& headers, + UpstreamRequest& upstream_request, bool end_stream) { // We need to defer gRPC success until after we have processed grpc-status in // the trailers. 
if (grpc_request_) { @@ -687,15 +690,15 @@ void Filter::handleNon5xxResponseHeaders(const Http::HeaderMap& headers, bool en absl::optional grpc_status = Grpc::Common::getGrpcStatus(headers); if (grpc_status && !Http::CodeUtility::is5xx(Grpc::Utility::grpcToHttpStatus(grpc_status.value()))) { - upstream_request_->upstream_host_->stats().rq_success_.inc(); + upstream_request.upstream_host_->stats().rq_success_.inc(); } else { - upstream_request_->upstream_host_->stats().rq_error_.inc(); + upstream_request.upstream_host_->stats().rq_error_.inc(); } } else { - upstream_request_->grpc_rq_success_deferred_ = true; + upstream_request.grpc_rq_success_deferred_ = true; } } else { - upstream_request_->upstream_host_->stats().rq_success_.inc(); + upstream_request.upstream_host_->stats().rq_success_.inc(); } } @@ -713,24 +716,25 @@ void Filter::onUpstream100ContinueHeaders(Http::HeaderMapPtr&& headers) { } void Filter::onUpstreamHeaders(uint64_t response_code, Http::HeaderMapPtr&& headers, - bool end_stream) { + UpstreamRequest& upstream_request, bool end_stream) { + ASSERT(upstream_requests_.size() == 1); ENVOY_STREAM_LOG(debug, "upstream headers complete: end_stream={}", *callbacks_, end_stream); - upstream_request_->upstream_host_->outlierDetector().putHttpResponseCode(response_code); + upstream_request.upstream_host_->outlierDetector().putHttpResponseCode(response_code); if (headers->EnvoyImmediateHealthCheckFail() != nullptr) { - upstream_request_->upstream_host_->healthChecker().setUnhealthy(); + upstream_request.upstream_host_->healthChecker().setUnhealthy(); } if (retry_state_) { // Notify retry modifiers about the attempted host. - retry_state_->onHostAttempted(upstream_request_->upstream_host_); + retry_state_->onHostAttempted(upstream_request.upstream_host_); + // Capture upstream_host since setupRetry() in the following line will clear + // upstream_request. 
+ const auto upstream_host = upstream_request.upstream_host_; const RetryStatus retry_status = retry_state_->shouldRetryHeaders(*headers, [this]() -> void { doRetry(); }); - // Capture upstream_host since setupRetry() in the following line will clear - // upstream_request_. - const auto upstream_host = upstream_request_->upstream_host_; if (retry_status == RetryStatus::Yes && setupRetry(end_stream)) { Http::CodeStats& code_stats = httpContext().codeStats(); code_stats.chargeBasicResponseStat(cluster_->statsScope(), "retry.", @@ -751,7 +755,7 @@ void Filter::onUpstreamHeaders(uint64_t response_code, Http::HeaderMapPtr&& head if (static_cast(response_code) == Http::Code::Found && route_entry_->internalRedirectAction() == InternalRedirectAction::Handle && - setupRedirect(*headers)) { + setupRedirect(*headers, upstream_request)) { return; // If the redirect could not be handled, fail open and let it pass to the // next downstream. @@ -769,12 +773,12 @@ void Filter::onUpstreamHeaders(uint64_t response_code, Http::HeaderMapPtr&& head } } - upstream_request_->upstream_canary_ = + upstream_request.upstream_canary_ = (headers->EnvoyUpstreamCanary() && headers->EnvoyUpstreamCanary()->value() == "true") || - upstream_request_->upstream_host_->canary(); - chargeUpstreamCode(response_code, *headers, upstream_request_->upstream_host_, false); + upstream_request.upstream_host_->canary(); + chargeUpstreamCode(response_code, *headers, upstream_request.upstream_host_, false); if (!Http::CodeUtility::is5xx(response_code)) { - handleNon5xxResponseHeaders(*headers, end_stream); + handleNon5xxResponseHeaders(*headers, upstream_request, end_stream); } // Append routing cookies @@ -789,7 +793,7 @@ void Filter::onUpstreamHeaders(uint64_t response_code, Http::HeaderMapPtr&& head downstream_response_started_ = true; if (end_stream) { - onUpstreamComplete(); + onUpstreamComplete(upstream_request); } callbacks_->streamInfo().setResponseCodeDetails( @@ -797,29 +801,32 @@ void 
Filter::onUpstreamHeaders(uint64_t response_code, Http::HeaderMapPtr&& head callbacks_->encodeHeaders(std::move(headers), end_stream); } -void Filter::onUpstreamData(Buffer::Instance& data, bool end_stream) { +void Filter::onUpstreamData(Buffer::Instance& data, UpstreamRequest& upstream_request, + bool end_stream) { + ASSERT(upstream_requests_.size() == 1); if (end_stream) { // gRPC request termination without trailers is an error. - if (upstream_request_->grpc_rq_success_deferred_) { - upstream_request_->upstream_host_->stats().rq_error_.inc(); + if (upstream_request.grpc_rq_success_deferred_) { + upstream_request.upstream_host_->stats().rq_error_.inc(); } - onUpstreamComplete(); + onUpstreamComplete(upstream_request); } callbacks_->encodeData(data, end_stream); } -void Filter::onUpstreamTrailers(Http::HeaderMapPtr&& trailers) { - if (upstream_request_->grpc_rq_success_deferred_) { +void Filter::onUpstreamTrailers(Http::HeaderMapPtr&& trailers, UpstreamRequest& upstream_request) { + ASSERT(upstream_requests_.size() == 1); + if (upstream_request.grpc_rq_success_deferred_) { absl::optional grpc_status = Grpc::Common::getGrpcStatus(*trailers); if (grpc_status && !Http::CodeUtility::is5xx(Grpc::Utility::grpcToHttpStatus(grpc_status.value()))) { - upstream_request_->upstream_host_->stats().rq_success_.inc(); + upstream_request.upstream_host_->stats().rq_success_.inc(); } else { - upstream_request_->upstream_host_->stats().rq_error_.inc(); + upstream_request.upstream_host_->stats().rq_error_.inc(); } } - onUpstreamComplete(); + onUpstreamComplete(upstream_request); callbacks_->encodeTrailers(std::move(trailers)); } @@ -827,9 +834,9 @@ void Filter::onUpstreamMetadata(Http::MetadataMapPtr&& metadata_map) { callbacks_->encodeMetadata(std::move(metadata_map)); } -void Filter::onUpstreamComplete() { +void Filter::onUpstreamComplete(UpstreamRequest& upstream_request) { if (!downstream_end_stream_) { - upstream_request_->resetStream(); + upstream_request.resetStream(); } if 
(config_.emit_dynamic_stats_ && !callbacks_->streamInfo().healthCheck() && @@ -838,7 +845,7 @@ void Filter::onUpstreamComplete() { std::chrono::milliseconds response_time = std::chrono::duration_cast( dispatcher.timeSource().monotonicTime() - downstream_request_complete_time_); - upstream_request_->upstream_host_->outlierDetector().putResponseTime(response_time); + upstream_request.upstream_host_->outlierDetector().putResponseTime(response_time); const Http::HeaderEntry* internal_request_header = downstream_headers_->EnvoyInternalRequest(); const bool internal_request = @@ -852,13 +859,13 @@ void Filter::onUpstreamComplete() { cluster_->statsScope(), EMPTY_STRING, response_time, - upstream_request_->upstream_canary_, + upstream_request.upstream_canary_, internal_request, route_entry_->virtualHost().name(), request_vcluster_ ? request_vcluster_->name() : EMPTY_STRING, zone_name, - upstreamZone(upstream_request_->upstream_host_)}; + upstreamZone(upstream_request.upstream_host_)}; code_stats.chargeResponseTiming(info); @@ -867,12 +874,12 @@ void Filter::onUpstreamComplete() { cluster_->statsScope(), alt_stat_prefix_, response_time, - upstream_request_->upstream_canary_, + upstream_request.upstream_canary_, internal_request, EMPTY_STRING, EMPTY_STRING, zone_name, - upstreamZone(upstream_request_->upstream_host_)}; + upstreamZone(upstream_request.upstream_host_)}; code_stats.chargeResponseTiming(info); } @@ -891,16 +898,18 @@ bool Filter::setupRetry(bool end_stream) { return false; } + ASSERT(upstream_requests_.size() == 1); ENVOY_STREAM_LOG(debug, "performing retry", *callbacks_); if (!end_stream) { - upstream_request_->resetStream(); + upstream_requests_.front()->resetStream(); } - upstream_request_.reset(); + upstream_requests_.front()->removeFromList(upstream_requests_); + return true; } -bool Filter::setupRedirect(const Http::HeaderMap& headers) { +bool Filter::setupRedirect(const Http::HeaderMap& headers, UpstreamRequest& upstream_request) { 
ENVOY_STREAM_LOG(debug, "attempting internal redirect", *callbacks_); const Http::HeaderEntry* location = headers.Location(); @@ -914,7 +923,7 @@ bool Filter::setupRedirect(const Http::HeaderMap& headers) { // completion here and check it in onDestroy. This is annoyingly complicated but is better than // needlessly resetting streams. attempting_internal_redirect_with_complete_stream_ = - upstream_request_->upstream_timing_.last_upstream_rx_byte_received_ && downstream_end_stream_; + upstream_request.upstream_timing_.last_upstream_rx_byte_received_ && downstream_end_stream_; // As with setupRetry, redirects are not supported for streaming requests yet. if (downstream_end_stream_ && @@ -949,19 +958,19 @@ void Filter::doRetry() { } ASSERT(response_timeout_ || timeout_.global_timeout_.count() == 0); - ASSERT(!upstream_request_); - upstream_request_ = std::make_unique(*this, *conn_pool); - upstream_request_->encodeHeaders(!callbacks_->decodingBuffer() && !downstream_trailers_); + UpstreamRequestPtr upstream_request = std::make_unique(*this, *conn_pool); + upstream_request->moveIntoList(std::move(upstream_request), upstream_requests_); + upstream_requests_.front()->encodeHeaders(!callbacks_->decodingBuffer() && !downstream_trailers_); // It's possible we got immediately reset. - if (upstream_request_) { + if (upstream_requests_.size() == 1) { if (callbacks_->decodingBuffer()) { // If we are doing a retry we need to make a copy. 
Buffer::OwnedImpl copy(*callbacks_->decodingBuffer()); - upstream_request_->encodeData(copy, !downstream_trailers_); + upstream_requests_.front()->encodeData(copy, !downstream_trailers_); } if (downstream_trailers_) { - upstream_request_->encodeTrailers(*downstream_trailers_); + upstream_requests_.front()->encodeTrailers(*downstream_trailers_); } } } @@ -1014,19 +1023,19 @@ void Filter::UpstreamRequest::decodeHeaders(Http::HeaderMapPtr&& headers, bool e upstream_headers_ = headers.get(); const uint64_t response_code = Http::Utility::getResponseStatus(*headers); stream_info_.response_code_ = static_cast(response_code); - parent_.onUpstreamHeaders(response_code, std::move(headers), end_stream); + parent_.onUpstreamHeaders(response_code, std::move(headers), *this, end_stream); } void Filter::UpstreamRequest::decodeData(Buffer::Instance& data, bool end_stream) { maybeEndDecode(end_stream); stream_info_.addBytesReceived(data.length()); - parent_.onUpstreamData(data, end_stream); + parent_.onUpstreamData(data, *this, end_stream); } void Filter::UpstreamRequest::decodeTrailers(Http::HeaderMapPtr&& trailers) { maybeEndDecode(true); upstream_trailers_ = trailers.get(); - parent_.onUpstreamTrailers(std::move(trailers)); + parent_.onUpstreamTrailers(std::move(trailers), *this); } void Filter::UpstreamRequest::decodeMetadata(Http::MetadataMapPtr&& metadata_map) { @@ -1095,7 +1104,7 @@ void Filter::UpstreamRequest::onResetStream(Http::StreamResetReason reason, clearRequestEncoder(); if (!calling_encode_headers_) { stream_info_.setResponseFlag(parent_.streamResetReasonToResponseFlag(reason)); - parent_.onUpstreamReset(reason, transport_failure_reason); + parent_.onUpstreamReset(reason, transport_failure_reason, *this); } else { deferred_reset_reason_ = reason; } @@ -1136,7 +1145,7 @@ void Filter::UpstreamRequest::onPerTryTimeout() { } resetStream(); stream_info_.setResponseFlag(StreamInfo::ResponseFlag::UpstreamRequestTimeout); - parent_.onPerTryTimeout(); + 
parent_.onPerTryTimeout(*this); } else { ENVOY_STREAM_LOG(debug, "ignored upstream per try timeout due to already started downstream response", @@ -1238,6 +1247,7 @@ void Filter::UpstreamRequest::clearRequestEncoder() { void Filter::UpstreamRequest::DownstreamWatermarkManager::onAboveWriteBufferHighWatermark() { ASSERT(parent_.request_encoder_); + ASSERT(parent_.parent_.upstream_requests_.size() == 1); // The downstream connection is overrun. Pause reads from upstream. parent_.parent_.cluster_->stats().upstream_flow_control_paused_reading_total_.inc(); parent_.request_encoder_->getStream().readDisable(true); @@ -1245,6 +1255,7 @@ void Filter::UpstreamRequest::DownstreamWatermarkManager::onAboveWriteBufferHigh void Filter::UpstreamRequest::DownstreamWatermarkManager::onBelowWriteBufferLowWatermark() { ASSERT(parent_.request_encoder_); + ASSERT(parent_.parent_.upstream_requests_.size() == 1); // The downstream connection has buffer available. Resume reads from upstream. parent_.parent_.cluster_->stats().upstream_flow_control_resumed_reading_total_.inc(); parent_.request_encoder_->getStream().readDisable(false); diff --git a/source/common/router/router.h b/source/common/router/router.h index 8bbd312974e90..bb39d1391a465 100644 --- a/source/common/router/router.h +++ b/source/common/router/router.h @@ -21,6 +21,7 @@ #include "common/buffer/watermark_buffer.h" #include "common/common/hash.h" #include "common/common/hex.h" +#include "common/common/linked_object.h" #include "common/common/logger.h" #include "common/config/well_known_names.h" #include "common/http/utility.h" @@ -268,7 +269,8 @@ class Filter : Logger::Loggable, private: struct UpstreamRequest : public Http::StreamDecoder, public Http::StreamCallbacks, - public Http::ConnectionPool::Callbacks { + public Http::ConnectionPool::Callbacks, + public LinkedObject { UpstreamRequest(Filter& parent, Http::ConnectionPool::Instance& pool); ~UpstreamRequest(); @@ -301,10 +303,12 @@ class Filter : Logger::Loggable, void 
onBelowWriteBufferLowWatermark() override { enableDataFromDownstream(); } void disableDataFromDownstream() { + ASSERT(parent_.upstream_requests_.size() == 1); parent_.cluster_->stats().upstream_flow_control_backed_up_total_.inc(); parent_.callbacks_->onDecoderFilterAboveWriteBufferHighWatermark(); } void enableDataFromDownstream() { + ASSERT(parent_.upstream_requests_.size() == 1); parent_.cluster_->stats().upstream_flow_control_drained_total_.inc(); parent_.callbacks_->onDecoderFilterBelowWriteBufferLowWatermark(); } @@ -371,8 +375,8 @@ class Filter : Logger::Loggable, Upstream::ResourcePriority priority) PURE; Http::ConnectionPool::Instance* getConnPool(); void maybeDoShadowing(); - bool maybeRetryReset(Http::StreamResetReason reset_reason); - void onPerTryTimeout(); + bool maybeRetryReset(Http::StreamResetReason reset_reason, UpstreamRequest& upstream_request); + void onPerTryTimeout(UpstreamRequest& upstream_request); void onRequestComplete(); void onResponseTimeout(); void onUpstream100ContinueHeaders(Http::HeaderMapPtr&& headers); @@ -383,20 +387,23 @@ class Filter : Logger::Loggable, // downstream if appropriate. 
void onUpstreamAbort(Http::Code code, StreamInfo::ResponseFlag response_flag, absl::string_view body, bool dropped); - void onUpstreamHeaders(uint64_t response_code, Http::HeaderMapPtr&& headers, bool end_stream); - void onUpstreamData(Buffer::Instance& data, bool end_stream); - void onUpstreamTrailers(Http::HeaderMapPtr&& trailers); + void onUpstreamHeaders(uint64_t response_code, Http::HeaderMapPtr&& headers, + UpstreamRequest& upstream_request, bool end_stream); + void onUpstreamData(Buffer::Instance& data, UpstreamRequest& upstream_request, bool end_stream); + void onUpstreamTrailers(Http::HeaderMapPtr&& trailers, UpstreamRequest& upstream_request); void onUpstreamMetadata(Http::MetadataMapPtr&& metadata_map); - void onUpstreamComplete(); - void onUpstreamReset(Http::StreamResetReason reset_reason, absl::string_view transport_failure); + void onUpstreamComplete(UpstreamRequest& upstream_request); + void onUpstreamReset(Http::StreamResetReason reset_reason, absl::string_view transport_failure, + UpstreamRequest& upstream_request); void sendNoHealthyUpstreamResponse(); bool setupRetry(bool end_stream); - bool setupRedirect(const Http::HeaderMap& headers); - void updateOutlierDetection(Http::Code code); + bool setupRedirect(const Http::HeaderMap& headers, UpstreamRequest& upstream_request); + void updateOutlierDetection(Http::Code code, UpstreamRequest& upstream_request); void doRetry(); // Called immediately after a non-5xx header is received from upstream, performs stats accounting // and handle difference between gRPC and non-gRPC requests. 
- void handleNon5xxResponseHeaders(const Http::HeaderMap& headers, bool end_stream); + void handleNon5xxResponseHeaders(const Http::HeaderMap& headers, + UpstreamRequest& upstream_request, bool end_stream); TimeSource& timeSource() { return config_.timeSource(); } Http::Context& httpContext() { return config_.http_context_; } @@ -410,7 +417,7 @@ class Filter : Logger::Loggable, Event::TimerPtr response_timeout_; FilterUtility::TimeoutData timeout_; Http::Code timeout_response_code_ = Http::Code::GatewayTimeout; - UpstreamRequestPtr upstream_request_; + std::list upstream_requests_; bool grpc_request_{}; Http::HeaderMap* downstream_headers_{}; Http::HeaderMap* downstream_trailers_{}; diff --git a/test/common/http/conn_manager_impl_test.cc b/test/common/http/conn_manager_impl_test.cc index 656efa37e44da..595c415246a83 100644 --- a/test/common/http/conn_manager_impl_test.cc +++ b/test/common/http/conn_manager_impl_test.cc @@ -2968,6 +2968,12 @@ TEST_F(HttpConnectionManagerImplTest, UnderlyingConnectionWatermarksPassedOnWith MockDownstreamWatermarkCallbacks callbacks; EXPECT_CALL(callbacks, onAboveWriteBufferHighWatermark()); decoder_filters_[0]->callbacks_->addDownstreamWatermarkCallbacks(callbacks); + + // Ensures that when new callbacks are registered they get invoked immediately + // and the already-registered callbacks do not. + MockDownstreamWatermarkCallbacks callbacks2; + EXPECT_CALL(callbacks2, onAboveWriteBufferHighWatermark()); + decoder_filters_[0]->callbacks_->addDownstreamWatermarkCallbacks(callbacks2); } } @@ -3086,10 +3092,13 @@ TEST_F(HttpConnectionManagerImplTest, HitFilterWatermarkLimits) { MockDownstreamWatermarkCallbacks callbacks; decoder_filters_[0]->callbacks_->addDownstreamWatermarkCallbacks(callbacks); + MockDownstreamWatermarkCallbacks callbacks2; + decoder_filters_[0]->callbacks_->addDownstreamWatermarkCallbacks(callbacks2); // Now overload the buffer with response data. The downstream watermark // callbacks should be called. 
EXPECT_CALL(callbacks, onAboveWriteBufferHighWatermark()); + EXPECT_CALL(callbacks2, onAboveWriteBufferHighWatermark()); Buffer::OwnedImpl fake_response("A long enough string to go over watermarks"); EXPECT_CALL(*encoder_filters_[1], encodeData(_, false)) .WillOnce(Return(FilterDataStatus::StopIterationAndWatermark)); @@ -3098,6 +3107,7 @@ TEST_F(HttpConnectionManagerImplTest, HitFilterWatermarkLimits) { // Change the limit so the buffered data is below the new watermark. buffer_len = encoder_filters_[1]->callbacks_->encodingBuffer()->length(); EXPECT_CALL(callbacks, onBelowWriteBufferLowWatermark()); + EXPECT_CALL(callbacks2, onBelowWriteBufferLowWatermark()); encoder_filters_[1]->callbacks_->setEncoderBufferLimit((buffer_len + 1) * 2); } From 71634517d5e66380967ebf6bd8aeb44e7d5e4b4a Mon Sep 17 00:00:00 2001 From: Maxime Bedard Date: Tue, 16 Apr 2019 11:54:01 -0400 Subject: [PATCH 129/165] redis: prefixed routing (#6413) Signed-off-by: Maxime Bedard --- DEPRECATED.md | 2 +- .../network/redis_proxy/v2/redis_proxy.proto | 63 +++++- docs/root/intro/arch_overview/redis.rst | 5 +- docs/root/intro/deprecated.rst | 1 + docs/root/intro/version_history.rst | 1 + source/common/common/utility.h | 34 +++- .../filters/network/redis_proxy/BUILD | 28 ++- .../redis_proxy/command_splitter_impl.cc | 76 +++++--- .../redis_proxy/command_splitter_impl.h | 38 ++-- .../filters/network/redis_proxy/config.cc | 43 +++- .../filters/network/redis_proxy/conn_pool.h | 2 +- .../network/redis_proxy/conn_pool_impl.h | 1 - .../network/redis_proxy/proxy_filter.cc | 2 +- .../network/redis_proxy/proxy_filter.h | 1 - .../filters/network/redis_proxy/router.h | 37 ++++ .../network/redis_proxy/router_impl.cc | 61 ++++++ .../filters/network/redis_proxy/router_impl.h | 53 +++++ test/common/common/utility_test.cc | 37 ++++ .../filters/network/redis_proxy/BUILD | 13 ++ .../redis_proxy/command_lookup_speed_test.cc | 19 +- .../redis_proxy/command_splitter_impl_test.cc | 20 +- 
.../network/redis_proxy/config_test.cc | 15 ++ .../redis_proxy/conn_pool_impl_test.cc | 3 +- .../filters/network/redis_proxy/mocks.cc | 3 + .../filters/network/redis_proxy/mocks.h | 9 + .../network/redis_proxy/proxy_filter_test.cc | 2 +- .../redis_proxy_integration_test.cc | 136 ++++++++++++- .../network/redis_proxy/router_impl_test.cc | 183 ++++++++++++++++++ 28 files changed, 789 insertions(+), 99 deletions(-) create mode 100644 source/extensions/filters/network/redis_proxy/router.h create mode 100644 source/extensions/filters/network/redis_proxy/router_impl.cc create mode 100644 source/extensions/filters/network/redis_proxy/router_impl.h create mode 100644 test/extensions/filters/network/redis_proxy/router_impl_test.cc diff --git a/DEPRECATED.md b/DEPRECATED.md index ab705e0ac558a..1b2962adcb975 100644 --- a/DEPRECATED.md +++ b/DEPRECATED.md @@ -1,3 +1,3 @@ # DEPRECATED -The [deprecated log](https://www.envoyproxy.io/docs/envoy/latest/intro/deprecated) can be found in the official Envoy developer documentation. \ No newline at end of file +The [deprecated log](https://www.envoyproxy.io/docs/envoy/latest/intro/deprecated) can be found in the official Envoy developer documentation. diff --git a/api/envoy/config/filter/network/redis_proxy/v2/redis_proxy.proto b/api/envoy/config/filter/network/redis_proxy/v2/redis_proxy.proto index 23448eff903f9..16196cc07a3b1 100644 --- a/api/envoy/config/filter/network/redis_proxy/v2/redis_proxy.proto +++ b/api/envoy/config/filter/network/redis_proxy/v2/redis_proxy.proto @@ -22,7 +22,13 @@ message RedisProxy { // Name of cluster from cluster manager. See the :ref:`configuration section // ` of the architecture overview for recommendations on // configuring the backing cluster. - string cluster = 2 [(validate.rules).string.min_bytes = 1]; + // + // .. attention:: + // + // This field is deprecated. Use a :ref:`catch-all + // cluster` + // instead. + string cluster = 2 [deprecated = true]; // Redis connection pool settings. 
message ConnPoolSettings { @@ -55,10 +61,63 @@ message RedisProxy { bool enable_redirection = 3; } - // Network settings for the connection pool to the upstream cluster. + // Network settings for the connection pool to the upstream clusters. ConnPoolSettings settings = 3 [(validate.rules).message.required = true]; // Indicates that latency stat should be computed in microseconds. By default it is computed in // milliseconds. bool latency_in_micros = 4; + + message PrefixRoutes { + message Route { + // String prefix that must match the beginning of the keys. Envoy will always favor the + // longest match. + string prefix = 1 [(validate.rules).string.min_bytes = 1]; + + // Indicates if the prefix needs to be removed from the key when forwarded. + bool remove_prefix = 2; + + // Upstream cluster to forward the command to. + string cluster = 3 [(validate.rules).string.min_bytes = 1]; + } + + // List of prefix routes. + repeated Route routes = 1 [(gogoproto.nullable) = false]; + + // Indicates that prefix matching should be case insensitive. + bool case_insensitive = 2; + + // Optional catch-all route to forward commands that don't match any of the routes. The + // catch-all route becomes required when no routes are specified. + string catch_all_cluster = 3; + } + + // List of **unique** prefixes used to separate keys from different workloads to different + // clusters. Envoy will always favor the longest match first in case of overlap. A catch-all + // cluster can be used to forward commands when there is no match. Time complexity of the + // lookups is in O(min(longest key prefix, key length)). + // + // Example: + // + // .. code-block:: yaml + // + // prefix_routes: + // routes: + // - prefix: "ab" + // cluster: "cluster_a" + // - prefix: "abc" + // cluster: "cluster_b" + // + // When using the above routes, the following prefixes would be sent to: + // + // * 'get abc:users' would retrieve the key 'abc:users' from cluster_b.
+ // * 'get ab:users' would retrieve the key 'ab:users' from cluster_a. + // * 'get z:users' would return a NoUpstreamHost error. A :ref:`catch-all + // cluster` + // would have retrieved the key from that cluster instead. + // + // See the :ref:`configuration section + // ` of the architecture overview for recommendations on + // configuring the backing clusters. PrefixRoutes prefix_routes = 5 [(gogoproto.nullable) = false]; } diff --git a/docs/root/intro/arch_overview/redis.rst b/docs/root/intro/arch_overview/redis.rst index e2f8efbc25656..4d2929a14e2ae 100644 --- a/docs/root/intro/arch_overview/redis.rst +++ b/docs/root/intro/arch_overview/redis.rst @@ -8,7 +8,9 @@ In this mode, the goals of Envoy are to maintain availability and partition tole over consistency. This is the key point when comparing Envoy to `Redis Cluster `_. Envoy is designed as a best-effort cache, meaning that it will not try to reconcile inconsistent data or keep a globally consistent -view of cluster membership. +view of cluster membership. It also supports routing commands from different workloads to +different upstream clusters based on their access patterns, eviction, or isolation +requirements. The Redis project offers a thorough reference on partitioning as it relates to Redis. See "`Partitioning: how to split data among multiple Redis instances @@ -22,6 +24,7 @@ The Redis project offers a thorough reference on partitioning as it relates to R * Detailed command statistics. * Active and passive healthchecking. * Hash tagging. +* Prefix routing. **Planned future enhancements**: diff --git a/docs/root/intro/deprecated.rst b/docs/root/intro/deprecated.rst index 423a16d492e5c..b3a7b7d918a2d 100644 --- a/docs/root/intro/deprecated.rst +++ b/docs/root/intro/deprecated.rst @@ -12,6 +12,7 @@ Deprecated items below are listed in chronological order. Version 1.11.0 (Pending) ======================== +* Use of :ref:`cluster ` in :ref:`redis_proxy.proto ` is deprecated.
Set a :ref:`catch_all_cluster ` instead. Version 1.10.0 (Apr 5, 2019) ============================ diff --git a/docs/root/intro/version_history.rst b/docs/root/intro/version_history.rst index ffe16eda68412..93dcb931cc0e7 100644 --- a/docs/root/intro/version_history.rst +++ b/docs/root/intro/version_history.rst @@ -6,6 +6,7 @@ Version history * dubbo_proxy: support the :ref:`Dubbo proxy filter `. * event: added :ref:`loop duration and poll delay statistics `. * http: mitigated a race condition with the :ref:`delayed_close_timeout` where it could trigger while actively flushing a pending write buffer for a downstream connection. +* redis: added :ref:`prefix routing ` to enable routing commands based on their key's prefix to different upstreams. * redis: add support for zpopmax and zpopmin commands. * upstream: added :ref:`upstream_cx_pool_overflow ` for the connection pool circuit breaker. diff --git a/source/common/common/utility.h b/source/common/common/utility.h index 785df6d8aa404..9eaddb7f64da1 100644 --- a/source/common/common/utility.h +++ b/source/common/common/utility.h @@ -568,8 +568,11 @@ template struct TrieLookupTable { * Adds an entry to the Trie at the given Key. * @param key the key used to add the entry. * @param value the value to be associated with the key. + * @param overwrite_existing will overwrite the value when the value for a given key already + * exists. + * @return false when a value already exists for the given key.
*/ - void add(const char* key, Value value) { + bool add(const char* key, Value value, bool overwrite_existing = true) { TrieEntry* current = &root_; while (uint8_t c = *key) { if (!current->entries_[c]) { @@ -578,7 +581,11 @@ template struct TrieLookupTable { current = current->entries_[c].get(); key++; } + if (current->value_ && !overwrite_existing) { + return false; + } current->value_ = value; + return true; } /** @@ -599,6 +606,31 @@ template struct TrieLookupTable { return current->value_; } + /** + * Finds the entry associated with the longest prefix. Complexity is O(min(longest key prefix, key + * length)) + * @param key the key used to find. + * @return the value matching the longest prefix based on the key. + */ + Value findLongestPrefix(const char* key) const { + const TrieEntry* current = &root_; + const TrieEntry* result = nullptr; + while (uint8_t c = *key) { + if (current->value_) { + result = current; + } + + // https://github.com/facebook/mcrouter/blob/master/mcrouter/lib/fbi/cpp/Trie-inl.h#L126-L143 + current = current->entries_[c].get(); + if (current == nullptr) { + return result ? result->value_ : nullptr; + } + + key++; + } + return current->value_ ?
current->value_ : (result ? result->value_ : nullptr); + } + TrieEntry root_; }; diff --git a/source/extensions/filters/network/redis_proxy/BUILD b/source/extensions/filters/network/redis_proxy/BUILD index 4c56109ada4cd..9825a435144e7 100644 --- a/source/extensions/filters/network/redis_proxy/BUILD +++ b/source/extensions/filters/network/redis_proxy/BUILD @@ -30,13 +30,22 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "router_interface", + hdrs = ["router.h"], + deps = [ + ":conn_pool_interface", + "@envoy_api//envoy/config/filter/network/redis_proxy/v2:redis_proxy_cc", + ], +) + envoy_cc_library( name = "command_splitter_lib", srcs = ["command_splitter_impl.cc"], hdrs = ["command_splitter_impl.h"], deps = [ ":command_splitter_interface", - ":conn_pool_interface", + ":router_interface", "//include/envoy/stats:stats_macros", "//include/envoy/stats:timespan", "//source/common/common:assert_lib", @@ -54,7 +63,6 @@ envoy_cc_library( hdrs = ["conn_pool_impl.h"], deps = [ ":conn_pool_interface", - "//include/envoy/router:router_interface", "//include/envoy/thread_local:thread_local_interface", "//include/envoy/upstream:cluster_manager_interface", "//source/common/buffer:buffer_lib", @@ -97,7 +105,21 @@ envoy_cc_library( "//source/extensions/filters/network/common:factory_base_lib", "//source/extensions/filters/network/common/redis:codec_lib", "//source/extensions/filters/network/redis_proxy:command_splitter_lib", - "//source/extensions/filters/network/redis_proxy:conn_pool_lib", "//source/extensions/filters/network/redis_proxy:proxy_filter_lib", + "//source/extensions/filters/network/redis_proxy:router_lib", + ], +) + +envoy_cc_library( + name = "router_lib", + srcs = ["router_impl.cc"], + hdrs = ["router_impl.h"], + deps = [ + ":router_interface", + "//include/envoy/thread_local:thread_local_interface", + "//include/envoy/upstream:cluster_manager_interface", + "//source/common/common:to_lower_table_lib", + "//source/extensions/filters/network/redis_proxy:conn_pool_lib", +
"@envoy_api//envoy/config/filter/network/redis_proxy/v2:redis_proxy_cc", ], ) diff --git a/source/extensions/filters/network/redis_proxy/command_splitter_impl.cc b/source/extensions/filters/network/redis_proxy/command_splitter_impl.cc index 9fc6189a393e7..276c1ac15f867 100644 --- a/source/extensions/filters/network/redis_proxy/command_splitter_impl.cc +++ b/source/extensions/filters/network/redis_proxy/command_splitter_impl.cc @@ -140,16 +140,20 @@ void SingleServerRequest::cancel() { handle_ = nullptr; } -SplitRequestPtr SimpleRequest::create(ConnPool::Instance& conn_pool, +SplitRequestPtr SimpleRequest::create(Router& router, Common::Redis::RespValuePtr&& incoming_request, SplitCallbacks& callbacks, CommandStats& command_stats, TimeSource& time_source, bool latency_in_micros) { std::unique_ptr request_ptr{ new SimpleRequest(callbacks, command_stats, time_source, latency_in_micros)}; - request_ptr->conn_pool_ = &conn_pool; - request_ptr->handle_ = conn_pool.makeRequest(incoming_request->asArray()[1].asString(), - *incoming_request, *request_ptr); + auto conn_pool = router.upstreamPool(incoming_request->asArray()[1].asString()); + if (conn_pool) { + request_ptr->conn_pool_ = conn_pool; + request_ptr->handle_ = conn_pool->makeRequest(incoming_request->asArray()[1].asString(), + *incoming_request, *request_ptr); + } + if (!request_ptr->handle_) { callbacks.onResponse(Utility::makeError(Response::get().NoUpstreamHost)); return nullptr; @@ -159,8 +163,7 @@ SplitRequestPtr SimpleRequest::create(ConnPool::Instance& conn_pool, return std::move(request_ptr); } -SplitRequestPtr EvalRequest::create(ConnPool::Instance& conn_pool, - Common::Redis::RespValuePtr&& incoming_request, +SplitRequestPtr EvalRequest::create(Router& router, Common::Redis::RespValuePtr&& incoming_request, SplitCallbacks& callbacks, CommandStats& command_stats, TimeSource& time_source, bool latency_in_micros) { // EVAL looks like: EVAL script numkeys key [key ...] arg [arg ...] 
@@ -174,9 +177,13 @@ SplitRequestPtr EvalRequest::create(ConnPool::Instance& conn_pool, std::unique_ptr request_ptr{ new EvalRequest(callbacks, command_stats, time_source, latency_in_micros)}; - request_ptr->conn_pool_ = &conn_pool; - request_ptr->handle_ = conn_pool.makeRequest(incoming_request->asArray()[3].asString(), - *incoming_request, *request_ptr); + auto conn_pool = router.upstreamPool(incoming_request->asArray()[3].asString()); + if (conn_pool) { + request_ptr->conn_pool_ = conn_pool; + request_ptr->handle_ = conn_pool->makeRequest(incoming_request->asArray()[3].asString(), + *incoming_request, *request_ptr); + } + if (!request_ptr->handle_) { command_stats.error_.inc(); callbacks.onResponse(Utility::makeError(Response::get().NoUpstreamHost)); @@ -208,8 +215,7 @@ void FragmentedRequest::onChildFailure(uint32_t index) { onChildResponse(Utility::makeError(Response::get().UpstreamFailure), index); } -SplitRequestPtr MGETRequest::create(ConnPool::Instance& conn_pool, - Common::Redis::RespValuePtr&& incoming_request, +SplitRequestPtr MGETRequest::create(Router& router, Common::Redis::RespValuePtr&& incoming_request, SplitCallbacks& callbacks, CommandStats& command_stats, TimeSource& time_source, bool latency_in_micros) { std::unique_ptr request_ptr{ @@ -237,9 +243,13 @@ SplitRequestPtr MGETRequest::create(ConnPool::Instance& conn_pool, single_mget.asArray()[1].asString() = incoming_request->asArray()[i].asString(); ENVOY_LOG(debug, "redis: parallel get: '{}'", single_mget.toString()); - pending_request.conn_pool_ = &conn_pool; - pending_request.handle_ = conn_pool.makeRequest(incoming_request->asArray()[i].asString(), - single_mget, pending_request); + auto conn_pool = router.upstreamPool(incoming_request->asArray()[i].asString()); + if (conn_pool) { + pending_request.conn_pool_ = conn_pool; + pending_request.handle_ = conn_pool->makeRequest(incoming_request->asArray()[i].asString(), + single_mget, pending_request); + } + if (!pending_request.handle_) { 
pending_request.onResponse(Utility::makeError(Response::get().NoUpstreamHost)); } @@ -254,7 +264,7 @@ SplitRequestPtr MGETRequest::create(ConnPool::Instance& conn_pool, } bool FragmentedRequest::onChildRedirection(const Common::Redis::RespValue& value, uint32_t index, - ConnPool::Instance* conn_pool) { + const ConnPool::InstanceSharedPtr& conn_pool) { std::vector err; bool ask_redirection = false; if (redirectionArgsInvalid(incoming_request_.get(), value, err, ask_redirection) || !conn_pool) { @@ -330,8 +340,7 @@ void MGETRequest::recreate(Common::Redis::RespValue& request, uint32_t index) { request.asArray().swap(values); } -SplitRequestPtr MSETRequest::create(ConnPool::Instance& conn_pool, - Common::Redis::RespValuePtr&& incoming_request, +SplitRequestPtr MSETRequest::create(Router& router, Common::Redis::RespValuePtr&& incoming_request, SplitCallbacks& callbacks, CommandStats& command_stats, TimeSource& time_source, bool latency_in_micros) { if ((incoming_request->asArray().size() - 1) % 2 != 0) { @@ -366,9 +375,13 @@ SplitRequestPtr MSETRequest::create(ConnPool::Instance& conn_pool, single_mset.asArray()[2].asString() = incoming_request->asArray()[i + 1].asString(); ENVOY_LOG(debug, "redis: parallel set: '{}'", single_mset.toString()); - pending_request.conn_pool_ = &conn_pool; - pending_request.handle_ = conn_pool.makeRequest(incoming_request->asArray()[i].asString(), - single_mset, pending_request); + auto conn_pool = router.upstreamPool(incoming_request->asArray()[i].asString()); + if (conn_pool) { + pending_request.conn_pool_ = conn_pool; + pending_request.handle_ = conn_pool->makeRequest(incoming_request->asArray()[i].asString(), + single_mset, pending_request); + } + if (!pending_request.handle_) { pending_request.onResponse(Utility::makeError(Response::get().NoUpstreamHost)); } @@ -427,7 +440,7 @@ void MSETRequest::recreate(Common::Redis::RespValue& request, uint32_t index) { request.asArray().swap(values); } -SplitRequestPtr 
SplitKeysSumResultRequest::create(ConnPool::Instance& conn_pool, +SplitRequestPtr SplitKeysSumResultRequest::create(Router& router, Common::Redis::RespValuePtr&& incoming_request, SplitCallbacks& callbacks, CommandStats& command_stats, @@ -456,9 +469,13 @@ SplitRequestPtr SplitKeysSumResultRequest::create(ConnPool::Instance& conn_pool, single_fragment.asArray()[1].asString() = incoming_request->asArray()[i].asString(); ENVOY_LOG(debug, "redis: parallel {}: '{}'", incoming_request->asArray()[0].asString(), single_fragment.toString()); - pending_request.conn_pool_ = &conn_pool; - pending_request.handle_ = conn_pool.makeRequest(incoming_request->asArray()[i].asString(), - single_fragment, pending_request); + auto conn_pool = router.upstreamPool(incoming_request->asArray()[i].asString()); + if (conn_pool) { + pending_request.conn_pool_ = conn_pool; + pending_request.handle_ = conn_pool->makeRequest(incoming_request->asArray()[i].asString(), + single_fragment, pending_request); + } + if (!pending_request.handle_) { pending_request.onResponse(Utility::makeError(Response::get().NoUpstreamHost)); } @@ -515,12 +532,11 @@ void SplitKeysSumResultRequest::recreate(Common::Redis::RespValue& request, uint request.asArray().swap(values); } -InstanceImpl::InstanceImpl(ConnPool::InstancePtr&& conn_pool, Stats::Scope& scope, - const std::string& stat_prefix, TimeSource& time_source, - bool latency_in_micros) - : conn_pool_(std::move(conn_pool)), simple_command_handler_(*conn_pool_), - eval_command_handler_(*conn_pool_), mget_handler_(*conn_pool_), mset_handler_(*conn_pool_), - split_keys_sum_result_handler_(*conn_pool_), +InstanceImpl::InstanceImpl(RouterPtr&& router, Stats::Scope& scope, const std::string& stat_prefix, + TimeSource& time_source, bool latency_in_micros) + : router_(std::move(router)), simple_command_handler_(*router_), + eval_command_handler_(*router_), mget_handler_(*router_), mset_handler_(*router_), + split_keys_sum_result_handler_(*router_), 
stats_{ALL_COMMAND_SPLITTER_STATS(POOL_COUNTER_PREFIX(scope, stat_prefix + "splitter."))}, latency_in_micros_(latency_in_micros), time_source_(time_source) { for (const std::string& command : Common::Redis::SupportedCommands::simpleCommands()) { diff --git a/source/extensions/filters/network/redis_proxy/command_splitter_impl.h b/source/extensions/filters/network/redis_proxy/command_splitter_impl.h index 21eb847c73cf0..5ca017ca8fdb9 100644 --- a/source/extensions/filters/network/redis_proxy/command_splitter_impl.h +++ b/source/extensions/filters/network/redis_proxy/command_splitter_impl.h @@ -17,6 +17,7 @@ #include "extensions/filters/network/common/redis/client_impl.h" #include "extensions/filters/network/redis_proxy/command_splitter.h" #include "extensions/filters/network/redis_proxy/conn_pool.h" +#include "extensions/filters/network/redis_proxy/router.h" namespace Envoy { namespace Extensions { @@ -68,9 +69,9 @@ class CommandHandler { class CommandHandlerBase { protected: - CommandHandlerBase(ConnPool::Instance& conn_pool) : conn_pool_(conn_pool) {} + CommandHandlerBase(Router& router) : router_(router) {} - ConnPool::Instance& conn_pool_; + Router& router_; }; class SplitRequestBase : public SplitRequest { @@ -114,7 +115,7 @@ class SingleServerRequest : public SplitRequestBase, public Common::Redis::Clien : SplitRequestBase(command_stats, time_source, latency_in_micros), callbacks_(callbacks) {} SplitCallbacks& callbacks_; - ConnPool::Instance* conn_pool_{}; + ConnPool::InstanceSharedPtr conn_pool_; Common::Redis::Client::PoolRequest* handle_{}; Common::Redis::RespValuePtr incoming_request_; }; @@ -124,8 +125,7 @@ class SingleServerRequest : public SplitRequestBase, public Common::Redis::Clien */ class SimpleRequest : public SingleServerRequest { public: - static SplitRequestPtr create(ConnPool::Instance& conn_pool, - Common::Redis::RespValuePtr&& incoming_request, + static SplitRequestPtr create(Router& router, Common::Redis::RespValuePtr&& incoming_request, 
SplitCallbacks& callbacks, CommandStats& command_stats, TimeSource& time_source, bool latency_in_micros); @@ -140,8 +140,7 @@ class SimpleRequest : public SingleServerRequest { */ class EvalRequest : public SingleServerRequest { public: - static SplitRequestPtr create(ConnPool::Instance& conn_pool, - Common::Redis::RespValuePtr&& incoming_request, + static SplitRequestPtr create(Router& router, Common::Redis::RespValuePtr&& incoming_request, SplitCallbacks& callbacks, CommandStats& command_stats, TimeSource& time_source, bool latency_in_micros); @@ -184,13 +183,13 @@ class FragmentedRequest : public SplitRequestBase { FragmentedRequest& parent_; const uint32_t index_; Common::Redis::Client::PoolRequest* handle_{}; - ConnPool::Instance* conn_pool_{}; + ConnPool::InstanceSharedPtr conn_pool_; }; virtual void onChildResponse(Common::Redis::RespValuePtr&& value, uint32_t index) PURE; void onChildFailure(uint32_t index); bool onChildRedirection(const Common::Redis::RespValue& value, uint32_t index, - ConnPool::Instance* conn_pool); + const ConnPool::InstanceSharedPtr& conn_pool); virtual void recreate(Common::Redis::RespValue& request, uint32_t index) PURE; SplitCallbacks& callbacks_; @@ -208,8 +207,7 @@ class FragmentedRequest : public SplitRequestBase { */ class MGETRequest : public FragmentedRequest, Logger::Loggable { public: - static SplitRequestPtr create(ConnPool::Instance& conn_pool, - Common::Redis::RespValuePtr&& incoming_request, + static SplitRequestPtr create(Router& router, Common::Redis::RespValuePtr&& incoming_request, SplitCallbacks& callbacks, CommandStats& command_stats, TimeSource& time_source, bool latency_in_micros); @@ -231,8 +229,7 @@ class MGETRequest : public FragmentedRequest, Logger::Loggable { public: - static SplitRequestPtr create(ConnPool::Instance& conn_pool, - Common::Redis::RespValuePtr&& incoming_request, + static SplitRequestPtr create(Router& router, Common::Redis::RespValuePtr&& incoming_request, SplitCallbacks& callbacks, 
CommandStats& command_stats, TimeSource& time_source, bool latency_in_micros); @@ -255,8 +252,7 @@ class SplitKeysSumResultRequest : public FragmentedRequest, Logger::Loggable { public: - static SplitRequestPtr create(ConnPool::Instance& conn_pool, - Common::Redis::RespValuePtr&& incoming_request, + static SplitRequestPtr create(Router& router, Common::Redis::RespValuePtr&& incoming_request, SplitCallbacks& callbacks, CommandStats& command_stats, TimeSource& time_source, bool latency_in_micros); @@ -277,12 +273,12 @@ class MSETRequest : public FragmentedRequest, Logger::Loggable class CommandHandlerFactory : public CommandHandler, CommandHandlerBase { public: - CommandHandlerFactory(ConnPool::Instance& conn_pool) : CommandHandlerBase(conn_pool) {} + CommandHandlerFactory(Router& router) : CommandHandlerBase(router) {} SplitRequestPtr startRequest(Common::Redis::RespValuePtr&& request, SplitCallbacks& callbacks, CommandStats& command_stats, TimeSource& time_source, bool latency_in_micros) { - return RequestClass::create(conn_pool_, std::move(request), callbacks, command_stats, - time_source, latency_in_micros); + return RequestClass::create(router_, std::move(request), callbacks, command_stats, time_source, + latency_in_micros); } }; @@ -304,8 +300,8 @@ struct InstanceStats { class InstanceImpl : public Instance, Logger::Loggable { public: - InstanceImpl(ConnPool::InstancePtr&& conn_pool, Stats::Scope& scope, - const std::string& stat_prefix, TimeSource& time_source, bool latency_in_micros); + InstanceImpl(RouterPtr&& router, Stats::Scope& scope, const std::string& stat_prefix, + TimeSource& time_source, bool latency_in_micros); // RedisProxy::CommandSplitter::Instance SplitRequestPtr makeRequest(Common::Redis::RespValuePtr&& request, @@ -323,7 +319,7 @@ class InstanceImpl : public Instance, Logger::Loggable { CommandHandler& handler); void onInvalidRequest(SplitCallbacks& callbacks); - ConnPool::InstancePtr conn_pool_; + RouterPtr router_; CommandHandlerFactory 
simple_command_handler_; CommandHandlerFactory eval_command_handler_; CommandHandlerFactory mget_handler_; diff --git a/source/extensions/filters/network/redis_proxy/config.cc b/source/extensions/filters/network/redis_proxy/config.cc index bae74e8633713..9838c2cc5ebf4 100644 --- a/source/extensions/filters/network/redis_proxy/config.cc +++ b/source/extensions/filters/network/redis_proxy/config.cc @@ -11,8 +11,8 @@ #include "extensions/filters/network/common/redis/client_impl.h" #include "extensions/filters/network/common/redis/codec_impl.h" #include "extensions/filters/network/redis_proxy/command_splitter_impl.h" -#include "extensions/filters/network/redis_proxy/conn_pool_impl.h" #include "extensions/filters/network/redis_proxy/proxy_filter.h" +#include "extensions/filters/network/redis_proxy/router_impl.h" namespace Envoy { namespace Extensions { @@ -24,18 +24,43 @@ Network::FilterFactoryCb RedisProxyFilterConfigFactory::createFilterFactoryFromP Server::Configuration::FactoryContext& context) { ASSERT(!proto_config.stat_prefix().empty()); - ASSERT(!proto_config.cluster().empty()); ASSERT(proto_config.has_settings()); ProxyFilterConfigSharedPtr filter_config(std::make_shared( proto_config, context.scope(), context.drainDecision(), context.runtime())); - ConnPool::InstancePtr conn_pool( - new ConnPool::InstanceImpl(filter_config->cluster_name_, context.clusterManager(), - Common::Redis::Client::ClientFactoryImpl::instance_, - context.threadLocal(), proto_config.settings())); - std::shared_ptr splitter(new CommandSplitter::InstanceImpl( - std::move(conn_pool), context.scope(), filter_config->stat_prefix_, context.timeSource(), - proto_config.latency_in_micros())); + + envoy::config::filter::network::redis_proxy::v2::RedisProxy::PrefixRoutes prefix_routes( + proto_config.prefix_routes()); + + // set the catch-all route from the deprecated cluster and settings parameters. 
+ if (prefix_routes.catch_all_cluster().empty() && prefix_routes.routes_size() == 0) { + if (proto_config.cluster().empty()) { + throw EnvoyException("cannot configure a redis-proxy without any upstream"); + } + + prefix_routes.set_catch_all_cluster(proto_config.cluster()); + } + + std::set unique_clusters; + for (auto& route : prefix_routes.routes()) { + unique_clusters.emplace(route.cluster()); + } + unique_clusters.emplace(prefix_routes.catch_all_cluster()); + + Upstreams upstreams; + for (auto& cluster : unique_clusters) { + upstreams.emplace(cluster, std::make_shared( + cluster, context.clusterManager(), + Common::Redis::Client::ClientFactoryImpl::instance_, + context.threadLocal(), proto_config.settings())); + } + + auto router = std::make_unique(prefix_routes, std::move(upstreams)); + + std::shared_ptr splitter = + std::make_shared( + std::move(router), context.scope(), filter_config->stat_prefix_, context.timeSource(), + proto_config.latency_in_micros()); return [splitter, filter_config](Network::FilterManager& filter_manager) -> void { Common::Redis::DecoderFactoryImpl factory; filter_manager.addReadFilter(std::make_shared( diff --git a/source/extensions/filters/network/redis_proxy/conn_pool.h b/source/extensions/filters/network/redis_proxy/conn_pool.h index 44ec83c76779b..a926f568f062a 100644 --- a/source/extensions/filters/network/redis_proxy/conn_pool.h +++ b/source/extensions/filters/network/redis_proxy/conn_pool.h @@ -50,7 +50,7 @@ class Instance { Common::Redis::Client::PoolCallbacks& callbacks) PURE; }; -typedef std::unique_ptr InstancePtr; +typedef std::shared_ptr InstanceSharedPtr; } // namespace ConnPool } // namespace RedisProxy diff --git a/source/extensions/filters/network/redis_proxy/conn_pool_impl.h b/source/extensions/filters/network/redis_proxy/conn_pool_impl.h index ef39f732d1b9e..8ed565ac50b92 100644 --- a/source/extensions/filters/network/redis_proxy/conn_pool_impl.h +++ b/source/extensions/filters/network/redis_proxy/conn_pool_impl.h 
@@ -40,7 +40,6 @@ class InstanceImpl : public Instance { const std::string& cluster_name, Upstream::ClusterManager& cm, Common::Redis::Client::ClientFactory& client_factory, ThreadLocal::SlotAllocator& tls, const envoy::config::filter::network::redis_proxy::v2::RedisProxy::ConnPoolSettings& config); - // RedisProxy::ConnPool::Instance Common::Redis::Client::PoolRequest* makeRequest(const std::string& key, const Common::Redis::RespValue& request, diff --git a/source/extensions/filters/network/redis_proxy/proxy_filter.cc b/source/extensions/filters/network/redis_proxy/proxy_filter.cc index 4fa59b5ad320e..acc5ccca0e211 100644 --- a/source/extensions/filters/network/redis_proxy/proxy_filter.cc +++ b/source/extensions/filters/network/redis_proxy/proxy_filter.cc @@ -17,7 +17,7 @@ namespace RedisProxy { ProxyFilterConfig::ProxyFilterConfig( const envoy::config::filter::network::redis_proxy::v2::RedisProxy& config, Stats::Scope& scope, const Network::DrainDecision& drain_decision, Runtime::Loader& runtime) - : drain_decision_(drain_decision), runtime_(runtime), cluster_name_(config.cluster()), + : drain_decision_(drain_decision), runtime_(runtime), stat_prefix_(fmt::format("redis.{}.", config.stat_prefix())), stats_(generateStats(stat_prefix_, scope)) {} diff --git a/source/extensions/filters/network/redis_proxy/proxy_filter.h b/source/extensions/filters/network/redis_proxy/proxy_filter.h index 3f8dc62d6eecd..ae2141a322d94 100644 --- a/source/extensions/filters/network/redis_proxy/proxy_filter.h +++ b/source/extensions/filters/network/redis_proxy/proxy_filter.h @@ -56,7 +56,6 @@ class ProxyFilterConfig { const Network::DrainDecision& drain_decision_; Runtime::Loader& runtime_; - const std::string cluster_name_; const std::string stat_prefix_; const std::string redis_drain_close_runtime_key_{"redis.drain_close_enabled"}; ProxyStats stats_; diff --git a/source/extensions/filters/network/redis_proxy/router.h b/source/extensions/filters/network/redis_proxy/router.h new file 
mode 100644 index 0000000000000..5312e34cea4be --- /dev/null +++ b/source/extensions/filters/network/redis_proxy/router.h @@ -0,0 +1,37 @@ +#pragma once + +#include +#include + +#include "envoy/common/pure.h" +#include "envoy/config/filter/network/redis_proxy/v2/redis_proxy.pb.h" + +#include "extensions/filters/network/redis_proxy/conn_pool.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace RedisProxy { + +/* + * Decorator of a connection pool in order to enable key based routing. + */ +class Router { +public: + virtual ~Router() = default; + + /** + * Returns a connection pool that matches a given route. When no match is found, the catch all + * pool is used. When remove prefix is set to true, the prefix will be removed from the key. + * @param key mutable reference to the key of the current command. + * @return a handle to the connection pool. + */ + virtual ConnPool::InstanceSharedPtr upstreamPool(std::string& key) PURE; +}; + +typedef std::unique_ptr RouterPtr; + +} // namespace RedisProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/network/redis_proxy/router_impl.cc b/source/extensions/filters/network/redis_proxy/router_impl.cc new file mode 100644 index 0000000000000..cd963e1ec778c --- /dev/null +++ b/source/extensions/filters/network/redis_proxy/router_impl.cc @@ -0,0 +1,61 @@ +#include "extensions/filters/network/redis_proxy/router_impl.h" + +#include "common/common/fmt.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace RedisProxy { + +PrefixRoutes::PrefixRoutes( + const envoy::config::filter::network::redis_proxy::v2::RedisProxy::PrefixRoutes& config, + Upstreams&& upstreams) + : case_insensitive_(config.case_insensitive()), upstreams_(std::move(upstreams)), + catch_all_upstream_(config.catch_all_cluster().empty() + ? 
nullptr + : upstreams_.at(config.catch_all_cluster())) { + + for (auto const& route : config.routes()) { + std::string copy(route.prefix()); + + if (case_insensitive_) { + to_lower_table_.toLowerCase(copy); + } + + auto success = prefix_lookup_table_.add(copy.c_str(), + std::make_shared(Prefix{ + route.prefix(), + route.remove_prefix(), + upstreams_.at(route.cluster()), + }), + false); + if (!success) { + throw EnvoyException(fmt::format("prefix `{}` already exists.", route.prefix())); + } + } +} + +ConnPool::InstanceSharedPtr PrefixRoutes::upstreamPool(std::string& key) { + PrefixPtr value = nullptr; + if (case_insensitive_) { + std::string copy(key); + to_lower_table_.toLowerCase(copy); + value = prefix_lookup_table_.findLongestPrefix(copy.c_str()); + } else { + value = prefix_lookup_table_.findLongestPrefix(key.c_str()); + } + + if (value != nullptr) { + if (value->remove_prefix) { + key.erase(0, value->prefix.length()); + } + return value->upstream; + } + + return catch_all_upstream_; +} + +} // namespace RedisProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/network/redis_proxy/router_impl.h b/source/extensions/filters/network/redis_proxy/router_impl.h new file mode 100644 index 0000000000000..2744e88eff4cd --- /dev/null +++ b/source/extensions/filters/network/redis_proxy/router_impl.h @@ -0,0 +1,53 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +#include "envoy/config/filter/network/redis_proxy/v2/redis_proxy.pb.h" +#include "envoy/thread_local/thread_local.h" +#include "envoy/upstream/cluster_manager.h" + +#include "common/common/to_lower_table.h" + +#include "extensions/filters/network/redis_proxy/conn_pool_impl.h" +#include "extensions/filters/network/redis_proxy/router.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace RedisProxy { + +typedef std::map Upstreams; + +class PrefixRoutes : public Router { +public: + 
PrefixRoutes(const envoy::config::filter::network::redis_proxy::v2::RedisProxy::PrefixRoutes& + prefix_routes, + Upstreams&& upstreams); + + ConnPool::InstanceSharedPtr upstreamPool(std::string& key) override; + +private: + struct Prefix { + const std::string prefix; + const bool remove_prefix; + ConnPool::InstanceSharedPtr upstream; + }; + + typedef std::shared_ptr PrefixPtr; + + TrieLookupTable prefix_lookup_table_; + const ToLowerTable to_lower_table_; + const bool case_insensitive_; + Upstreams upstreams_; + ConnPool::InstanceSharedPtr catch_all_upstream_; +}; + +} // namespace RedisProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/common/common/utility_test.cc b/test/common/common/utility_test.cc index 6434cd140280b..e2a084651065a 100644 --- a/test/common/common/utility_test.cc +++ b/test/common/common/utility_test.cc @@ -828,4 +828,41 @@ TEST(DateFormatter, FromTimeSameWildcard) { DateFormatter("%Y-%m-%dT%H:%M:%S.000Z%1f%2f").fromTime(time1)); } +TEST(TrieLookupTable, AddItems) { + TrieLookupTable trie; + EXPECT_TRUE(trie.add("foo", "a")); + EXPECT_TRUE(trie.add("bar", "b")); + EXPECT_EQ("a", trie.find("foo")); + EXPECT_EQ("b", trie.find("bar")); + + // overwrite_existing = false + EXPECT_FALSE(trie.add("foo", "c", false)); + EXPECT_EQ("a", trie.find("foo")); + + // overwrite_existing = true + EXPECT_TRUE(trie.add("foo", "c")); + EXPECT_EQ("c", trie.find("foo")); +} + +TEST(TrieLookupTable, LongestPrefix) { + TrieLookupTable trie; + EXPECT_TRUE(trie.add("foo", "a")); + EXPECT_TRUE(trie.add("bar", "b")); + EXPECT_TRUE(trie.add("baro", "c")); + + EXPECT_EQ("a", trie.find("foo")); + EXPECT_EQ("a", trie.findLongestPrefix("foo")); + EXPECT_EQ("a", trie.findLongestPrefix("foosball")); + + EXPECT_EQ("b", trie.find("bar")); + EXPECT_EQ("b", trie.findLongestPrefix("bar")); + EXPECT_EQ("b", trie.findLongestPrefix("baritone")); + EXPECT_EQ("c", trie.findLongestPrefix("barometer")); + + EXPECT_EQ(nullptr, 
trie.find("toto")); + EXPECT_EQ(nullptr, trie.findLongestPrefix("toto")); + EXPECT_EQ(nullptr, trie.find(" ")); + EXPECT_EQ(nullptr, trie.findLongestPrefix(" ")); +} + } // namespace Envoy diff --git a/test/extensions/filters/network/redis_proxy/BUILD b/test/extensions/filters/network/redis_proxy/BUILD index 2ae0acae9a7b0..eb6d003e5927e 100644 --- a/test/extensions/filters/network/redis_proxy/BUILD +++ b/test/extensions/filters/network/redis_proxy/BUILD @@ -75,6 +75,7 @@ envoy_cc_mock( "//source/extensions/filters/network/common/redis:codec_lib", "//source/extensions/filters/network/redis_proxy:command_splitter_interface", "//source/extensions/filters/network/redis_proxy:conn_pool_interface", + "//source/extensions/filters/network/redis_proxy:router_interface", ], ) @@ -106,6 +107,18 @@ envoy_extension_cc_test_binary( ], ) +envoy_extension_cc_test( + name = "router_impl_test", + srcs = ["router_impl_test.cc"], + extension_name = "envoy.filters.network.redis_proxy", + deps = [ + ":redis_mocks", + "//source/extensions/filters/network/redis_proxy:router_lib", + "//test/extensions/filters/network/common/redis:redis_mocks", + "//test/test_common:utility_lib", + ], +) + envoy_extension_cc_test( name = "redis_proxy_integration_test", size = "small", diff --git a/test/extensions/filters/network/redis_proxy/command_lookup_speed_test.cc b/test/extensions/filters/network/redis_proxy/command_lookup_speed_test.cc index aa1964df7f7ad..555088f075731 100644 --- a/test/extensions/filters/network/redis_proxy/command_lookup_speed_test.cc +++ b/test/extensions/filters/network/redis_proxy/command_lookup_speed_test.cc @@ -32,17 +32,8 @@ class NoOpSplitCallbacks : public CommandSplitter::SplitCallbacks { void onResponse(Common::Redis::RespValuePtr&&) override {} }; -class NullInstanceImpl : public ConnPool::Instance { - Common::Redis::Client::PoolRequest* makeRequest(const std::string&, - const Common::Redis::RespValue&, - Common::Redis::Client::PoolCallbacks&) override { - return 
nullptr; - } - Common::Redis::Client::PoolRequest* - makeRequestToHost(const std::string&, const Common::Redis::RespValue&, - Common::Redis::Client::PoolCallbacks&) override { - return nullptr; - } +class NullRouterImpl : public Router { + ConnPool::InstanceSharedPtr upstreamPool(std::string&) override { return nullptr; } }; class CommandLookUpSpeedTest { @@ -73,11 +64,11 @@ class CommandLookUpSpeedTest { } } - ConnPool::Instance* conn_pool_{new NullInstanceImpl()}; + Router* router_{new NullRouterImpl()}; Stats::IsolatedStoreImpl store_; Event::SimulatedTimeSystem time_system_; - CommandSplitter::InstanceImpl splitter_{ConnPool::InstancePtr{conn_pool_}, store_, "redis.foo.", - time_system_, false}; + CommandSplitter::InstanceImpl splitter_{RouterPtr{router_}, store_, "redis.foo.", time_system_, + false}; NoOpSplitCallbacks callbacks_; CommandSplitter::SplitRequestPtr handle_; }; diff --git a/test/extensions/filters/network/redis_proxy/command_splitter_impl_test.cc b/test/extensions/filters/network/redis_proxy/command_splitter_impl_test.cc index ae7a9839147b6..207f6871aed8e 100644 --- a/test/extensions/filters/network/redis_proxy/command_splitter_impl_test.cc +++ b/test/extensions/filters/network/redis_proxy/command_splitter_impl_test.cc @@ -38,6 +38,16 @@ namespace NetworkFilters { namespace RedisProxy { namespace CommandSplitter { +class PassthruRouter : public Router { +public: + PassthruRouter(ConnPool::InstanceSharedPtr conn_pool) : conn_pool_(conn_pool) {} + + ConnPool::InstanceSharedPtr upstreamPool(std::string&) override { return conn_pool_; } + +private: + ConnPool::InstanceSharedPtr conn_pool_; +}; + class RedisCommandSplitterImplTest : public testing::Test { public: void makeBulkStringArray(Common::Redis::RespValue& value, @@ -55,8 +65,8 @@ class RedisCommandSplitterImplTest : public testing::Test { ConnPool::MockInstance* conn_pool_{new ConnPool::MockInstance()}; NiceMock store_; Event::SimulatedTimeSystem time_system_; - InstanceImpl 
splitter_{ConnPool::InstancePtr{conn_pool_}, store_, "redis.foo.", time_system_, - false}; + InstanceImpl splitter_{std::make_unique(ConnPool::InstanceSharedPtr{conn_pool_}), + store_, "redis.foo.", time_system_, false}; MockSplitCallbacks callbacks_; SplitRequestPtr handle_; }; @@ -226,6 +236,7 @@ TEST_P(RedisSingleServerRequestTest, NoUpstream) { Common::Redis::RespValuePtr request{new Common::Redis::RespValue()}; makeBulkStringArray(*request, {GetParam(), "hello"}); EXPECT_CALL(*conn_pool_, makeRequest("hello", Ref(*request), _)).WillOnce(Return(nullptr)); + Common::Redis::RespValue response; response.type(Common::Redis::RespType::Error); response.asString() = Response::get().NoUpstreamHost; @@ -328,6 +339,7 @@ TEST_F(RedisSingleServerRequestTest, EvalNoUpstream) { Common::Redis::RespValuePtr request{new Common::Redis::RespValue()}; makeBulkStringArray(*request, {"eval", "return {ARGV[1]}", "1", "key", "arg"}); EXPECT_CALL(*conn_pool_, makeRequest("key", Ref(*request), _)).WillOnce(Return(nullptr)); + Common::Redis::RespValue response; response.type(Common::Redis::RespType::Error); response.asString() = Response::get().NoUpstreamHost; @@ -1429,8 +1441,8 @@ class RedisSingleServerRequestWithLatencyMicrosTest : public RedisSingleServerRe } ConnPool::MockInstance* conn_pool_{new ConnPool::MockInstance()}; - InstanceImpl splitter_{ConnPool::InstancePtr{conn_pool_}, store_, "redis.foo.", time_system_, - true}; + InstanceImpl splitter_{std::make_unique(ConnPool::InstanceSharedPtr{conn_pool_}), + store_, "redis.foo.", time_system_, true}; }; TEST_P(RedisSingleServerRequestWithLatencyMicrosTest, Success) { diff --git a/test/extensions/filters/network/redis_proxy/config_test.cc b/test/extensions/filters/network/redis_proxy/config_test.cc index 351fc97a78c8f..ffca740ec4928 100644 --- a/test/extensions/filters/network/redis_proxy/config_test.cc +++ b/test/extensions/filters/network/redis_proxy/config_test.cc @@ -23,6 +23,21 @@ TEST(RedisProxyFilterConfigFactoryTest, 
ValidateFail) { ProtoValidationException); } +TEST(RedisProxyFilterConfigFactoryTest, NoUpstreamDefined) { + envoy::config::filter::network::redis_proxy::v2::RedisProxy::ConnPoolSettings settings; + settings.mutable_op_timeout()->CopyFrom(Protobuf::util::TimeUtil::MillisecondsToDuration(20)); + + envoy::config::filter::network::redis_proxy::v2::RedisProxy config; + config.set_stat_prefix("foo"); + config.mutable_settings()->CopyFrom(settings); + + NiceMock context; + + EXPECT_THROW_WITH_MESSAGE( + RedisProxyFilterConfigFactory().createFilterFactoryFromProto(config, context), EnvoyException, + "cannot configure a redis-proxy without any upstream"); +} + TEST(RedisProxyFilterConfigFactoryTest, RedisProxyNoSettings) { const std::string yaml = R"EOF( cluster: fake_cluster diff --git a/test/extensions/filters/network/redis_proxy/conn_pool_impl_test.cc b/test/extensions/filters/network/redis_proxy/conn_pool_impl_test.cc index 01d93c9a541f6..197cd910b0019 100644 --- a/test/extensions/filters/network/redis_proxy/conn_pool_impl_test.cc +++ b/test/extensions/filters/network/redis_proxy/conn_pool_impl_test.cc @@ -43,6 +43,7 @@ class RedisConnPoolImplTest : public testing::Test, public Common::Redis::Client if (!cluster_exists) { EXPECT_CALL(cm_, get("fake_cluster")).WillOnce(Return(nullptr)); } + conn_pool_ = std::make_unique( cluster_name_, cm_, *this, tls_, Common::Redis::Client::createConnPoolSettings(20, hashtagging, true)); @@ -78,7 +79,7 @@ class RedisConnPoolImplTest : public testing::Test, public Common::Redis::Client const std::string cluster_name_{"fake_cluster"}; NiceMock cm_; NiceMock tls_; - InstancePtr conn_pool_; + InstanceSharedPtr conn_pool_; Upstream::ClusterUpdateCallbacks* update_callbacks_{}; Common::Redis::Client::MockClient* client_{}; Network::Address::InstanceConstSharedPtr test_address_; diff --git a/test/extensions/filters/network/redis_proxy/mocks.cc b/test/extensions/filters/network/redis_proxy/mocks.cc index 7e0ce1eff0bde..3bbb28baba804 100644 
--- a/test/extensions/filters/network/redis_proxy/mocks.cc +++ b/test/extensions/filters/network/redis_proxy/mocks.cc @@ -15,6 +15,9 @@ namespace Extensions { namespace NetworkFilters { namespace RedisProxy { +MockRouter::MockRouter() {} +MockRouter::~MockRouter() {} + namespace ConnPool { MockInstance::MockInstance() {} diff --git a/test/extensions/filters/network/redis_proxy/mocks.h b/test/extensions/filters/network/redis_proxy/mocks.h index ecd104af4cd18..381ef0a19bf59 100644 --- a/test/extensions/filters/network/redis_proxy/mocks.h +++ b/test/extensions/filters/network/redis_proxy/mocks.h @@ -8,6 +8,7 @@ #include "extensions/filters/network/common/redis/codec_impl.h" #include "extensions/filters/network/redis_proxy/command_splitter.h" #include "extensions/filters/network/redis_proxy/conn_pool.h" +#include "extensions/filters/network/redis_proxy/router.h" #include "test/test_common/printers.h" @@ -18,6 +19,14 @@ namespace Extensions { namespace NetworkFilters { namespace RedisProxy { +class MockRouter : public Router { +public: + MockRouter(); + ~MockRouter(); + + MOCK_METHOD1(upstreamPool, ConnPool::InstanceSharedPtr(std::string& key)); +}; + namespace ConnPool { class MockInstance : public Instance { diff --git a/test/extensions/filters/network/redis_proxy/proxy_filter_test.cc b/test/extensions/filters/network/redis_proxy/proxy_filter_test.cc index 333a9687dc501..4cb73b89186b2 100644 --- a/test/extensions/filters/network/redis_proxy/proxy_filter_test.cc +++ b/test/extensions/filters/network/redis_proxy/proxy_filter_test.cc @@ -60,7 +60,7 @@ TEST_F(RedisProxyFilterConfigTest, Normal) { envoy::config::filter::network::redis_proxy::v2::RedisProxy proto_config = parseProtoFromJson(json_string); ProxyFilterConfig config(proto_config, store_, drain_decision_, runtime_); - EXPECT_EQ("fake_cluster", config.cluster_name_); + EXPECT_EQ("redis.foo.", config.stat_prefix_); } TEST_F(RedisProxyFilterConfigTest, BadRedisProxyConfig) { diff --git 
a/test/extensions/filters/network/redis_proxy/redis_proxy_integration_test.cc b/test/extensions/filters/network/redis_proxy/redis_proxy_integration_test.cc index d23c2b2649514..07997b76c0f1d 100644 --- a/test/extensions/filters/network/redis_proxy/redis_proxy_integration_test.cc +++ b/test/extensions/filters/network/redis_proxy/redis_proxy_integration_test.cc @@ -39,10 +39,10 @@ const std::string CONFIG = R"EOF( socket_address: address: 127.0.0.1 port_value: 0 - - endpoint: + - endpoint: address: socket_address: - address: 127.0.0.1 + address: 127.0.0.1 port_value: 0 listeners: name: listener_0 @@ -56,7 +56,7 @@ const std::string CONFIG = R"EOF( config: stat_prefix: redis_stats cluster: cluster_0 - settings: + settings: op_timeout: 5s )EOF"; @@ -65,6 +65,88 @@ const std::string CONFIG_WITH_REDIRECTION = CONFIG + R"EOF( enable_redirection: true )EOF"; +const std::string CONFIG_WITH_ROUTES = R"EOF( +admin: + access_log_path: /dev/null + address: + socket_address: + address: 127.0.0.1 + port_value: 0 +static_resources: + clusters: + - name: cluster_0 + type: STATIC + lb_policy: RANDOM + load_assignment: + cluster_name: cluster_0 + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 0 + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 0 + - name: cluster_1 + type: STATIC + lb_policy: RANDOM + load_assignment: + cluster_name: cluster_1 + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 1 + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 1 + - name: cluster_2 + type: STATIC + lb_policy: RANDOM + load_assignment: + cluster_name: cluster_2 + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 2 + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 2 + listeners: + name: listener_0 + address: + socket_address: + address: 127.0.0.1 
+ port_value: 0 + filter_chains: + filters: + name: envoy.redis_proxy + config: + stat_prefix: redis_stats + settings: + op_timeout: 5s + prefix_routes: + catch_all_cluster: cluster_0 + routes: + - prefix: "foo:" + cluster: cluster_1 + - prefix: "baz:" + cluster: cluster_2 +)EOF"; + // This function encodes commands as an array of bulkstrings as transmitted by Redis clients to // Redis servers, according to the Redis protocol. std::string makeBulkStringArray(std::vector&& command_strings) { @@ -115,7 +197,19 @@ class RedisProxyIntegrationTest : public testing::TestWithParamwrite(request); FakeRawConnectionPtr fake_upstream_connection; - EXPECT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection)); + EXPECT_TRUE(upstream->waitForRawConnection(fake_upstream_connection)); EXPECT_TRUE(fake_upstream_connection->waitForData(request.size(), &proxy_to_server)); // The original request should be the same as the data received by the server. EXPECT_EQ(request, proxy_to_server); @@ -445,5 +549,23 @@ TEST_P(RedisProxyWithRedirectionIntegrationTest, IgnoreRedirectionForAsking) { asking_response.str()); } +// This test verifies that it's possible to route keys to 3 different upstream pools. 
+ +TEST_P(RedisProxyWithRoutesIntegrationTest, SimpleRequestAndResponseRoutedByPrefix) { + initialize(); + + // roundtrip to cluster_0 (catch_all route) + simpleRoundtripToUpstream(fake_upstreams_[0], makeBulkStringArray({"get", "toto"}), + "$3\r\nbar\r\n"); + + // roundtrip to cluster_1 (prefix "foo:" route) + simpleRoundtripToUpstream(fake_upstreams_[2], makeBulkStringArray({"get", "foo:123"}), + "$3\r\nbar\r\n"); + + // roundtrip to cluster_2 (prefix "baz:" route) + simpleRoundtripToUpstream(fake_upstreams_[4], makeBulkStringArray({"get", "baz:123"}), + "$3\r\nbar\r\n"); +} + } // namespace } // namespace Envoy diff --git a/test/extensions/filters/network/redis_proxy/router_impl_test.cc b/test/extensions/filters/network/redis_proxy/router_impl_test.cc new file mode 100644 index 0000000000000..486fcb009994c --- /dev/null +++ b/test/extensions/filters/network/redis_proxy/router_impl_test.cc @@ -0,0 +1,183 @@ +#include + +#include "extensions/filters/network/redis_proxy/conn_pool_impl.h" +#include "extensions/filters/network/redis_proxy/router_impl.h" + +#include "test/extensions/filters/network/common/redis/mocks.h" +#include "test/extensions/filters/network/redis_proxy/mocks.h" +#include "test/test_common/utility.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +using testing::_; +using testing::Eq; +using testing::InSequence; +using testing::Ref; +using testing::Return; +using testing::StrEq; + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace RedisProxy { + +envoy::config::filter::network::redis_proxy::v2::RedisProxy::PrefixRoutes createPrefixRoutes() { + envoy::config::filter::network::redis_proxy::v2::RedisProxy::PrefixRoutes prefix_routes; + auto* routes = prefix_routes.mutable_routes(); + + { + auto* route = routes->Add(); + route->set_prefix("ab"); + route->set_cluster("fake_clusterA"); + } + + { + auto* route = routes->Add(); + route->set_prefix("a"); + route->set_cluster("fake_clusterB"); + } + + return 
prefix_routes; +} + +TEST(PrefixRoutesTest, MissingCatchAll) { + Upstreams upstreams; + upstreams.emplace("fake_clusterA", std::make_shared()); + upstreams.emplace("fake_clusterB", std::make_shared()); + + PrefixRoutes router(createPrefixRoutes(), std::move(upstreams)); + + std::string key("c:bar"); + EXPECT_EQ(nullptr, router.upstreamPool(key)); +} + +TEST(PrefixRoutesTest, RoutedToCatchAll) { + auto upstream_c = std::make_shared(); + + Upstreams upstreams; + upstreams.emplace("fake_clusterA", std::make_shared()); + upstreams.emplace("fake_clusterB", std::make_shared()); + upstreams.emplace("fake_clusterC", upstream_c); + + auto prefix_routes = createPrefixRoutes(); + prefix_routes.set_catch_all_cluster("fake_clusterC"); + + PrefixRoutes router(prefix_routes, std::move(upstreams)); + + std::string key("c:bar"); + EXPECT_EQ(upstream_c, router.upstreamPool(key)); +} + +TEST(PrefixRoutesTest, RoutedToLongestPrefix) { + auto upstream_a = std::make_shared(); + + Upstreams upstreams; + upstreams.emplace("fake_clusterA", upstream_a); + upstreams.emplace("fake_clusterB", std::make_shared()); + + PrefixRoutes router(createPrefixRoutes(), std::move(upstreams)); + + std::string key("ab:bar"); + EXPECT_EQ(upstream_a, router.upstreamPool(key)); +} + +TEST(PrefixRoutesTest, CaseUnsensitivePrefix) { + auto upstream_a = std::make_shared(); + + Upstreams upstreams; + upstreams.emplace("fake_clusterA", upstream_a); + upstreams.emplace("fake_clusterB", std::make_shared()); + + auto prefix_routes = createPrefixRoutes(); + prefix_routes.set_case_insensitive(true); + + PrefixRoutes router(prefix_routes, std::move(upstreams)); + + std::string key("AB:bar"); + EXPECT_EQ(upstream_a, router.upstreamPool(key)); +} + +TEST(PrefixRoutesTest, RemovePrefix) { + auto upstream_a = std::make_shared(); + + Upstreams upstreams; + upstreams.emplace("fake_clusterA", upstream_a); + upstreams.emplace("fake_clusterB", std::make_shared()); + + auto prefix_routes = createPrefixRoutes(); + + { + auto* route 
= prefix_routes.mutable_routes()->Add(); + route->set_prefix("abc"); + route->set_cluster("fake_clusterA"); + route->set_remove_prefix(true); + } + + PrefixRoutes router(prefix_routes, std::move(upstreams)); + + std::string key("abc:bar"); + EXPECT_EQ(upstream_a, router.upstreamPool(key)); + EXPECT_EQ(":bar", key); +} + +TEST(PrefixRoutesTest, RoutedToShortestPrefix) { + auto upstream_b = std::make_shared(); + + Upstreams upstreams; + upstreams.emplace("fake_clusterA", std::make_shared()); + upstreams.emplace("fake_clusterB", upstream_b); + + PrefixRoutes router(createPrefixRoutes(), std::move(upstreams)); + + std::string key("a:bar"); + EXPECT_EQ(upstream_b, router.upstreamPool(key)); + EXPECT_EQ("a:bar", key); +} + +TEST(PrefixRoutesTest, DifferentPrefixesSameUpstream) { + auto upstream_b = std::make_shared(); + + Upstreams upstreams; + upstreams.emplace("fake_clusterA", std::make_shared()); + upstreams.emplace("fake_clusterB", upstream_b); + + auto prefix_routes = createPrefixRoutes(); + + { + auto* route = prefix_routes.mutable_routes()->Add(); + route->set_prefix("also_route_to_b"); + route->set_cluster("fake_clusterB"); + } + + PrefixRoutes router(prefix_routes, std::move(upstreams)); + + std::string key1("a:bar"); + EXPECT_EQ(upstream_b, router.upstreamPool(key1)); + + std::string key2("also_route_to_b:bar"); + EXPECT_EQ(upstream_b, router.upstreamPool(key2)); +} + +TEST(PrefixRoutesTest, DuplicatePrefix) { + Upstreams upstreams; + upstreams.emplace("fake_clusterA", std::make_shared()); + upstreams.emplace("fake_clusterB", std::make_shared()); + upstreams.emplace("this_will_throw", std::make_shared()); + + auto prefix_routes = createPrefixRoutes(); + + { + auto* route = prefix_routes.mutable_routes()->Add(); + route->set_prefix("ab"); + route->set_cluster("this_will_throw"); + } + + EXPECT_THROW_WITH_MESSAGE(PrefixRoutes router(prefix_routes, std::move(upstreams)), + EnvoyException, "prefix `ab` already exists.") +} + +} // namespace RedisProxy +} // 
namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy From 3a596a4b0f8a8c3e60ff6240508a1d7395a9bacd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Dan=20No=C3=A9?= Date: Tue, 16 Apr 2019 14:29:29 -0400 Subject: [PATCH 130/165] Remove HeaderString::c_str() and migrate callers to getStringView() (#6564) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Remove the `HeaderString::c_str()` API, and migrate all callers of it to `getStringView()` and `string_view` style usage (ie, `absl::string_view::find` instead of C style comparisons) wherever appropriate. Risk Level: Medium. No logic changes intended, but this is delicate and risky code and a large portion of the code base was touched. Testing: `bazel test //test/...` Docs Changes: None Release Notes: None Fixes #6494 Signed-off-by: Dan Noé --- include/envoy/http/header_map.h | 27 ++- .../common/access_log/access_log_formatter.cc | 2 +- source/common/access_log/access_log_impl.cc | 3 +- source/common/common/utility.h | 13 +- source/common/grpc/common.cc | 32 +-- .../common/grpc/google_async_client_impl.cc | 3 +- source/common/http/async_client_impl.cc | 2 +- source/common/http/conn_manager_impl.cc | 13 +- source/common/http/conn_manager_utility.cc | 3 +- source/common/http/header_map_impl.cc | 32 +-- source/common/http/header_map_impl.h | 6 +- source/common/http/header_utility.cc | 17 +- source/common/http/http1/codec_impl.cc | 24 +- source/common/http/http1/codec_impl.h | 7 + source/common/http/http2/codec_impl.cc | 11 +- source/common/http/http2/codec_impl.h | 2 +- source/common/http/utility.cc | 46 ++-- source/common/http/utility.h | 7 +- source/common/router/config_impl.cc | 47 ++-- source/common/router/retry_state_impl.cc | 9 +- source/common/router/router.cc | 19 +- source/common/router/router_ratelimit.cc | 3 +- source/common/router/shadow_writer_impl.cc | 7 +- source/common/runtime/uuid_util.cc | 2 +- source/common/runtime/uuid_util.h | 4 +- 
source/common/tracing/http_tracer_impl.cc | 22 +- source/common/upstream/health_checker_impl.cc | 3 +- .../common/upstream/original_dst_cluster.cc | 6 +- .../http_grpc/grpc_access_log_impl.cc | 33 ++- .../common/ext_authz/ext_authz_http_impl.cc | 7 +- .../filters/http/common/aws/utility.cc | 8 +- .../filters/http/cors/cors_filter.cc | 6 +- .../http/dynamo/dynamo_request_parser.cc | 2 +- .../filters/http/fault/fault_filter.cc | 6 +- .../filters/http/fault/fault_filter.h | 4 +- .../grpc_http1_bridge/http1_bridge_filter.cc | 4 +- .../http/grpc_http1_reverse_bridge/filter.cc | 5 +- .../json_transcoder_filter.cc | 4 +- .../filters/http/grpc_web/grpc_web_filter.cc | 22 +- .../filters/http/grpc_web/grpc_web_filter.h | 2 +- .../filters/http/gzip/gzip_filter.cc | 31 +-- .../filters/http/jwt_authn/extractor.cc | 2 +- .../filters/http/jwt_authn/matcher.cc | 10 +- .../extensions/filters/http/lua/lua_filter.cc | 12 +- .../extensions/filters/http/lua/wrappers.cc | 9 +- .../filters/http/squash/squash_filter.cc | 2 +- .../filters/http/tap/tap_config_impl.cc | 4 +- .../router/router_ratelimit_impl.cc | 3 +- .../thrift_proxy/twitter_protocol_impl.cc | 14 +- .../common/ot/opentracing_driver_impl.cc | 12 +- .../tracers/zipkin/span_context_extractor.cc | 10 +- .../tracers/zipkin/zipkin_tracer_impl.cc | 9 +- .../config/http_subscription_test_harness.h | 10 +- .../config/subscription_factory_test.cc | 7 +- test/common/grpc/common_test.cc | 82 +++---- test/common/http/async_client_impl_test.cc | 2 +- test/common/http/conn_manager_impl_test.cc | 50 ++--- test/common/http/conn_manager_utility_test.cc | 2 +- test/common/http/header_map_impl_fuzz_test.cc | 4 +- test/common/http/header_map_impl_test.cc | 206 ++++++++++-------- test/common/http/header_utility_test.cc | 4 +- test/common/http/utility_test.cc | 18 +- test/common/router/header_formatter_test.cc | 6 +- test/common/router/router_test.cc | 8 +- test/common/router/shadow_writer_impl_test.cc | 2 +- 
.../upstream/health_checker_impl_test.cc | 54 ++--- .../grpc_access_log_integration_test.cc | 13 +- .../buffer/buffer_filter_integration_test.cc | 6 +- .../http/common/aws/signer_impl_test.cc | 69 +++--- .../grpc_json_transcoder_integration_test.cc | 5 +- .../json_transcoder_filter_test.cc | 2 +- .../http/grpc_web/grpc_web_filter_test.cc | 23 +- .../http/gzip/gzip_filter_integration_test.cc | 28 +-- .../http/health_check/health_check_test.cc | 25 ++- .../http/jwt_authn/filter_integration_test.cc | 20 +- .../filters/http/lua/lua_integration_test.cc | 96 ++++---- .../http/rbac/rbac_filter_integration_test.cc | 16 +- .../squash/squash_filter_integration_test.cc | 21 +- .../http/tap/tap_filter_integration_test.cc | 2 +- .../metrics_service_integration_test.cc | 10 +- .../datadog/datadog_tracer_impl_test.cc | 5 +- .../lightstep/lightstep_tracer_impl_test.cc | 48 ++-- .../tracers/zipkin/zipkin_tracer_impl_test.cc | 22 +- .../tls/integration/ssl_integration_test.cc | 6 +- test/fuzz/utility.h | 4 +- test/integration/cds_integration_test.cc | 4 +- test/integration/hds_integration_test.cc | 14 +- test/integration/http2_integration_test.cc | 48 ++-- .../http2_upstream_integration_test.cc | 8 +- test/integration/http_integration.cc | 56 ++--- .../idle_timeout_integration_test.cc | 22 +- test/integration/integration_admin_test.cc | 140 ++++++------ test/integration/integration_test.cc | 30 +-- .../load_stats_integration_test.cc | 15 +- test/integration/overload_integration_test.cc | 14 +- test/integration/protocol_integration_test.cc | 62 +++--- .../integration/ratelimit_integration_test.cc | 27 +-- test/integration/redirect_integration_test.cc | 18 +- .../sds_dynamic_integration_test.cc | 2 +- test/integration/server.cc | 2 +- .../integration/websocket_integration_test.cc | 12 +- test/integration/xfcc_integration_test.cc | 4 +- test/test_common/printers.cc | 3 +- test/test_common/utility.cc | 6 +- test/test_common/utility.h | 3 +- 105 files changed, 1065 insertions(+), 894 
deletions(-) diff --git a/include/envoy/http/header_map.h b/include/envoy/http/header_map.h index 8a094c1d793ce..eef9498593ec3 100644 --- a/include/envoy/http/header_map.h +++ b/include/envoy/http/header_map.h @@ -118,11 +118,6 @@ class HeaderString { */ char* buffer() { return buffer_.dynamic_; } - /** - * @return a null terminated C string. - */ - const char* c_str() const { return buffer_.ref_; } - /** * @return an absl::string_view. */ @@ -143,14 +138,24 @@ class HeaderString { /** * @return whether a substring exists in the string. + * + * TODO(dnoe): Eliminate this by migrating callers to use string_view find + * directly (#6580) */ - bool find(const char* str) const { return strstr(c_str(), str); } + bool find(const char* str) const { + return getStringView().find(absl::string_view(str)) != absl::string_view::npos; + } /** * Set the value of the string by copying data into it. This overwrites any existing string. */ void setCopy(const char* data, uint32_t size); + /** + * Set the value of the string by copying data into it. This overwrites any existing string. + */ + void setCopy(absl::string_view view); + /** * Set the value of the string to an integer. This overwrites any existing string. 
*/ @@ -173,8 +178,10 @@ class HeaderString { */ Type type() const { return type_; } - bool operator==(const char* rhs) const { return 0 == strcmp(c_str(), rhs); } - bool operator!=(const char* rhs) const { return 0 != strcmp(c_str(), rhs); } + bool operator==(const char* rhs) const { return getStringView() == absl::string_view(rhs); } + bool operator==(absl::string_view rhs) const { return getStringView() == rhs; } + bool operator!=(const char* rhs) const { return getStringView() != absl::string_view(rhs); } + bool operator!=(absl::string_view rhs) const { return getStringView() != rhs; } private: union Buffer { @@ -524,8 +531,8 @@ class HeaderMap { friend std::ostream& operator<<(std::ostream& os, const HeaderMap& headers) { headers.iterate( [](const HeaderEntry& header, void* context) -> HeaderMap::Iterate { - *static_cast(context) - << "'" << header.key().c_str() << "', '" << header.value().c_str() << "'\n"; + *static_cast(context) << "'" << header.key().getStringView() << "', '" + << header.value().getStringView() << "'\n"; return HeaderMap::Iterate::Continue; }, &os); diff --git a/source/common/access_log/access_log_formatter.cc b/source/common/access_log/access_log_formatter.cc index 15a9bdb825b99..6f51ee2cfc615 100644 --- a/source/common/access_log/access_log_formatter.cc +++ b/source/common/access_log/access_log_formatter.cc @@ -434,7 +434,7 @@ std::string HeaderFormatter::format(const Http::HeaderMap& headers) const { if (!header) { header_value_string = UnspecifiedValueString; } else { - header_value_string = header->value().c_str(); + header_value_string = std::string(header->value().getStringView()); } if (max_length_ && header_value_string.length() > max_length_.value()) { diff --git a/source/common/access_log/access_log_impl.cc b/source/common/access_log/access_log_impl.cc index da75fedb5fb78..852b6e4c47461 100644 --- a/source/common/access_log/access_log_impl.cc +++ b/source/common/access_log/access_log_impl.cc @@ -118,9 +118,10 @@ bool 
RuntimeFilter::evaluate(const StreamInfo::StreamInfo&, const Http::HeaderMa const Http::HeaderMap&, const Http::HeaderMap&) { const Http::HeaderEntry* uuid = request_header.RequestId(); uint64_t random_value; + // TODO(dnoe): Migrate uuidModBy to take string_view (#6580) if (use_independent_randomness_ || uuid == nullptr || !UuidUtils::uuidModBy( - uuid->value().c_str(), random_value, + std::string(uuid->value().getStringView()), random_value, ProtobufPercentHelper::fractionalPercentDenominatorToInt(percent_.denominator()))) { random_value = random_.random(); } diff --git a/source/common/common/utility.h b/source/common/common/utility.h index 9eaddb7f64da1..11bf3735314f2 100644 --- a/source/common/common/utility.h +++ b/source/common/common/utility.h @@ -572,14 +572,13 @@ template struct TrieLookupTable { * exists. * @return false when a value already exists for the given key. */ - bool add(const char* key, Value value, bool overwrite_existing = true) { + bool add(absl::string_view key, Value value, bool overwrite_existing = true) { TrieEntry* current = &root_; - while (uint8_t c = *key) { + for (uint8_t c : key) { if (!current->entries_[c]) { current->entries_[c] = std::make_unique>(); } current = current->entries_[c].get(); - key++; } if (current->value_ && !overwrite_existing) { return false; @@ -593,13 +592,11 @@ template struct TrieLookupTable { * @param key the key used to find. * @return the value associated with the key. 
*/ - Value find(const char* key) const { + Value find(absl::string_view key) const { const TrieEntry* current = &root_; - while (uint8_t c = *key) { + for (uint8_t c : key) { current = current->entries_[c].get(); - if (current) { - key++; - } else { + if (current == nullptr) { return nullptr; } } diff --git a/source/common/grpc/common.cc b/source/common/grpc/common.cc index ee123d02ff01d..40e1077b630a5 100644 --- a/source/common/grpc/common.cc +++ b/source/common/grpc/common.cc @@ -31,7 +31,8 @@ bool Common::hasGrpcContentType(const Http::HeaderMap& headers) { absl::StartsWith(content_type->value().getStringView(), Http::Headers::get().ContentTypeValues.Grpc) && (content_type->value().size() == Http::Headers::get().ContentTypeValues.Grpc.size() || - content_type->value().c_str()[Http::Headers::get().ContentTypeValues.Grpc.size()] == '+'); + content_type->value() + .getStringView()[Http::Headers::get().ContentTypeValues.Grpc.size()] == '+'); } bool Common::isGrpcResponseHeader(const Http::HeaderMap& headers, bool end_stream) { @@ -53,11 +54,13 @@ void Common::chargeStat(const Upstream::ClusterInfo& cluster, const std::string& } cluster.statsScope() .counter(fmt::format("{}.{}.{}.{}", protocol, grpc_service, grpc_method, - grpc_status->value().c_str())) + grpc_status->value().getStringView())) .inc(); uint64_t grpc_status_code; + const std::string grpc_status_string(grpc_status->value().getStringView()); + // TODO(dnoe): Migrate to pure string_view (#6580) const bool success = - StringUtil::atoull(grpc_status->value().c_str(), grpc_status_code) && grpc_status_code == 0; + StringUtil::atoull(grpc_status_string.c_str(), grpc_status_code) && grpc_status_code == 0; chargeStat(cluster, protocol, grpc_service, grpc_method, success); } @@ -85,7 +88,9 @@ absl::optional Common::getGrpcStatus(const Http::HeaderMap& if (!grpc_status_header || grpc_status_header->value().empty()) { return absl::optional(); } - if (!StringUtil::atoull(grpc_status_header->value().c_str(), 
grpc_status_code) || + // TODO(dnoe): Migrate to pure string_view (#6580) + std::string grpc_status_header_string(grpc_status_header->value().getStringView()); + if (!StringUtil::atoull(grpc_status_header_string.c_str(), grpc_status_code) || grpc_status_code > Status::GrpcStatus::MaximumValid) { return absl::optional(Status::GrpcStatus::InvalidCode); } @@ -94,15 +99,15 @@ absl::optional Common::getGrpcStatus(const Http::HeaderMap& std::string Common::getGrpcMessage(const Http::HeaderMap& trailers) { const auto entry = trailers.GrpcMessage(); - return entry ? entry->value().c_str() : EMPTY_STRING; + return entry ? std::string(entry->value().getStringView()) : EMPTY_STRING; } bool Common::resolveServiceAndMethod(const Http::HeaderEntry* path, std::string* service, std::string* method) { - if (path == nullptr || path->value().c_str() == nullptr) { + if (path == nullptr) { return false; } - const auto parts = StringUtil::splitToken(path->value().c_str(), "/"); + const auto parts = StringUtil::splitToken(path->value().getStringView(), "/"); if (parts.size() != 2) { return false; } @@ -138,8 +143,9 @@ std::chrono::milliseconds Common::getGrpcTimeout(Http::HeaderMap& request_header Http::HeaderEntry* header_grpc_timeout_entry = request_headers.GrpcTimeout(); if (header_grpc_timeout_entry) { uint64_t grpc_timeout; - const char* unit = - StringUtil::strtoull(header_grpc_timeout_entry->value().c_str(), grpc_timeout); + // TODO(dnoe): Migrate to pure string_view (#6580) + std::string grpc_timeout_string(header_grpc_timeout_entry->value().getStringView()); + const char* unit = StringUtil::strtoull(grpc_timeout_string.c_str(), grpc_timeout); if (unit != nullptr && *unit != '\0') { switch (*unit) { case 'H': @@ -231,9 +237,7 @@ void Common::checkForHeaderOnlyError(Http::Message& http_response) { throw Exception(absl::optional(), "bad grpc-status header"); } - const Http::HeaderEntry* grpc_status_message = http_response.headers().GrpcMessage(); - throw 
Exception(grpc_status_code.value(), - grpc_status_message ? grpc_status_message->value().c_str() : EMPTY_STRING); + throw Exception(grpc_status_code.value(), Common::getGrpcMessage(http_response.headers())); } void Common::validateResponse(Http::Message& http_response) { @@ -255,9 +259,7 @@ void Common::validateResponse(Http::Message& http_response) { } if (grpc_status_code.value() != 0) { - const Http::HeaderEntry* grpc_status_message = http_response.trailers()->GrpcMessage(); - throw Exception(grpc_status_code.value(), - grpc_status_message ? grpc_status_message->value().c_str() : EMPTY_STRING); + throw Exception(grpc_status_code.value(), Common::getGrpcMessage(*http_response.trailers())); } } diff --git a/source/common/grpc/google_async_client_impl.cc b/source/common/grpc/google_async_client_impl.cc index a9798a4971d3e..3a8a180c70ab0 100644 --- a/source/common/grpc/google_async_client_impl.cc +++ b/source/common/grpc/google_async_client_impl.cc @@ -156,7 +156,8 @@ void GoogleAsyncStreamImpl::initialize(bool /*buffer_body_for_retry*/) { initial_metadata.iterate( [](const Http::HeaderEntry& header, void* ctxt) { auto* client_context = static_cast(ctxt); - client_context->AddMetadata(header.key().c_str(), header.value().c_str()); + client_context->AddMetadata(std::string(header.key().getStringView()), + std::string(header.value().getStringView())); return Http::HeaderMap::Iterate::Continue; }, &ctxt_); diff --git a/source/common/http/async_client_impl.cc b/source/common/http/async_client_impl.cc index d2de75c1b69e0..0ff58f7e69d62 100644 --- a/source/common/http/async_client_impl.cc +++ b/source/common/http/async_client_impl.cc @@ -113,7 +113,7 @@ void AsyncStreamImpl::encodeTrailers(HeaderMapPtr&& trailers) { } void AsyncStreamImpl::sendHeaders(HeaderMap& headers, bool end_stream) { - if (Http::Headers::get().MethodValues.Head == headers.Method()->value().c_str()) { + if (Http::Headers::get().MethodValues.Head == headers.Method()->value().getStringView()) { 
is_head_request_ = true; } diff --git a/source/common/http/conn_manager_impl.cc b/source/common/http/conn_manager_impl.cc index 764dc0e179dcb..286ee7855f67b 100644 --- a/source/common/http/conn_manager_impl.cc +++ b/source/common/http/conn_manager_impl.cc @@ -573,7 +573,8 @@ const Network::Connection* ConnectionManagerImpl::ActiveStream::connection() { // e.g. many early returns do not currently handle connection: close properly. void ConnectionManagerImpl::ActiveStream::decodeHeaders(HeaderMapPtr&& headers, bool end_stream) { request_headers_ = std::move(headers); - if (Http::Headers::get().MethodValues.Head == request_headers_->Method()->value().c_str()) { + if (Http::Headers::get().MethodValues.Head == + request_headers_->Method()->value().getStringView()) { is_head_request_ = true; } ENVOY_STREAM_LOG(debug, "request headers complete (end_stream={}):\n{}", *this, end_stream, @@ -661,7 +662,8 @@ void ConnectionManagerImpl::ActiveStream::decodeHeaders(HeaderMapPtr&& headers, // when the allow_absolute_url flag is enabled on the HCM. // https://tools.ietf.org/html/rfc7230#section-5.3 We also need to check for the existence of // :path because CONNECT does not have a path, and we don't support that currently. - if (!request_headers_->Path() || request_headers_->Path()->value().c_str()[0] != '/') { + if (!request_headers_->Path() || request_headers_->Path()->value().getStringView().empty() || + request_headers_->Path()->value().getStringView()[0] != '/') { connection_manager_.stats_.named_.downstream_rq_non_relative_path_.inc(); sendLocalReply(Grpc::Common::hasGrpcContentType(*request_headers_), Code::NotFound, "", nullptr, is_head_request_, absl::nullopt); @@ -779,7 +781,8 @@ void ConnectionManagerImpl::ActiveStream::traceRequest() { // should be used to override the active span's operation. 
if (req_operation_override) { if (!req_operation_override->value().empty()) { - active_span_->setOperation(req_operation_override->value().c_str()); + // TODO(dnoe): Migrate setOperation to take string_view (#6580) + active_span_->setOperation(std::string(req_operation_override->value().getStringView())); // Clear the decorated operation so won't be used in the response header, as // it has been overridden by the inbound decorator operation request header. @@ -1289,7 +1292,7 @@ void ConnectionManagerImpl::ActiveStream::encodeHeaders(ActiveStreamEncoderFilte // should be used to override the active span's operation. if (resp_operation_override) { if (!resp_operation_override->value().empty() && active_span_) { - active_span_->setOperation(resp_operation_override->value().c_str()); + active_span_->setOperation(std::string(resp_operation_override->value().getStringView())); } // Remove header so not propagated to service. headers.removeEnvoyDecoratorOperation(); @@ -1588,7 +1591,7 @@ bool ConnectionManagerImpl::ActiveStream::createFilterChain() { } if (connection_manager_.config_.filterFactory().createUpgradeFilterChain( - upgrade->value().c_str(), upgrade_map, *this)) { + upgrade->value().getStringView(), upgrade_map, *this)) { state_.successful_upgrade_ = true; connection_manager_.stats_.named_.downstream_cx_upgrades_total_.inc(); connection_manager_.stats_.named_.downstream_cx_upgrades_active_.inc(); diff --git a/source/common/http/conn_manager_utility.cc b/source/common/http/conn_manager_utility.cc index 164b8712c2954..1243d8339fdb3 100644 --- a/source/common/http/conn_manager_utility.cc +++ b/source/common/http/conn_manager_utility.cc @@ -223,7 +223,8 @@ void ConnectionManagerUtility::mutateTracingRequestHeader(HeaderMap& request_hea return; } - std::string x_request_id = request_headers.RequestId()->value().c_str(); + // TODO(dnoe): Migrate uuidModBy and others below to take string_view (#6580) + std::string 
x_request_id(request_headers.RequestId()->value().getStringView()); uint64_t result; // Skip if x-request-id is corrupted. if (!UuidUtils::uuidModBy(x_request_id, result, 10000)) { diff --git a/source/common/http/header_map_impl.cc b/source/common/http/header_map_impl.cc index 66500d425383d..20747d7e6efe6 100644 --- a/source/common/http/header_map_impl.cc +++ b/source/common/http/header_map_impl.cc @@ -213,6 +213,10 @@ void HeaderString::setCopy(const char* data, uint32_t size) { ASSERT(valid()); } +void HeaderString::setCopy(absl::string_view view) { + this->setCopy(view.data(), static_cast(view.size())); +} + void HeaderString::setInteger(uint64_t value) { switch (type_) { case Type::Reference: { @@ -272,7 +276,7 @@ void HeaderMapImpl::HeaderEntryImpl::value(absl::string_view value) { void HeaderMapImpl::HeaderEntryImpl::value(uint64_t value) { value_.setInteger(value); } void HeaderMapImpl::HeaderEntryImpl::value(const HeaderEntry& header) { - value(header.value().c_str(), header.value().size()); + value(header.value().getStringView()); } #define INLINE_HEADER_STATIC_MAP_ENTRY(name) \ @@ -324,9 +328,9 @@ void HeaderMapImpl::copyFrom(const HeaderMap& header_map) { [](const HeaderEntry& header, void* context) -> HeaderMap::Iterate { // TODO(mattklein123) PERF: Avoid copying here if not necessary. 
HeaderString key_string; - key_string.setCopy(header.key().c_str(), header.key().size()); + key_string.setCopy(header.key().getStringView()); HeaderString value_string; - value_string.setCopy(header.value().c_str(), header.value().size()); + value_string.setCopy(header.value().getStringView()); static_cast(context)->addViaMove(std::move(key_string), std::move(value_string)); @@ -341,7 +345,7 @@ bool HeaderMapImpl::operator==(const HeaderMapImpl& rhs) const { } for (auto i = headers_.begin(), j = rhs.headers_.begin(); i != headers_.end(); ++i, ++j) { - if (i->key() != j->key().c_str() || i->value() != j->value().c_str()) { + if (i->key() != j->key().getStringView() || i->value() != j->value().getStringView()) { return false; } } @@ -352,14 +356,14 @@ bool HeaderMapImpl::operator==(const HeaderMapImpl& rhs) const { bool HeaderMapImpl::operator!=(const HeaderMapImpl& rhs) const { return !operator==(rhs); } void HeaderMapImpl::insertByKey(HeaderString&& key, HeaderString&& value) { - EntryCb cb = ConstSingleton::get().find(key.c_str()); + EntryCb cb = ConstSingleton::get().find(key.getStringView()); if (cb) { key.clear(); StaticLookupResponse ref_lookup_response = cb(*this); if (*ref_lookup_response.entry_ == nullptr) { maybeCreateInline(ref_lookup_response.entry_, *ref_lookup_response.key_, std::move(value)); } else { - appendToHeader((*ref_lookup_response.entry_)->value(), value.c_str()); + appendToHeader((*ref_lookup_response.entry_)->value(), value.getStringView()); value.clear(); } } else { @@ -371,9 +375,9 @@ void HeaderMapImpl::insertByKey(HeaderString&& key, HeaderString&& value) { void HeaderMapImpl::addViaMove(HeaderString&& key, HeaderString&& value) { // If this is an inline header, we can't addViaMove, because we'll overwrite // the existing value. 
- auto* entry = getExistingInline(key.c_str()); + auto* entry = getExistingInline(key.getStringView()); if (entry != nullptr) { - appendToHeader(entry->value(), value.c_str()); + appendToHeader(entry->value(), value.getStringView()); key.clear(); value.clear(); } else { @@ -404,7 +408,7 @@ void HeaderMapImpl::addReferenceKey(const LowerCaseString& key, const std::strin } void HeaderMapImpl::addCopy(const LowerCaseString& key, uint64_t value) { - auto* entry = getExistingInline(key.get().c_str()); + auto* entry = getExistingInline(key.get()); if (entry != nullptr) { char buf[32]; StringUtil::itoa(buf, sizeof(buf), value); @@ -421,7 +425,7 @@ void HeaderMapImpl::addCopy(const LowerCaseString& key, uint64_t value) { } void HeaderMapImpl::addCopy(const LowerCaseString& key, const std::string& value) { - auto* entry = getExistingInline(key.get().c_str()); + auto* entry = getExistingInline(key.get()); if (entry != nullptr) { appendToHeader(entry->value(), value); return; @@ -499,7 +503,7 @@ void HeaderMapImpl::iterateReverse(ConstIterateCb cb, void* context) const { HeaderMap::Lookup HeaderMapImpl::lookup(const LowerCaseString& key, const HeaderEntry** entry) const { - EntryCb cb = ConstSingleton::get().find(key.get().c_str()); + EntryCb cb = ConstSingleton::get().find(key.get()); if (cb) { // The accessor callbacks for predefined inline headers take a HeaderMapImpl& as an argument; // even though we don't make any modifications, we need to cast_cast in order to use the @@ -521,7 +525,7 @@ HeaderMap::Lookup HeaderMapImpl::lookup(const LowerCaseString& key, } void HeaderMapImpl::remove(const LowerCaseString& key) { - EntryCb cb = ConstSingleton::get().find(key.get().c_str()); + EntryCb cb = ConstSingleton::get().find(key.get()); if (cb) { StaticLookupResponse ref_lookup_response = cb(*this); removeInline(ref_lookup_response.entry_); @@ -542,7 +546,7 @@ void HeaderMapImpl::removePrefix(const LowerCaseString& prefix) { if (to_remove) { // If this header should be removed, 
make sure any references in the // static lookup table are cleared as well. - EntryCb cb = ConstSingleton::get().find(entry.key().c_str()); + EntryCb cb = ConstSingleton::get().find(entry.key().getStringView()); if (cb) { StaticLookupResponse ref_lookup_response = cb(*this); if (ref_lookup_response.entry_) { @@ -580,7 +584,7 @@ HeaderMapImpl::HeaderEntryImpl& HeaderMapImpl::maybeCreateInline(HeaderEntryImpl return **entry; } -HeaderMapImpl::HeaderEntryImpl* HeaderMapImpl::getExistingInline(const char* key) { +HeaderMapImpl::HeaderEntryImpl* HeaderMapImpl::getExistingInline(absl::string_view key) { EntryCb cb = ConstSingleton::get().find(key); if (cb) { StaticLookupResponse ref_lookup_response = cb(*this); diff --git a/source/common/http/header_map_impl.h b/source/common/http/header_map_impl.h index 3bdbd1cd206ee..ffa2e069f33d6 100644 --- a/source/common/http/header_map_impl.h +++ b/source/common/http/header_map_impl.h @@ -138,7 +138,9 @@ class HeaderMapImpl : public HeaderMap, NonCopyable { public: HeaderList() : pseudo_headers_end_(headers_.end()) {} - template bool isPseudoHeader(const Key& key) { return key.c_str()[0] == ':'; } + template bool isPseudoHeader(const Key& key) { + return !key.getStringView().empty() && key.getStringView()[0] == ':'; + } template std::list::iterator insert(Key&& key, Value&&... 
value) { @@ -189,7 +191,7 @@ class HeaderMapImpl : public HeaderMap, NonCopyable { HeaderEntryImpl& maybeCreateInline(HeaderEntryImpl** entry, const LowerCaseString& key); HeaderEntryImpl& maybeCreateInline(HeaderEntryImpl** entry, const LowerCaseString& key, HeaderString&& value); - HeaderEntryImpl* getExistingInline(const char* key); + HeaderEntryImpl* getExistingInline(absl::string_view key); void removeInline(HeaderEntryImpl** entry); diff --git a/source/common/http/header_utility.cc b/source/common/http/header_utility.cc index d0cea22f11dd0..3a0cd2c05c0aa 100644 --- a/source/common/http/header_utility.cc +++ b/source/common/http/header_utility.cc @@ -87,16 +87,19 @@ bool HeaderUtility::matchHeaders(const Http::HeaderMap& request_headers, } bool match; + const absl::string_view header_view = header->value().getStringView(); switch (header_data.header_match_type_) { case HeaderMatchType::Value: - match = header_data.value_.empty() || header->value() == header_data.value_.c_str(); + match = header_data.value_.empty() || header_view == header_data.value_; break; case HeaderMatchType::Regex: - match = std::regex_match(header->value().c_str(), header_data.regex_pattern_); + match = std::regex_match(header_view.begin(), header_view.end(), header_data.regex_pattern_); break; case HeaderMatchType::Range: { int64_t header_value = 0; - match = StringUtil::atoll(header->value().c_str(), header_value, 10) && + // TODO(dnoe): Migrate to pure string_view to eliminate std:string instance (#6580) + const std::string header_string(header_view); + match = StringUtil::atoll(header_string.c_str(), header_value, 10) && header_value >= header_data.range_.start() && header_value < header_data.range_.end(); break; } @@ -104,10 +107,10 @@ bool HeaderUtility::matchHeaders(const Http::HeaderMap& request_headers, match = true; break; case HeaderMatchType::Prefix: - match = absl::StartsWith(header->value().getStringView(), header_data.value_); + match = absl::StartsWith(header_view, 
header_data.value_); break; case HeaderMatchType::Suffix: - match = absl::EndsWith(header->value().getStringView(), header_data.value_); + match = absl::EndsWith(header_view, header_data.value_); break; default: NOT_REACHED_GCOVR_EXCL_LINE; @@ -120,9 +123,9 @@ void HeaderUtility::addHeaders(Http::HeaderMap& headers, const Http::HeaderMap& headers_to_add.iterate( [](const Http::HeaderEntry& header, void* context) -> Http::HeaderMap::Iterate { Http::HeaderString k; - k.setCopy(header.key().c_str(), header.key().size()); + k.setCopy(header.key().getStringView()); Http::HeaderString v; - v.setCopy(header.value().c_str(), header.value().size()); + v.setCopy(header.value().getStringView()); static_cast(context)->addViaMove(std::move(k), std::move(v)); return Http::HeaderMap::Iterate::Continue; }, diff --git a/source/common/http/http1/codec_impl.cc b/source/common/http/http1/codec_impl.cc index eda67fa94b5a0..ded79fb00a93c 100644 --- a/source/common/http/http1/codec_impl.cc +++ b/source/common/http/http1/codec_impl.cc @@ -42,6 +42,9 @@ void StreamEncoderImpl::encodeHeader(const char* key, uint32_t key_size, const c connection_.addCharToBuffer('\r'); connection_.addCharToBuffer('\n'); } +void StreamEncoderImpl::encodeHeader(absl::string_view key, absl::string_view value) { + this->encodeHeader(key.data(), key.size(), value.data(), value.size()); +} void StreamEncoderImpl::encode100ContinueHeaders(const HeaderMap& headers) { ASSERT(headers.Status()->value() == "100"); @@ -54,11 +57,11 @@ void StreamEncoderImpl::encodeHeaders(const HeaderMap& headers, bool end_stream) bool saw_content_length = false; headers.iterate( [](const HeaderEntry& header, void* context) -> HeaderMap::Iterate { - const char* key_to_use = header.key().c_str(); + absl::string_view key_to_use = header.key().getStringView(); uint32_t key_size_to_use = header.key().size(); // Translate :authority -> host so that upper layers do not need to deal with this. 
if (key_size_to_use > 1 && key_to_use[0] == ':' && key_to_use[1] == 'a') { - key_to_use = Headers::get().HostLegacy.get().c_str(); + key_to_use = absl::string_view(Headers::get().HostLegacy.get()); key_size_to_use = Headers::get().HostLegacy.get().size(); } @@ -67,8 +70,8 @@ void StreamEncoderImpl::encodeHeaders(const HeaderMap& headers, bool end_stream) return HeaderMap::Iterate::Continue; } - static_cast(context)->encodeHeader( - key_to_use, key_size_to_use, header.value().c_str(), header.value().size()); + static_cast(context)->encodeHeader(key_to_use, + header.value().getStringView()); return HeaderMap::Iterate::Continue; }, this); @@ -265,14 +268,14 @@ void RequestStreamEncoderImpl::encodeHeaders(const HeaderMap& headers, bool end_ if (!method || !path) { throw CodecClientException(":method and :path must be specified"); } - if (method->value() == Headers::get().MethodValues.Head.c_str()) { + if (method->value() == Headers::get().MethodValues.Head) { head_request_ = true; } connection_.onEncodeHeaders(headers); connection_.reserveBuffer(std::max(4096U, path->value().size() + 4096)); - connection_.copyToBuffer(method->value().c_str(), method->value().size()); + connection_.copyToBuffer(method->value().getStringView().data(), method->value().size()); connection_.addCharToBuffer(' '); - connection_.copyToBuffer(path->value().c_str(), path->value().size()); + connection_.copyToBuffer(path->value().getStringView().data(), path->value().size()); connection_.copyToBuffer(REQUEST_POSTFIX, sizeof(REQUEST_POSTFIX) - 1); StreamEncoderImpl::encodeHeaders(headers, end_stream); @@ -328,7 +331,7 @@ ConnectionImpl::ConnectionImpl(Network::Connection& connection, http_parser_type void ConnectionImpl::completeLastHeader() { ENVOY_CONN_LOG(trace, "completed header: key={} value={}", connection_, - current_header_field_.c_str(), current_header_value_.c_str()); + current_header_field_.getStringView(), current_header_value_.getStringView()); if (!current_header_field_.empty()) { 
toLowerTable().toLowerCase(current_header_field_.buffer(), current_header_field_.size()); current_header_map_->addViaMove(std::move(current_header_field_), @@ -508,8 +511,9 @@ void ServerConnectionImpl::handlePath(HeaderMapImpl& headers, unsigned int metho bool is_connect = (method == HTTP_CONNECT); // The url is relative or a wildcard when the method is OPTIONS. Nothing to do here. - if (active_request_->request_url_.c_str()[0] == '/' || - ((method == HTTP_OPTIONS) && active_request_->request_url_.c_str()[0] == '*')) { + if (!active_request_->request_url_.getStringView().empty() && + (active_request_->request_url_.getStringView()[0] == '/' || + ((method == HTTP_OPTIONS) && active_request_->request_url_.getStringView()[0] == '*'))) { headers.addViaMove(std::move(path), std::move(active_request_->request_url_)); return; } diff --git a/source/common/http/http1/codec_impl.h b/source/common/http/http1/codec_impl.h index c0e7bf43186b0..27b80e5e5b81b 100644 --- a/source/common/http/http1/codec_impl.h +++ b/source/common/http/http1/codec_impl.h @@ -68,6 +68,13 @@ class StreamEncoderImpl : public StreamEncoder, */ void encodeHeader(const char* key, uint32_t key_size, const char* value, uint32_t value_size); + /** + * Called to encode an individual header. + * @param key supplies the header to encode as a string_view. + * @param value supplies the value to encode as a string_view. + */ + void encodeHeader(absl::string_view key, absl::string_view value); + /** * Called to finalize a stream encode. 
*/ diff --git a/source/common/http/http2/codec_impl.cc b/source/common/http/http2/codec_impl.cc index c1b429752f2bc..465a6734fcde0 100644 --- a/source/common/http/http2/codec_impl.cc +++ b/source/common/http/http2/codec_impl.cc @@ -33,7 +33,8 @@ bool Utility::reconstituteCrumbledCookies(const HeaderString& key, const HeaderS cookies.append("; ", 2); } - cookies.append(value.c_str(), value.size()); + const absl::string_view value_view = value.getStringView(); + cookies.append(value_view.data(), value_view.size()); return true; } @@ -79,9 +80,11 @@ static void insertHeader(std::vector& headers, const HeaderEntry& he if (header.value().type() == HeaderString::Type::Reference) { flags |= NGHTTP2_NV_FLAG_NO_COPY_VALUE; } - headers.push_back({remove_const(header.key().c_str()), - remove_const(header.value().c_str()), header.key().size(), - header.value().size(), flags}); + const absl::string_view header_key = header.key().getStringView(); + const absl::string_view header_value = header.value().getStringView(); + headers.push_back({remove_const(header_key.data()), + remove_const(header_value.data()), header_key.size(), + header_value.size(), flags}); } void ConnectionImpl::StreamImpl::buildHeaders(std::vector& final_headers, diff --git a/source/common/http/http2/codec_impl.h b/source/common/http/http2/codec_impl.h index 339f6ce288f0b..5c3057679a3c8 100644 --- a/source/common/http/http2/codec_impl.h +++ b/source/common/http/http2/codec_impl.h @@ -244,7 +244,7 @@ class ConnectionImpl : public virtual Connection, protected Logger::Loggable& final_headers, nghttp2_data_provider* provider) override; void transformUpgradeFromH1toH2(HeaderMap& headers) override { - upgrade_type_ = headers.Upgrade()->value().c_str(); + upgrade_type_ = std::string(headers.Upgrade()->value().getStringView()); Http::Utility::transformUpgradeRequestFromH1toH2(headers); } void maybeTransformUpgradeFromH2ToH1() override { diff --git a/source/common/http/utility.cc b/source/common/http/utility.cc index 
92c787125c630..a2ec61275f5e2 100644 --- a/source/common/http/utility.cc +++ b/source/common/http/utility.cc @@ -88,8 +88,8 @@ void Utility::appendVia(HeaderMap& headers, const std::string& via) { std::string Utility::createSslRedirectPath(const HeaderMap& headers) { ASSERT(headers.Host()); ASSERT(headers.Path()); - return fmt::format("https://{}{}", headers.Host()->value().c_str(), - headers.Path()->value().c_str()); + return fmt::format("https://{}{}", headers.Host()->value().getStringView(), + headers.Path()->value().getStringView()); } Utility::QueryParams Utility::parseQueryString(absl::string_view url) { @@ -121,8 +121,14 @@ Utility::QueryParams Utility::parseQueryString(absl::string_view url) { return params; } -const char* Utility::findQueryStringStart(const HeaderString& path) { - return std::find(path.c_str(), path.c_str() + path.size(), '?'); +absl::string_view Utility::findQueryStringStart(const HeaderString& path) { + absl::string_view path_str = path.getStringView(); + size_t query_offset = path_str.find('?'); + if (query_offset == absl::string_view::npos) { + query_offset = path_str.length(); + } + path_str.remove_prefix(query_offset); + return path_str; } std::string Utility::parseCookieValue(const HeaderMap& headers, const std::string& key) { @@ -138,13 +144,14 @@ std::string Utility::parseCookieValue(const HeaderMap& headers, const std::strin headers.iterateReverse( [](const HeaderEntry& header, void* context) -> HeaderMap::Iterate { // Find the cookie headers in the request (typically, there's only one). - if (header.key() == Http::Headers::get().Cookie.get().c_str()) { + if (header.key() == Http::Headers::get().Cookie.get()) { + // Split the cookie header into individual cookies. - for (const auto s : StringUtil::splitToken(header.value().c_str(), ";")) { + for (const auto s : StringUtil::splitToken(header.value().getStringView(), ";")) { // Find the key part of the cookie (i.e. the name of the cookie). 
size_t first_non_space = s.find_first_not_of(" "); size_t equals_index = s.find('='); - if (equals_index == std::string::npos) { + if (equals_index == absl::string_view::npos) { // The cookie is malformed if it does not have an `=`. Continue // checking other cookies in this header. continue; @@ -206,15 +213,15 @@ bool Utility::hasSetCookie(const HeaderMap& headers, const std::string& key) { headers.iterate( [](const HeaderEntry& header, void* context) -> HeaderMap::Iterate { // Find the set-cookie headers in the request - if (header.key() == Http::Headers::get().SetCookie.get().c_str()) { - const std::string value{header.value().c_str()}; + if (header.key() == Http::Headers::get().SetCookie.get()) { + const absl::string_view value{header.value().getStringView()}; const size_t equals_index = value.find('='); - if (equals_index == std::string::npos) { + if (equals_index == absl::string_view::npos) { // The cookie is malformed if it does not have an `=`. return HeaderMap::Iterate::Continue; } - std::string k = value.substr(0, equals_index); + absl::string_view k = value.substr(0, equals_index); State* state = static_cast(context); if (k == state->key_) { state->ret_ = true; @@ -231,7 +238,8 @@ bool Utility::hasSetCookie(const HeaderMap& headers, const std::string& key) { uint64_t Utility::getResponseStatus(const HeaderMap& headers) { const HeaderEntry* header = headers.Status(); uint64_t response_code; - if (!header || !StringUtil::atoull(headers.Status()->value().c_str(), response_code)) { + if (!header || !StringUtil::atoull(std::string(headers.Status()->value().getStringView()).c_str(), + response_code)) { throw CodecClientException(":status must be specified and a valid unsigned long"); } return response_code; @@ -247,7 +255,7 @@ bool Utility::isUpgrade(const HeaderMap& headers) { bool Utility::isH2UpgradeRequest(const HeaderMap& headers) { return headers.Method() && - headers.Method()->value().c_str() == Http::Headers::get().MethodValues.Connect && + 
headers.Method()->value().getStringView() == Http::Headers::get().MethodValues.Connect && headers.Protocol() && !headers.Protocol()->value().empty(); } @@ -349,7 +357,7 @@ Utility::getLastAddressFromXFF(const Http::HeaderMap& request_headers, uint32_t return {nullptr, false}; } - absl::string_view xff_string(xff_header->value().c_str(), xff_header->value().size()); + absl::string_view xff_string(xff_header->value().getStringView()); static const std::string separator(","); // Ignore the last num_to_skip addresses at the end of XFF. for (uint32_t i = 0; i < num_to_skip; i++) { @@ -473,13 +481,13 @@ void Utility::transformUpgradeRequestFromH1toH2(HeaderMap& headers) { const HeaderString& upgrade = headers.Upgrade()->value(); headers.insertMethod().value().setReference(Http::Headers::get().MethodValues.Connect); - headers.insertProtocol().value().setCopy(upgrade.c_str(), upgrade.size()); + headers.insertProtocol().value().setCopy(upgrade.getStringView()); headers.removeUpgrade(); headers.removeConnection(); // nghttp2 rejects upgrade requests/responses with content length, so strip // any unnecessary content length header. 
if (headers.ContentLength() != nullptr && - absl::string_view("0") == headers.ContentLength()->value().c_str()) { + headers.ContentLength()->value().getStringView() == "0") { headers.removeContentLength(); } } @@ -491,7 +499,7 @@ void Utility::transformUpgradeResponseFromH1toH2(HeaderMap& headers) { headers.removeUpgrade(); headers.removeConnection(); if (headers.ContentLength() != nullptr && - absl::string_view("0") == headers.ContentLength()->value().c_str()) { + headers.ContentLength()->value().getStringView() == "0") { headers.removeContentLength(); } } @@ -501,14 +509,14 @@ void Utility::transformUpgradeRequestFromH2toH1(HeaderMap& headers) { const HeaderString& protocol = headers.Protocol()->value(); headers.insertMethod().value().setReference(Http::Headers::get().MethodValues.Get); - headers.insertUpgrade().value().setCopy(protocol.c_str(), protocol.size()); + headers.insertUpgrade().value().setCopy(protocol.getStringView()); headers.insertConnection().value().setReference(Http::Headers::get().ConnectionValues.Upgrade); headers.removeProtocol(); } void Utility::transformUpgradeResponseFromH2toH1(HeaderMap& headers, absl::string_view upgrade) { if (getResponseStatus(headers) == 200) { - headers.insertUpgrade().value().setCopy(upgrade.data(), upgrade.size()); + headers.insertUpgrade().value().setCopy(upgrade); headers.insertConnection().value().setReference(Http::Headers::get().ConnectionValues.Upgrade); headers.insertStatus().value().setInteger(101); } diff --git a/source/common/http/utility.h b/source/common/http/utility.h index 5c4526ba9b3d2..24cb394c6fa32 100644 --- a/source/common/http/utility.h +++ b/source/common/http/utility.h @@ -70,10 +70,11 @@ QueryParams parseQueryString(absl::string_view url); /** * Finds the start of the query string in a path * @param path supplies a HeaderString& to search for the query string - * @return const char* a pointer to the beginning of the query string, or the end of the - * path if there is no query + * @return 
absl::string_view starting at the beginning of the query string, + * or a string_view starting at the end of the path if there was + * no query string. */ -const char* findQueryStringStart(const HeaderString& path); +absl::string_view findQueryStringStart(const HeaderString& path); /** * Parse a particular value out of a cookie diff --git a/source/common/router/config_impl.cc b/source/common/router/config_impl.cc index d17e0e56f4ca5..1b119c123328d 100644 --- a/source/common/router/config_impl.cc +++ b/source/common/router/config_impl.cc @@ -178,7 +178,7 @@ class HeaderHashMethod : public HashMethodImplBase { const Http::HeaderEntry* header = headers.get(header_name_); if (header) { - hash = HashUtil::xxHash64(header->value().c_str()); + hash = HashUtil::xxHash64(header->value().getStringView()); } return hash; } @@ -436,7 +436,7 @@ bool RouteEntryImplBase::matchRoute(const Http::HeaderMap& headers, uint64_t ran matches &= Http::HeaderUtility::matchHeaders(headers, config_headers_); if (!config_query_parameters_.empty()) { Http::Utility::QueryParams query_parameters = - Http::Utility::parseQueryString(headers.Path()->value().c_str()); + Http::Utility::parseQueryString(headers.Path()->value().getStringView()); matches &= ConfigUtility::matchQueryParams(query_parameters, config_query_parameters_); } @@ -494,7 +494,7 @@ void RouteEntryImplBase::finalizePathHeader(Http::HeaderMap& headers, return; } - std::string path = std::string(headers.Path()->value().c_str(), headers.Path()->value().size()); + std::string path(headers.Path()->value().getStringView()); if (insert_envoy_original_path) { headers.insertEnvoyOriginalPath().value(*headers.Path()); } @@ -542,7 +542,7 @@ absl::string_view RouteEntryImplBase::processRequestHost(const Http::HeaderMap& std::string RouteEntryImplBase::newPath(const Http::HeaderMap& headers) const { ASSERT(isDirectResponse()); - const char* final_scheme; + absl::string_view final_scheme; absl::string_view final_host; absl::string_view 
final_port; absl::string_view final_path; @@ -550,10 +550,10 @@ std::string RouteEntryImplBase::newPath(const Http::HeaderMap& headers) const { if (!scheme_redirect_.empty()) { final_scheme = scheme_redirect_.c_str(); } else if (https_redirect_) { - final_scheme = Http::Headers::get().SchemeValues.Https.c_str(); + final_scheme = Http::Headers::get().SchemeValues.Https; } else { ASSERT(headers.ForwardedProto()); - final_scheme = headers.ForwardedProto()->value().c_str(); + final_scheme = headers.ForwardedProto()->value().getStringView(); } if (!port_redirect_.empty()) { @@ -573,7 +573,7 @@ std::string RouteEntryImplBase::newPath(const Http::HeaderMap& headers) const { final_path = path_redirect_.c_str(); } else { ASSERT(headers.Path()); - final_path = absl::string_view(headers.Path()->value().c_str(), headers.Path()->value().size()); + final_path = headers.Path()->value().getStringView(); if (strip_query_) { size_t path_end = final_path.find("?"); if (path_end != absl::string_view::npos) { @@ -677,7 +677,7 @@ RouteConstSharedPtr RouteEntryImplBase::clusterEntry(const Http::HeaderMap& head const Http::HeaderEntry* entry = headers.get(cluster_header_name_); std::string final_cluster_name; if (entry) { - final_cluster_name = entry->value().c_str(); + final_cluster_name = std::string(entry->value().getStringView()); } // NOTE: Though we return a shared_ptr here, the current ownership model assumes that @@ -790,17 +790,17 @@ RouteConstSharedPtr PathRouteEntryImpl::matches(const Http::HeaderMap& headers, uint64_t random_value) const { if (RouteEntryImplBase::matchRoute(headers, random_value)) { const Http::HeaderString& path = headers.Path()->value(); - const char* query_string_start = Http::Utility::findQueryStringStart(path); + absl::string_view query_string = Http::Utility::findQueryStringStart(path); size_t compare_length = path.size(); - if (query_string_start != nullptr) { - compare_length = query_string_start - path.c_str(); + if (query_string.length() > 0) { + 
compare_length = compare_length - query_string.length(); } if (compare_length != path_.size()) { return nullptr; } - absl::string_view path_section(path.c_str(), compare_length); + const absl::string_view path_section = path.getStringView().substr(0, compare_length); if (case_sensitive_) { if (absl::string_view(path_) == path_section) { return clusterEntry(headers, random_value); @@ -824,11 +824,14 @@ RegexRouteEntryImpl::RegexRouteEntryImpl(const VirtualHostImpl& vhost, void RegexRouteEntryImpl::rewritePathHeader(Http::HeaderMap& headers, bool insert_envoy_original_path) const { const Http::HeaderString& path = headers.Path()->value(); - const char* query_string_start = Http::Utility::findQueryStringStart(path); + const absl::string_view query_string = Http::Utility::findQueryStringStart(path); + const size_t path_string_length = path.size() - query_string.length(); // TODO(yuval-k): This ASSERT can happen if the path was changed by a filter without clearing the // route cache. We should consider if ASSERT-ing is the desired behavior in this case. 
- ASSERT(std::regex_match(path.c_str(), query_string_start, regex_)); - std::string matched_path(path.c_str(), query_string_start); + + const absl::string_view path_view = path.getStringView(); + ASSERT(std::regex_match(path_view.begin(), path_view.begin() + path_string_length, regex_)); + const std::string matched_path(path_view.begin(), path_view.begin() + path_string_length); finalizePathHeader(headers, matched_path, insert_envoy_original_path); } @@ -837,8 +840,10 @@ RouteConstSharedPtr RegexRouteEntryImpl::matches(const Http::HeaderMap& headers, uint64_t random_value) const { if (RouteEntryImplBase::matchRoute(headers, random_value)) { const Http::HeaderString& path = headers.Path()->value(); - const char* query_string_start = Http::Utility::findQueryStringStart(path); - if (std::regex_match(path.c_str(), query_string_start, regex_)) { + const absl::string_view query_string = Http::Utility::findQueryStringStart(path); + if (std::regex_match(path.getStringView().begin(), + path.getStringView().begin() + (path.size() - query_string.length()), + regex_)) { return clusterEntry(headers, random_value); } } @@ -1026,7 +1031,8 @@ const VirtualHostImpl* RouteMatcher::findVirtualHost(const Http::HeaderMap& head // TODO (@rshriram) Match Origin header in WebSocket // request with VHost, using wildcard match - const std::string host = Http::LowerCaseString(headers.Host()->value().c_str()).get(); + const std::string host = + Http::LowerCaseString(std::string(headers.Host()->value().getStringView())).get(); const auto& iter = virtual_hosts_.find(host); if (iter != virtual_hosts_.end()) { return iter->second.get(); @@ -1069,9 +1075,10 @@ const VirtualCluster* VirtualHostImpl::virtualClusterFromEntries(const Http::HeaderMap& headers) const { for (const VirtualClusterEntry& entry : virtual_clusters_) { bool method_matches = - !entry.method_ || headers.Method()->value().c_str() == entry.method_.value(); + !entry.method_ || headers.Method()->value().getStringView() == 
entry.method_.value(); - if (method_matches && std::regex_match(headers.Path()->value().c_str(), entry.pattern_)) { + absl::string_view path_view = headers.Path()->value().getStringView(); + if (method_matches && std::regex_match(path_view.begin(), path_view.end(), entry.pattern_)) { return &entry; } } diff --git a/source/common/router/retry_state_impl.cc b/source/common/router/retry_state_impl.cc index 84e1863176031..3d84a4208e260 100644 --- a/source/common/router/retry_state_impl.cc +++ b/source/common/router/retry_state_impl.cc @@ -66,15 +66,16 @@ RetryStateImpl::RetryStateImpl(const RetryPolicy& route_policy, Http::HeaderMap& // Merge in the headers. if (request_headers.EnvoyRetryOn()) { - retry_on_ |= parseRetryOn(request_headers.EnvoyRetryOn()->value().c_str()); + retry_on_ |= parseRetryOn(request_headers.EnvoyRetryOn()->value().getStringView()); } if (request_headers.EnvoyRetryGrpcOn()) { - retry_on_ |= parseRetryGrpcOn(request_headers.EnvoyRetryGrpcOn()->value().c_str()); + retry_on_ |= parseRetryGrpcOn(request_headers.EnvoyRetryGrpcOn()->value().getStringView()); } if (retry_on_ != 0 && request_headers.EnvoyMaxRetries()) { - const char* max_retries = request_headers.EnvoyMaxRetries()->value().c_str(); + // TODO(dnoe): Migrate to pure string_view (#6580) + const std::string max_retries(request_headers.EnvoyMaxRetries()->value().getStringView()); uint64_t temp; - if (StringUtil::atoull(max_retries, temp)) { + if (StringUtil::atoull(max_retries.c_str(), temp)) { // The max retries header takes precedence if set. retries_remaining_ = temp; } diff --git a/source/common/router/router.cc b/source/common/router/router.cc index 72c9576a43d9f..3ea401798ad52 100644 --- a/source/common/router/router.cc +++ b/source/common/router/router.cc @@ -34,8 +34,9 @@ uint32_t getLength(const Buffer::Instance* instance) { return instance ? 
instanc bool schemeIsHttp(const Http::HeaderMap& downstream_headers, const Network::Connection& connection) { - if (downstream_headers.ForwardedProto() && downstream_headers.ForwardedProto()->value().c_str() == - Http::Headers::get().SchemeValues.Http) { + if (downstream_headers.ForwardedProto() && + downstream_headers.ForwardedProto()->value().getStringView() == + Http::Headers::get().SchemeValues.Http) { return true; } if (!connection.ssl()) { @@ -138,7 +139,9 @@ FilterUtility::finalTimeout(const RouteEntry& route, Http::HeaderMap& request_he Http::HeaderEntry* header_timeout_entry = request_headers.EnvoyUpstreamRequestTimeoutMs(); uint64_t header_timeout; if (header_timeout_entry) { - if (StringUtil::atoull(header_timeout_entry->value().c_str(), header_timeout)) { + // TODO(dnoe): Migrate to pure string_view (#6580) + if (StringUtil::atoull(std::string(header_timeout_entry->value().getStringView()).c_str(), + header_timeout)) { timeout.global_timeout_ = std::chrono::milliseconds(header_timeout); } request_headers.removeEnvoyUpstreamRequestTimeoutMs(); @@ -147,7 +150,9 @@ FilterUtility::finalTimeout(const RouteEntry& route, Http::HeaderMap& request_he // See if there is a per try/retry timeout. If it's >= global we just ignore it. 
Http::HeaderEntry* per_try_timeout_entry = request_headers.EnvoyUpstreamRequestPerTryTimeoutMs(); if (per_try_timeout_entry) { - if (StringUtil::atoull(per_try_timeout_entry->value().c_str(), header_timeout)) { + // TODO(dnoe): Migrate to pure string_view (#6580) + if (StringUtil::atoull(std::string(per_try_timeout_entry->value().getStringView()).c_str(), + header_timeout)) { timeout.per_try_timeout_ = std::chrono::milliseconds(header_timeout); } request_headers.removeEnvoyUpstreamRequestPerTryTimeoutMs(); @@ -274,7 +279,7 @@ Http::FilterHeadersStatus Filter::decodeHeaders(Http::HeaderMap& headers, bool e if (!route_) { config_.stats_.no_route_.inc(); ENVOY_STREAM_LOG(debug, "no cluster match for URL '{}'", *callbacks_, - headers.Path()->value().c_str()); + headers.Path()->value().getStringView()); callbacks_->streamInfo().setResponseFlag(StreamInfo::ResponseFlag::NoRouteFound); callbacks_->sendLocalReply(Http::Code::NotFound, "", nullptr, absl::nullopt); @@ -317,11 +322,11 @@ Http::FilterHeadersStatus Filter::decodeHeaders(Http::HeaderMap& headers, bool e // Set up stat prefixes, etc. 
request_vcluster_ = route_entry_->virtualCluster(headers); ENVOY_STREAM_LOG(debug, "cluster '{}' match for URL '{}'", *callbacks_, - route_entry_->clusterName(), headers.Path()->value().c_str()); + route_entry_->clusterName(), headers.Path()->value().getStringView()); const Http::HeaderEntry* request_alt_name = headers.EnvoyUpstreamAltStatName(); if (request_alt_name) { - alt_stat_prefix_ = std::string(request_alt_name->value().c_str()) + "."; + alt_stat_prefix_ = std::string(request_alt_name->value().getStringView()) + "."; headers.removeEnvoyUpstreamAltStatName(); } diff --git a/source/common/router/router_ratelimit.cc b/source/common/router/router_ratelimit.cc index f1f084265cd5a..f8827836367fa 100644 --- a/source/common/router/router_ratelimit.cc +++ b/source/common/router/router_ratelimit.cc @@ -40,7 +40,8 @@ bool RequestHeadersAction::populateDescriptor(const Router::RouteEntry&, return false; } - descriptor.entries_.push_back({descriptor_key_, header_value->value().c_str()}); + descriptor.entries_.push_back( + {descriptor_key_, std::string(header_value->value().getStringView())}); return true; } diff --git a/source/common/router/shadow_writer_impl.cc b/source/common/router/shadow_writer_impl.cc index baea1de78fa68..1597fbb8486b7 100644 --- a/source/common/router/shadow_writer_impl.cc +++ b/source/common/router/shadow_writer_impl.cc @@ -23,11 +23,12 @@ void ShadowWriterImpl::shadow(const std::string& cluster, Http::MessagePtr&& req ASSERT(!request->headers().Host()->value().empty()); // Switch authority to add a shadow postfix. This allows upstream logging to make more sense. - auto parts = StringUtil::splitToken(request->headers().Host()->value().c_str(), ":"); + auto parts = StringUtil::splitToken(request->headers().Host()->value().getStringView(), ":"); ASSERT(!parts.empty() && parts.size() <= 2); request->headers().Host()->value( - parts.size() == 2 ? 
absl::StrJoin(parts, "-shadow:") - : absl::StrCat(request->headers().Host()->value().c_str(), "-shadow")); + parts.size() == 2 + ? absl::StrJoin(parts, "-shadow:") + : absl::StrCat(request->headers().Host()->value().getStringView(), "-shadow")); // This is basically fire and forget. We don't handle cancelling. cm_.httpAsyncClientForCluster(cluster).send( std::move(request), *this, Http::AsyncClient::RequestOptions().setTimeout(timeout)); diff --git a/source/common/runtime/uuid_util.cc b/source/common/runtime/uuid_util.cc index 7f52c7edb7257..615b5c8ce46d3 100644 --- a/source/common/runtime/uuid_util.cc +++ b/source/common/runtime/uuid_util.cc @@ -21,7 +21,7 @@ bool UuidUtils::uuidModBy(const std::string& uuid, uint64_t& out, uint64_t mod) return true; } -UuidTraceStatus UuidUtils::isTraceableUuid(const std::string& uuid) { +UuidTraceStatus UuidUtils::isTraceableUuid(absl::string_view uuid) { if (uuid.length() != Runtime::RandomGeneratorImpl::UUID_LENGTH) { return UuidTraceStatus::NoTrace; } diff --git a/source/common/runtime/uuid_util.h b/source/common/runtime/uuid_util.h index b3c85e8254b18..cf2450b4d5e84 100644 --- a/source/common/runtime/uuid_util.h +++ b/source/common/runtime/uuid_util.h @@ -2,6 +2,8 @@ #include +#include "absl/strings/string_view.h" + namespace Envoy { enum class UuidTraceStatus { NoTrace, Sampled, Client, Forced }; @@ -30,7 +32,7 @@ class UuidUtils { /** * @return status of the uuid, to differentiate reason for tracing, etc. */ - static UuidTraceStatus isTraceableUuid(const std::string& uuid); + static UuidTraceStatus isTraceableUuid(absl::string_view uuid); private: // Byte on this position has predefined value of 4 for UUID4. 
diff --git a/source/common/tracing/http_tracer_impl.cc b/source/common/tracing/http_tracer_impl.cc index 1070ac3165cb2..809d43f543c70 100644 --- a/source/common/tracing/http_tracer_impl.cc +++ b/source/common/tracing/http_tracer_impl.cc @@ -23,13 +23,13 @@ static std::string buildResponseCode(const StreamInfo::StreamInfo& info) { } static std::string valueOrDefault(const Http::HeaderEntry* header, const char* default_value) { - return header ? header->value().c_str() : default_value; + return header ? std::string(header->value().getStringView()) : default_value; } static std::string buildUrl(const Http::HeaderMap& request_headers) { - std::string path = request_headers.EnvoyOriginalPath() - ? request_headers.EnvoyOriginalPath()->value().c_str() - : request_headers.Path()->value().c_str(); + std::string path(request_headers.EnvoyOriginalPath() + ? request_headers.EnvoyOriginalPath()->value().getStringView() + : request_headers.Path()->value().getStringView()); static const size_t max_path_length = 256; if (path.length() > max_path_length) { path = path.substr(0, max_path_length); @@ -64,9 +64,8 @@ Decision HttpTracerUtility::isTracing(const StreamInfo::StreamInfo& stream_info, return {Reason::NotTraceableRequestId, false}; } - // TODO PERF: Avoid copy. 
UuidTraceStatus trace_status = - UuidUtils::isTraceableUuid(request_headers.RequestId()->value().c_str()); + UuidUtils::isTraceableUuid(request_headers.RequestId()->value().getStringView()); switch (trace_status) { case UuidTraceStatus::Client: @@ -128,10 +127,11 @@ void HttpTracerUtility::finalizeSpan(Span& span, const Http::HeaderMap* request_ if (request_headers) { if (request_headers->RequestId()) { span.setTag(Tracing::Tags::get().GuidXRequestId, - std::string(request_headers->RequestId()->value().c_str())); + std::string(request_headers->RequestId()->value().getStringView())); } span.setTag(Tracing::Tags::get().HttpUrl, buildUrl(*request_headers)); - span.setTag(Tracing::Tags::get().HttpMethod, request_headers->Method()->value().c_str()); + span.setTag(Tracing::Tags::get().HttpMethod, + std::string(request_headers->Method()->value().getStringView())); span.setTag(Tracing::Tags::get().DownstreamCluster, valueOrDefault(request_headers->EnvoyDownstreamServiceCluster(), "-")); span.setTag(Tracing::Tags::get().UserAgent, valueOrDefault(request_headers->UserAgent(), "-")); @@ -140,14 +140,14 @@ void HttpTracerUtility::finalizeSpan(Span& span, const Http::HeaderMap* request_ if (request_headers->ClientTraceId()) { span.setTag(Tracing::Tags::get().GuidXClientTraceId, - std::string(request_headers->ClientTraceId()->value().c_str())); + std::string(request_headers->ClientTraceId()->value().getStringView())); } // Build tags based on the custom headers. 
for (const Http::LowerCaseString& header : tracing_config.requestHeadersForTags()) { const Http::HeaderEntry* entry = request_headers->get(header); if (entry) { - span.setTag(header.get(), entry->value().c_str()); + span.setTag(header.get(), std::string(entry->value().getStringView())); } } } @@ -184,7 +184,7 @@ SpanPtr HttpTracerImpl::startSpan(const Config& config, Http::HeaderMap& request if (config.operationName() == OperationName::Egress) { span_name.append(" "); - span_name.append(request_headers.Host()->value().c_str()); + span_name.append(std::string(request_headers.Host()->value().getStringView())); } SpanPtr active_span = driver_->startSpan(config, request_headers, span_name, diff --git a/source/common/upstream/health_checker_impl.cc b/source/common/upstream/health_checker_impl.cc index 4f9b3d110bc04..a8a3f8a77df52 100644 --- a/source/common/upstream/health_checker_impl.cc +++ b/source/common/upstream/health_checker_impl.cc @@ -236,7 +236,8 @@ HttpHealthCheckerImpl::HttpActiveHealthCheckSession::healthCheckResult() { parent_.stats_.verify_cluster_.inc(); std::string service_cluster_healthchecked = response_headers_->EnvoyUpstreamHealthCheckedCluster() - ? response_headers_->EnvoyUpstreamHealthCheckedCluster()->value().c_str() + ? 
std::string( + response_headers_->EnvoyUpstreamHealthCheckedCluster()->value().getStringView()) : EMPTY_STRING; if (service_cluster_healthchecked.find(parent_.service_name_.value()) == 0) { diff --git a/source/common/upstream/original_dst_cluster.cc b/source/common/upstream/original_dst_cluster.cc index a523d2453281f..f2a0af8911efc 100644 --- a/source/common/upstream/original_dst_cluster.cc +++ b/source/common/upstream/original_dst_cluster.cc @@ -117,8 +117,10 @@ OriginalDstCluster::LoadBalancer::requestOverrideHost(LoadBalancerContext* conte const Http::HeaderMap* downstream_headers = context->downstreamHeaders(); if (downstream_headers && downstream_headers->get(Http::Headers::get().EnvoyOriginalDstHost) != nullptr) { - const std::string& request_override_host = - downstream_headers->get(Http::Headers::get().EnvoyOriginalDstHost)->value().c_str(); + const std::string request_override_host( + downstream_headers->get(Http::Headers::get().EnvoyOriginalDstHost) + ->value() + .getStringView()); try { request_host = Network::Utility::parseInternetAddressAndPort(request_override_host, false); ENVOY_LOG(debug, "Using request override host {}.", request_override_host); diff --git a/source/extensions/access_loggers/http_grpc/grpc_access_log_impl.cc b/source/extensions/access_loggers/http_grpc/grpc_access_log_impl.cc index f32feaa20a13c..24132ccf2081f 100644 --- a/source/extensions/access_loggers/http_grpc/grpc_access_log_impl.cc +++ b/source/extensions/access_loggers/http_grpc/grpc_access_log_impl.cc @@ -306,28 +306,34 @@ void HttpGrpcAccessLog::log(const Http::HeaderMap* request_headers, // TODO(mattklein123): Populate port field. 
auto* request_properties = log_entry->mutable_request(); if (request_headers->Scheme() != nullptr) { - request_properties->set_scheme(request_headers->Scheme()->value().c_str()); + request_properties->set_scheme(std::string(request_headers->Scheme()->value().getStringView())); } if (request_headers->Host() != nullptr) { - request_properties->set_authority(request_headers->Host()->value().c_str()); + request_properties->set_authority( + std::string(request_headers->Host()->value().getStringView())); } if (request_headers->Path() != nullptr) { - request_properties->set_path(request_headers->Path()->value().c_str()); + request_properties->set_path(std::string(request_headers->Path()->value().getStringView())); } if (request_headers->UserAgent() != nullptr) { - request_properties->set_user_agent(request_headers->UserAgent()->value().c_str()); + request_properties->set_user_agent( + std::string(request_headers->UserAgent()->value().getStringView())); } if (request_headers->Referer() != nullptr) { - request_properties->set_referer(request_headers->Referer()->value().c_str()); + request_properties->set_referer( + std::string(request_headers->Referer()->value().getStringView())); } if (request_headers->ForwardedFor() != nullptr) { - request_properties->set_forwarded_for(request_headers->ForwardedFor()->value().c_str()); + request_properties->set_forwarded_for( + std::string(request_headers->ForwardedFor()->value().getStringView())); } if (request_headers->RequestId() != nullptr) { - request_properties->set_request_id(request_headers->RequestId()->value().c_str()); + request_properties->set_request_id( + std::string(request_headers->RequestId()->value().getStringView())); } if (request_headers->EnvoyOriginalPath() != nullptr) { - request_properties->set_original_path(request_headers->EnvoyOriginalPath()->value().c_str()); + request_properties->set_original_path( + std::string(request_headers->EnvoyOriginalPath()->value().getStringView())); } 
request_properties->set_request_headers_bytes(request_headers->byteSize()); request_properties->set_request_body_bytes(stream_info.bytesReceived()); @@ -335,7 +341,7 @@ void HttpGrpcAccessLog::log(const Http::HeaderMap* request_headers, envoy::api::v2::core::RequestMethod method = envoy::api::v2::core::RequestMethod::METHOD_UNSPECIFIED; envoy::api::v2::core::RequestMethod_Parse( - std::string(request_headers->Method()->value().c_str()), &method); + std::string(request_headers->Method()->value().getStringView()), &method); request_properties->set_request_method(method); } if (!request_headers_to_log_.empty()) { @@ -344,7 +350,8 @@ void HttpGrpcAccessLog::log(const Http::HeaderMap* request_headers, for (const auto& header : request_headers_to_log_) { const Http::HeaderEntry* entry = request_headers->get(header); if (entry != nullptr) { - logged_headers->insert({header.get(), ProtobufTypes::String(entry->value().c_str())}); + logged_headers->insert( + {header.get(), ProtobufTypes::String(entry->value().getStringView())}); } } } @@ -362,7 +369,8 @@ void HttpGrpcAccessLog::log(const Http::HeaderMap* request_headers, for (const auto& header : response_headers_to_log_) { const Http::HeaderEntry* entry = response_headers->get(header); if (entry != nullptr) { - logged_headers->insert({header.get(), ProtobufTypes::String(entry->value().c_str())}); + logged_headers->insert( + {header.get(), ProtobufTypes::String(entry->value().getStringView())}); } } } @@ -373,7 +381,8 @@ void HttpGrpcAccessLog::log(const Http::HeaderMap* request_headers, for (const auto& header : response_trailers_to_log_) { const Http::HeaderEntry* entry = response_trailers->get(header); if (entry != nullptr) { - logged_headers->insert({header.get(), ProtobufTypes::String(entry->value().c_str())}); + logged_headers->insert( + {header.get(), ProtobufTypes::String(entry->value().getStringView())}); } } } diff --git a/source/extensions/filters/common/ext_authz/ext_authz_http_impl.cc 
b/source/extensions/filters/common/ext_authz/ext_authz_http_impl.cc index 3d7760ece43d6..747e163d6521e 100644 --- a/source/extensions/filters/common/ext_authz/ext_authz_http_impl.cc +++ b/source/extensions/filters/common/ext_authz/ext_authz_http_impl.cc @@ -38,7 +38,8 @@ struct SuccessResponse { // UpstreamHeaderMatcher if (context->matchers_->matches(header.key().getStringView())) { context->response_->headers_to_add.emplace_back( - Http::LowerCaseString{header.key().c_str()}, header.value().c_str()); + Http::LowerCaseString{std::string(header.key().getStringView())}, + header.value().getStringView()); } return Http::HeaderMap::Iterate::Continue; }, @@ -215,7 +216,9 @@ ResponsePtr RawHttpClientImpl::toResponse(Http::MessagePtr message) { // Set an error status if parsing status code fails. A Forbidden response is sent to the client // if the filter has not been configured with failure_mode_allow. uint64_t status_code{}; - if (!StringUtil::atoull(message->headers().Status()->value().c_str(), status_code)) { + // TODO(dnoe): Migrate to pure string_view to eliminate std::string instance (#6580) + const std::string status_string(message->headers().Status()->value().getStringView()); + if (!StringUtil::atoull(status_string.c_str(), status_code)) { ENVOY_LOG(warn, "ext_authz HTTP client failed to parse the HTTP status code."); return std::make_unique(errorResponse()); } diff --git a/source/extensions/filters/http/common/aws/utility.cc b/source/extensions/filters/http/common/aws/utility.cc index 55f4a5aab08b4..88836f7b7721b 100644 --- a/source/extensions/filters/http/common/aws/utility.cc +++ b/source/extensions/filters/http/common/aws/utility.cc @@ -21,18 +21,18 @@ std::map Utility::canonicalizeHeaders(const Http::Head return Http::HeaderMap::Iterate::Continue; } // Pseudo-headers should not be canonicalized - if (entry.key().c_str()[0] == ':') { + if (!entry.key().getStringView().empty() && entry.key().getStringView()[0] == ':') { return
Http::HeaderMap::Iterate::Continue; } std::string value(entry.value().getStringView()); // Remove leading, trailing, and deduplicate repeated ascii spaces absl::RemoveExtraAsciiWhitespace(&value); - const auto iter = map->find(entry.key().c_str()); + const auto iter = map->find(std::string(entry.key().getStringView())); // If the entry already exists, append the new value to the end if (iter != map->end()) { iter->second += fmt::format(",{}", value); } else { - map->emplace(entry.key().c_str(), value); + map->emplace(std::string(entry.key().getStringView()), value); } return Http::HeaderMap::Iterate::Continue; }, @@ -91,4 +91,4 @@ Utility::joinCanonicalHeaderNames(const std::map& cano } // namespace Common } // namespace HttpFilters } // namespace Extensions -} // namespace Envoy \ No newline at end of file +} // namespace Envoy diff --git a/source/extensions/filters/http/cors/cors_filter.cc b/source/extensions/filters/http/cors/cors_filter.cc index 42443222d41b3..997beb5385fc2 100644 --- a/source/extensions/filters/http/cors/cors_filter.cc +++ b/source/extensions/filters/http/cors/cors_filter.cc @@ -54,7 +54,8 @@ Http::FilterHeadersStatus CorsFilter::decodeHeaders(Http::HeaderMap& headers, bo is_cors_request_ = true; const auto method = headers.Method(); - if (method == nullptr || method->value().c_str() != Http::Headers::get().MethodValues.Options) { + if (method == nullptr || + method->value().getStringView() != Http::Headers::get().MethodValues.Options) { return Http::FilterHeadersStatus::Continue; } @@ -134,7 +135,8 @@ bool CorsFilter::isOriginAllowedRegex(const Http::HeaderString& origin) { return false; } for (const auto& regex : *allowOriginRegexes()) { - if (std::regex_match(origin.c_str(), regex)) { + const absl::string_view origin_view = origin.getStringView(); + if (std::regex_match(origin_view.begin(), origin_view.end(), regex)) { return true; } } diff --git a/source/extensions/filters/http/dynamo/dynamo_request_parser.cc 
b/source/extensions/filters/http/dynamo/dynamo_request_parser.cc index ab427876a20c8..3ddf79653ad28 100644 --- a/source/extensions/filters/http/dynamo/dynamo_request_parser.cc +++ b/source/extensions/filters/http/dynamo/dynamo_request_parser.cc @@ -59,7 +59,7 @@ std::string RequestParser::parseOperation(const Http::HeaderMap& headerMap) { const Http::HeaderEntry* x_amz_target = headerMap.get(X_AMZ_TARGET); if (x_amz_target) { // Normally x-amz-target contains Version.Operation, e.g., DynamoDB_20160101.GetItem - auto version_and_operation = StringUtil::splitToken(x_amz_target->value().c_str(), "."); + auto version_and_operation = StringUtil::splitToken(x_amz_target->value().getStringView(), "."); if (version_and_operation.size() == 2) { operation = std::string{version_and_operation[1]}; } diff --git a/source/extensions/filters/http/fault/fault_filter.cc b/source/extensions/filters/http/fault/fault_filter.cc index 6c10fd2ff5fcf..c9502ff938c56 100644 --- a/source/extensions/filters/http/fault/fault_filter.cc +++ b/source/extensions/filters/http/fault/fault_filter.cc @@ -106,7 +106,8 @@ Http::FilterHeadersStatus FaultFilter::decodeHeaders(Http::HeaderMap& headers, b } if (headers.EnvoyDownstreamServiceCluster()) { - downstream_cluster_ = headers.EnvoyDownstreamServiceCluster()->value().c_str(); + downstream_cluster_ = + std::string(headers.EnvoyDownstreamServiceCluster()->value().getStringView()); downstream_cluster_delay_percent_key_ = fmt::format("fault.http.{}.delay.fixed_delay_percent", downstream_cluster_); @@ -361,7 +362,8 @@ bool FaultFilter::matchesDownstreamNodes(const Http::HeaderMap& headers) { return false; } - const std::string downstream_node = headers.EnvoyDownstreamServiceNode()->value().c_str(); + const absl::string_view downstream_node = + headers.EnvoyDownstreamServiceNode()->value().getStringView(); return fault_settings_->downstreamNodes().find(downstream_node) != fault_settings_->downstreamNodes().end(); } diff --git 
a/source/extensions/filters/http/fault/fault_filter.h b/source/extensions/filters/http/fault/fault_filter.h index 58e539c677667..fcf78ca15d003 100644 --- a/source/extensions/filters/http/fault/fault_filter.h +++ b/source/extensions/filters/http/fault/fault_filter.h @@ -59,7 +59,7 @@ class FaultSettings : public Router::RouteSpecificFilterConfig { return request_delay_config_.get(); } const std::string& upstreamCluster() const { return upstream_cluster_; } - const std::unordered_set& downstreamNodes() const { return downstream_nodes_; } + const absl::flat_hash_set& downstreamNodes() const { return downstream_nodes_; } absl::optional maxActiveFaults() const { return max_active_faults_; } const Filters::Common::Fault::FaultRateLimitConfig* responseRateLimit() const { return response_rate_limit_.get(); @@ -71,7 +71,7 @@ class FaultSettings : public Router::RouteSpecificFilterConfig { Filters::Common::Fault::FaultDelayConfigPtr request_delay_config_; std::string upstream_cluster_; // restrict faults to specific upstream cluster std::vector fault_filter_headers_; - std::unordered_set downstream_nodes_{}; // Inject failures for specific downstream + absl::flat_hash_set downstream_nodes_{}; // Inject failures for specific downstream absl::optional max_active_faults_; Filters::Common::Fault::FaultRateLimitConfigPtr response_rate_limit_; }; diff --git a/source/extensions/filters/http/grpc_http1_bridge/http1_bridge_filter.cc b/source/extensions/filters/http/grpc_http1_bridge/http1_bridge_filter.cc index fb221b06a5be2..08245d1b5ec23 100644 --- a/source/extensions/filters/http/grpc_http1_bridge/http1_bridge_filter.cc +++ b/source/extensions/filters/http/grpc_http1_bridge/http1_bridge_filter.cc @@ -71,7 +71,9 @@ Http::FilterTrailersStatus Http1BridgeFilter::encodeTrailers(Http::HeaderMap& tr const Http::HeaderEntry* grpc_status_header = trailers.GrpcStatus(); if (grpc_status_header) { uint64_t grpc_status_code; - if (!StringUtil::atoull(grpc_status_header->value().c_str(), 
grpc_status_code) || + // TODO(dnoe): Migrate to pure string_view to eliminate std::string instance (#6580) + std::string grpc_status_code_string(grpc_status_header->value().getStringView()); + if (!StringUtil::atoull(grpc_status_code_string.c_str(), grpc_status_code) || grpc_status_code != 0) { response_headers_->Status()->value(enumToInt(Http::Code::ServiceUnavailable)); } diff --git a/source/extensions/filters/http/grpc_http1_reverse_bridge/filter.cc b/source/extensions/filters/http/grpc_http1_reverse_bridge/filter.cc index b87f41c3b1c8f..8af9266159639 100644 --- a/source/extensions/filters/http/grpc_http1_reverse_bridge/filter.cc +++ b/source/extensions/filters/http/grpc_http1_reverse_bridge/filter.cc @@ -33,7 +33,8 @@ void adjustContentLength(Http::HeaderMap& headers, auto length_header = headers.ContentLength(); if (length_header != nullptr) { uint64_t length; - if (StringUtil::atoull(length_header->value().c_str(), length)) { + const std::string length_header_string(length_header->value().getStringView()); + if (StringUtil::atoull(length_header_string.c_str(), length)) { length_header->value(adjustment(length)); } } @@ -56,7 +57,7 @@ Http::FilterHeadersStatus Filter::decodeHeaders(Http::HeaderMap& headers, bool e // We keep track of the original content-type to ensure that we handle // gRPC content type variations such as application/grpc+proto.
- content_type_ = headers.ContentType()->value().c_str(); + content_type_ = std::string(headers.ContentType()->value().getStringView()); headers.ContentType()->value(upstream_content_type_); headers.insertAccept().value(upstream_content_type_); diff --git a/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.cc b/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.cc index 95efe9885e83a..188cf374bb27f 100644 --- a/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.cc +++ b/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.cc @@ -155,8 +155,8 @@ ProtobufUtil::Status JsonTranscoderConfig::createTranscoder( return ProtobufUtil::Status(Code::INVALID_ARGUMENT, "Request headers has application/grpc content-type"); } - const ProtobufTypes::String method = headers.Method()->value().c_str(); - ProtobufTypes::String path = headers.Path()->value().c_str(); + const ProtobufTypes::String method(headers.Method()->value().getStringView()); + ProtobufTypes::String path(headers.Path()->value().getStringView()); ProtobufTypes::String args; const size_t pos = path.find('?'); diff --git a/source/extensions/filters/http/grpc_web/grpc_web_filter.cc b/source/extensions/filters/http/grpc_web/grpc_web_filter.cc index d6dbccefb88a3..0cdfb9979423d 100644 --- a/source/extensions/filters/http/grpc_web/grpc_web_filter.cc +++ b/source/extensions/filters/http/grpc_web/grpc_web_filter.cc @@ -19,8 +19,8 @@ namespace GrpcWeb { const uint8_t GrpcWebFilter::GRPC_WEB_TRAILER = 0b10000000; // Supported gRPC-Web content-types. 
-const std::unordered_set& GrpcWebFilter::gRpcWebContentTypes() const { - static const std::unordered_set* types = new std::unordered_set( +const absl::flat_hash_set& GrpcWebFilter::gRpcWebContentTypes() const { + static const absl::flat_hash_set* types = new absl::flat_hash_set( {Http::Headers::get().ContentTypeValues.GrpcWeb, Http::Headers::get().ContentTypeValues.GrpcWebProto, Http::Headers::get().ContentTypeValues.GrpcWebText, @@ -31,7 +31,7 @@ const std::unordered_set& GrpcWebFilter::gRpcWebContentTypes() cons bool GrpcWebFilter::isGrpcWebRequest(const Http::HeaderMap& headers) { const Http::HeaderEntry* content_type = headers.ContentType(); if (content_type != nullptr) { - return gRpcWebContentTypes().count(content_type->value().c_str()) > 0; + return gRpcWebContentTypes().count(content_type->value().getStringView()) > 0; } return false; } @@ -51,9 +51,10 @@ Http::FilterHeadersStatus GrpcWebFilter::decodeHeaders(Http::HeaderMap& headers, headers.removeContentLength(); setupStatTracking(headers); - if (content_type != nullptr && - (Http::Headers::get().ContentTypeValues.GrpcWebText == content_type->value().c_str() || - Http::Headers::get().ContentTypeValues.GrpcWebTextProto == content_type->value().c_str())) { + if (content_type != nullptr && (Http::Headers::get().ContentTypeValues.GrpcWebText == + content_type->value().getStringView() || + Http::Headers::get().ContentTypeValues.GrpcWebTextProto == + content_type->value().getStringView())) { // Checks whether gRPC-Web client is sending base64 encoded request. 
is_text_request_ = true; } @@ -61,8 +62,9 @@ Http::FilterHeadersStatus GrpcWebFilter::decodeHeaders(Http::HeaderMap& headers, const Http::HeaderEntry* accept = headers.Accept(); if (accept != nullptr && - (Http::Headers::get().ContentTypeValues.GrpcWebText == accept->value().c_str() || - Http::Headers::get().ContentTypeValues.GrpcWebTextProto == accept->value().c_str())) { + (Http::Headers::get().ContentTypeValues.GrpcWebText == accept->value().getStringView() || + Http::Headers::get().ContentTypeValues.GrpcWebTextProto == + accept->value().getStringView())) { // Checks whether gRPC-Web client is asking for base64 encoded response. is_text_response_ = true; } @@ -192,9 +194,9 @@ Http::FilterTrailersStatus GrpcWebFilter::encodeTrailers(Http::HeaderMap& traile trailers.iterate( [](const Http::HeaderEntry& header, void* context) -> Http::HeaderMap::Iterate { Buffer::Instance* temp = static_cast(context); - temp->add(header.key().c_str(), header.key().size()); + temp->add(header.key().getStringView().data(), header.key().size()); temp->add(":"); - temp->add(header.value().c_str(), header.value().size()); + temp->add(header.value().getStringView().data(), header.value().size()); temp->add("\r\n"); return Http::HeaderMap::Iterate::Continue; }, diff --git a/source/extensions/filters/http/grpc_web/grpc_web_filter.h b/source/extensions/filters/http/grpc_web/grpc_web_filter.h index 5ed34171966b2..07b5e3187a181 100644 --- a/source/extensions/filters/http/grpc_web/grpc_web_filter.h +++ b/source/extensions/filters/http/grpc_web/grpc_web_filter.h @@ -56,7 +56,7 @@ class GrpcWebFilter : public Http::StreamFilter, NonCopyable { bool isGrpcWebRequest(const Http::HeaderMap& headers); static const uint8_t GRPC_WEB_TRAILER; - const std::unordered_set& gRpcWebContentTypes() const; + const absl::flat_hash_set& gRpcWebContentTypes() const; Upstream::ClusterInfoConstSharedPtr cluster_; Http::StreamDecoderFilterCallbacks* decoder_callbacks_{}; diff --git 
a/source/extensions/filters/http/gzip/gzip_filter.cc b/source/extensions/filters/http/gzip/gzip_filter.cc index 0a0c789c1851c..11d02c60a7713 100644 --- a/source/extensions/filters/http/gzip/gzip_filter.cc +++ b/source/extensions/filters/http/gzip/gzip_filter.cc @@ -149,8 +149,8 @@ Http::FilterDataStatus GzipFilter::encodeData(Buffer::Instance& data, bool end_s bool GzipFilter::hasCacheControlNoTransform(Http::HeaderMap& headers) const { const Http::HeaderEntry* cache_control = headers.CacheControl(); if (cache_control) { - return StringUtil::caseFindToken(cache_control->value().c_str(), ",", - Http::Headers::get().CacheControlValues.NoTransform.c_str()); + return StringUtil::caseFindToken(cache_control->value().getStringView(), ",", + Http::Headers::get().CacheControlValues.NoTransform); } return false; @@ -166,8 +166,8 @@ bool GzipFilter::isAcceptEncodingAllowed(Http::HeaderMap& headers) const { if (accept_encoding) { bool is_wildcard = false; // true if found and not followed by `q=0`. - for (const auto token : StringUtil::splitToken(headers.AcceptEncoding()->value().c_str(), ",", - false /* keep_empty */)) { + for (const auto token : StringUtil::splitToken( + headers.AcceptEncoding()->value().getStringView(), ",", false /* keep_empty */)) { const auto value = StringUtil::trim(StringUtil::cropRight(token, ";")); const auto q_value = StringUtil::trim(StringUtil::cropLeft(token, ";")); // If value is the gzip coding, check the qvalue and return. 
@@ -211,7 +211,9 @@ bool GzipFilter::isAcceptEncodingAllowed(Http::HeaderMap& headers) const { bool GzipFilter::isContentTypeAllowed(Http::HeaderMap& headers) const { const Http::HeaderEntry* content_type = headers.ContentType(); if (content_type && !config_->contentTypeValues().empty()) { - std::string value{StringUtil::trim(StringUtil::cropRight(content_type->value().c_str(), ";"))}; + // TODO(dnoe): Eliminate std:string construction with Swiss table (#6580) + const std::string value{ + StringUtil::trim(StringUtil::cropRight(content_type->value().getStringView(), ";"))}; return config_->contentTypeValues().find(value) != config_->contentTypeValues().end(); } @@ -230,9 +232,10 @@ bool GzipFilter::isMinimumContentLength(Http::HeaderMap& headers) const { const Http::HeaderEntry* content_length = headers.ContentLength(); if (content_length) { uint64_t length; - const bool is_minimum_content_length = - StringUtil::atoull(content_length->value().c_str(), length) && - length >= config_->minimumLength(); + // TODO(dnoe): Make StringUtil::atoull and friends string_view friendly. 
+ const std::string content_length_str(content_length->value().getStringView()); + const bool is_minimum_content_length = StringUtil::atoull(content_length_str.c_str(), length) && + length >= config_->minimumLength(); if (!is_minimum_content_length) { config_->stats().content_length_too_small_.inc(); } @@ -241,8 +244,8 @@ bool GzipFilter::isMinimumContentLength(Http::HeaderMap& headers) const { const Http::HeaderEntry* transfer_encoding = headers.TransferEncoding(); return (transfer_encoding && - StringUtil::caseFindToken(transfer_encoding->value().c_str(), ",", - Http::Headers::get().TransferEncodingValues.Chunked.c_str())); + StringUtil::caseFindToken(transfer_encoding->value().getStringView(), ",", + Http::Headers::get().TransferEncodingValues.Chunked)); } bool GzipFilter::isTransferEncodingAllowed(Http::HeaderMap& headers) const { @@ -251,7 +254,7 @@ bool GzipFilter::isTransferEncodingAllowed(Http::HeaderMap& headers) const { for (auto header_value : // TODO(gsagula): add Http::HeaderMap::string_view() so string length doesn't need to be // computed twice. Find all other sites where this can be improved. 
- StringUtil::splitToken(transfer_encoding->value().c_str(), ",", true)) { + StringUtil::splitToken(transfer_encoding->value().getStringView(), ",", true)) { const auto trimmed_value = StringUtil::trim(header_value); if (StringUtil::caseCompare(trimmed_value, Http::Headers::get().TransferEncodingValues.Gzip) || @@ -268,10 +271,10 @@ bool GzipFilter::isTransferEncodingAllowed(Http::HeaderMap& headers) const { void GzipFilter::insertVaryHeader(Http::HeaderMap& headers) { const Http::HeaderEntry* vary = headers.Vary(); if (vary) { - if (!StringUtil::findToken(vary->value().c_str(), ",", + if (!StringUtil::findToken(vary->value().getStringView(), ",", Http::Headers::get().VaryValues.AcceptEncoding, true)) { std::string new_header; - absl::StrAppend(&new_header, vary->value().c_str(), ", ", + absl::StrAppend(&new_header, vary->value().getStringView(), ", ", Http::Headers::get().VaryValues.AcceptEncoding); headers.insertVary().value(new_header); } @@ -288,7 +291,7 @@ void GzipFilter::insertVaryHeader(Http::HeaderMap& headers) { void GzipFilter::sanitizeEtagHeader(Http::HeaderMap& headers) { const Http::HeaderEntry* etag = headers.Etag(); if (etag) { - absl::string_view value(etag->value().c_str()); + absl::string_view value(etag->value().getStringView()); if (value.length() > 2 && !((value[0] == 'w' || value[0] == 'W') && value[1] == '/')) { headers.removeEtag(); } diff --git a/source/extensions/filters/http/jwt_authn/extractor.cc b/source/extensions/filters/http/jwt_authn/extractor.cc index 9e0bd9ea64043..b589c2c8bb716 100644 --- a/source/extensions/filters/http/jwt_authn/extractor.cc +++ b/source/extensions/filters/http/jwt_authn/extractor.cc @@ -203,7 +203,7 @@ std::vector ExtractorImpl::extract(const Http::HeaderMap& h } // Check query parameter locations. 
- const auto& params = Http::Utility::parseQueryString(headers.Path()->value().c_str()); + const auto& params = Http::Utility::parseQueryString(headers.Path()->value().getStringView()); for (const auto& location_it : param_locations_) { const auto& param_key = location_it.first; const auto& location_spec = location_it.second; diff --git a/source/extensions/filters/http/jwt_authn/matcher.cc b/source/extensions/filters/http/jwt_authn/matcher.cc index 252bc330d739a..123e590a7e727 100644 --- a/source/extensions/filters/http/jwt_authn/matcher.cc +++ b/source/extensions/filters/http/jwt_authn/matcher.cc @@ -90,8 +90,8 @@ class PathMatcherImpl : public BaseMatcherImpl { bool matches(const Http::HeaderMap& headers) const override { if (BaseMatcherImpl::matchRoute(headers)) { const Http::HeaderString& path = headers.Path()->value(); - size_t compare_length = Http::Utility::findQueryStringStart(path) - path.c_str(); - + const size_t compare_length = + path.getStringView().length() - Http::Utility::findQueryStringStart(path).length(); auto real_path = path.getStringView().substr(0, compare_length); bool match = case_sensitive_ ? 
real_path == path_ : StringUtil::caseCompare(real_path, path_); if (match) { @@ -119,8 +119,10 @@ class RegexMatcherImpl : public BaseMatcherImpl { bool matches(const Http::HeaderMap& headers) const override { if (BaseMatcherImpl::matchRoute(headers)) { const Http::HeaderString& path = headers.Path()->value(); - const char* query_string_start = Http::Utility::findQueryStringStart(path); - if (std::regex_match(path.c_str(), query_string_start, regex_)) { + const absl::string_view query_string = Http::Utility::findQueryStringStart(path); + absl::string_view path_view = path.getStringView(); + path_view.remove_suffix(query_string.length()); + if (std::regex_match(path_view.begin(), path_view.end(), regex_)) { ENVOY_LOG(debug, "Regex requirement '{}' matched.", regex_str_); return true; } diff --git a/source/extensions/filters/http/lua/lua_filter.cc b/source/extensions/filters/http/lua/lua_filter.cc index 11404f110b776..88315c1f68b1a 100644 --- a/source/extensions/filters/http/lua/lua_filter.cc +++ b/source/extensions/filters/http/lua/lua_filter.cc @@ -117,9 +117,9 @@ int StreamHandleWrapper::luaRespond(lua_State* state) { Http::HeaderMapPtr headers = buildHeadersFromTable(state, 2); uint64_t status; - if (headers->Status() == nullptr || - !StringUtil::atoull(headers->Status()->value().c_str(), status) || status < 200 || - status >= 600) { + const std::string status_string(headers->Status()->value().getStringView()); + if (headers->Status() == nullptr || !StringUtil::atoull(status_string.c_str(), status) || + status < 200 || status >= 600) { luaL_error(state, ":status must be between 200-599"); } @@ -212,8 +212,10 @@ void StreamHandleWrapper::onSuccess(Http::MessagePtr&& response) { response->headers().iterate( [](const Http::HeaderEntry& header, void* context) -> Http::HeaderMap::Iterate { lua_State* state = static_cast(context); - lua_pushstring(state, header.key().c_str()); - lua_pushstring(state, header.value().c_str()); + lua_pushlstring(state, 
header.key().getStringView().data(), + header.key().getStringView().length()); + lua_pushlstring(state, header.value().getStringView().data(), + header.value().getStringView().length()); lua_settable(state, -3); return Http::HeaderMap::Iterate::Continue; }, diff --git a/source/extensions/filters/http/lua/wrappers.cc b/source/extensions/filters/http/lua/wrappers.cc index cec3f527dc3b9..f675d34cfa598 100644 --- a/source/extensions/filters/http/lua/wrappers.cc +++ b/source/extensions/filters/http/lua/wrappers.cc @@ -25,8 +25,10 @@ int HeaderMapIterator::luaPairsIterator(lua_State* state) { parent_.iterator_.reset(); return 0; } else { - lua_pushstring(state, entries_[current_]->key().c_str()); - lua_pushstring(state, entries_[current_]->value().c_str()); + const absl::string_view key_view(entries_[current_]->key().getStringView()); + lua_pushlstring(state, key_view.data(), key_view.length()); + const absl::string_view value_view(entries_[current_]->value().getStringView()); + lua_pushlstring(state, value_view.data(), value_view.length()); current_++; return 2; } @@ -45,7 +47,8 @@ int HeaderMapWrapper::luaGet(lua_State* state) { const char* key = luaL_checkstring(state, 2); const Http::HeaderEntry* entry = headers_.get(Http::LowerCaseString(key)); if (entry != nullptr) { - lua_pushstring(state, entry->value().c_str()); + lua_pushlstring(state, entry->value().getStringView().data(), + entry->value().getStringView().length()); return 1; } else { return 0; diff --git a/source/extensions/filters/http/squash/squash_filter.cc b/source/extensions/filters/http/squash/squash_filter.cc index d55f82f9410fb..9e12c6c4d43fe 100644 --- a/source/extensions/filters/http/squash/squash_filter.cc +++ b/source/extensions/filters/http/squash/squash_filter.cc @@ -197,7 +197,7 @@ void SquashFilter::onCreateAttachmentSuccess(Http::MessagePtr&& m) { // Get the config object that was created if (Http::Utility::getResponseStatus(m->headers()) != enumToInt(Http::Code::Created)) { ENVOY_LOG(debug, 
"Squash: can't create attachment object. status {} - not squashing", - m->headers().Status()->value().c_str()); + m->headers().Status()->value().getStringView()); doneSquashing(); } else { std::string debug_attachment_id; diff --git a/source/extensions/filters/http/tap/tap_config_impl.cc b/source/extensions/filters/http/tap/tap_config_impl.cc index 7dc047a6b3d20..68106ab0c6eaf 100644 --- a/source/extensions/filters/http/tap/tap_config_impl.cc +++ b/source/extensions/filters/http/tap/tap_config_impl.cc @@ -17,8 +17,8 @@ Http::HeaderMap::Iterate fillHeaderList(const Http::HeaderEntry& header, void* c Protobuf::RepeatedPtrField& header_list = *reinterpret_cast*>(context); auto& new_header = *header_list.Add(); - new_header.set_key(header.key().c_str()); - new_header.set_value(header.value().c_str()); + new_header.set_key(std::string(header.key().getStringView())); + new_header.set_value(std::string(header.value().getStringView())); return Http::HeaderMap::Iterate::Continue; } } // namespace diff --git a/source/extensions/filters/network/thrift_proxy/router/router_ratelimit_impl.cc b/source/extensions/filters/network/thrift_proxy/router/router_ratelimit_impl.cc index 398bbda698742..c1f3f99f7cb20 100644 --- a/source/extensions/filters/network/thrift_proxy/router/router_ratelimit_impl.cc +++ b/source/extensions/filters/network/thrift_proxy/router/router_ratelimit_impl.cc @@ -44,7 +44,8 @@ bool RequestHeadersAction::populateDescriptor(const RouteEntry&, RateLimit::Desc return false; } - descriptor.entries_.push_back({descriptor_key_, header_value->value().c_str()}); + descriptor.entries_.push_back( + {descriptor_key_, std::string(header_value->value().getStringView())}); return true; } diff --git a/source/extensions/filters/network/thrift_proxy/twitter_protocol_impl.cc b/source/extensions/filters/network/thrift_proxy/twitter_protocol_impl.cc index 8d104250bff52..614e98de363ae 100644 --- a/source/extensions/filters/network/thrift_proxy/twitter_protocol_impl.cc +++ 
b/source/extensions/filters/network/thrift_proxy/twitter_protocol_impl.cc @@ -393,13 +393,15 @@ class RequestHeader { RequestHeader& rh = *static_cast(cb); if (key == Headers::get().ClientId.get()) { - rh.client_id_ = ClientId(header.value().c_str()); + rh.client_id_ = ClientId(std::string(header.value().getStringView())); } else if (key == Headers::get().Dest.get()) { - rh.dest_ = header.value().c_str(); + rh.dest_ = std::string(header.value().getStringView()); } else if (key.find(":d:") == 0 && key.size() > 3) { - rh.delegations_.emplace_back(std::string(key.substr(3)), header.value().c_str()); + rh.delegations_.emplace_back(std::string(key.substr(3)), + std::string(header.value().getStringView())); } else if (key[0] != ':') { - rh.contexts_.emplace_back(std::string(key), header.value().c_str()); + rh.contexts_.emplace_back(std::string(key), + std::string(header.value().getStringView())); } return Http::HeaderMap::Iterate::Continue; }, @@ -577,8 +579,8 @@ class ResponseHeader { [](const Http::HeaderEntry& header, void* cb) -> Http::HeaderMap::Iterate { absl::string_view key = header.key().getStringView(); if (!key.empty() && key[0] != ':') { - static_cast*>(cb)->emplace_back(std::string(key), - header.value().c_str()); + static_cast*>(cb)->emplace_back( + std::string(key), std::string(header.value().getStringView())); } return Http::HeaderMap::Iterate::Continue; }, diff --git a/source/extensions/tracers/common/ot/opentracing_driver_impl.cc b/source/extensions/tracers/common/ot/opentracing_driver_impl.cc index 177418d2ff892..75faf846d9140 100644 --- a/source/extensions/tracers/common/ot/opentracing_driver_impl.cc +++ b/source/extensions/tracers/common/ot/opentracing_driver_impl.cc @@ -51,7 +51,8 @@ class OpenTracingHTTPHeadersReader : public opentracing::HTTPHeadersReader { request_headers_.lookup(Http::LowerCaseString{key}, &entry); switch (lookup_result) { case Http::HeaderMap::Lookup::Found: - return opentracing::string_view{entry->value().c_str(), 
entry->value().size()}; + return opentracing::string_view{entry->value().getStringView().data(), + entry->value().getStringView().length()}; case Http::HeaderMap::Lookup::NotFound: return opentracing::make_unexpected(opentracing::key_not_found_error); case Http::HeaderMap::Lookup::NotSupported: @@ -71,8 +72,10 @@ class OpenTracingHTTPHeadersReader : public opentracing::HTTPHeadersReader { static Http::HeaderMap::Iterate headerMapCallback(const Http::HeaderEntry& header, void* context) { OpenTracingCb* callback = static_cast(context); - opentracing::string_view key{header.key().c_str(), header.key().size()}; - opentracing::string_view value{header.value().c_str(), header.value().size()}; + opentracing::string_view key{header.key().getStringView().data(), + header.key().getStringView().length()}; + opentracing::string_view value{header.value().getStringView().data(), + header.value().getStringView().length()}; if ((*callback)(key, value)) { return Http::HeaderMap::Iterate::Continue; } else { @@ -154,7 +157,8 @@ Tracing::SpanPtr OpenTracingDriver::startSpan(const Tracing::Config& config, std::unique_ptr parent_span_ctx; if (propagation_mode == PropagationMode::SingleHeader && request_headers.OtSpanContext()) { opentracing::expected> parent_span_ctx_maybe; - std::string parent_context = Base64::decode(request_headers.OtSpanContext()->value().c_str()); + std::string parent_context = + Base64::decode(std::string(request_headers.OtSpanContext()->value().getStringView())); if (!parent_context.empty()) { InputConstMemoryStream istream{parent_context.data(), parent_context.size()}; diff --git a/source/extensions/tracers/zipkin/span_context_extractor.cc b/source/extensions/tracers/zipkin/span_context_extractor.cc index 50caff9db8c04..dc9413760367c 100644 --- a/source/extensions/tracers/zipkin/span_context_extractor.cc +++ b/source/extensions/tracers/zipkin/span_context_extractor.cc @@ -86,7 +86,7 @@ std::pair SpanContextExtractor::extractSpanContext(bool is_sa if 
(b3_span_id_entry && b3_trace_id_entry) { // Extract trace id - which can either be 128 or 64 bit. For 128 bit, // it needs to be divided into two 64 bit numbers (high and low). - const std::string tid = b3_trace_id_entry->value().c_str(); + const std::string tid(b3_trace_id_entry->value().getStringView()); if (b3_trace_id_entry->value().size() == 32) { const std::string high_tid = tid.substr(0, 16); const std::string low_tid = tid.substr(16, 16); @@ -99,14 +99,14 @@ std::pair SpanContextExtractor::extractSpanContext(bool is_sa throw ExtractorException(fmt::format("Invalid trace_id {}", tid.c_str())); } - const std::string spid = b3_span_id_entry->value().c_str(); + const std::string spid(b3_span_id_entry->value().getStringView()); if (!StringUtil::atoull(spid.c_str(), span_id, 16)) { throw ExtractorException(fmt::format("Invalid span id {}", spid.c_str())); } auto b3_parent_id_entry = request_headers_.get(ZipkinCoreConstants::get().X_B3_PARENT_SPAN_ID); if (b3_parent_id_entry && !b3_parent_id_entry->value().empty()) { - const std::string pspid = b3_parent_id_entry->value().c_str(); + const std::string pspid(b3_parent_id_entry->value().getStringView()); if (!StringUtil::atoull(pspid.c_str(), parent_id, 16)) { throw ExtractorException(fmt::format("Invalid parent span id {}", pspid.c_str())); } @@ -123,7 +123,7 @@ std::pair SpanContextExtractor::extractSpanContextFromB3SingleFormat(bool is_sampled) { auto b3_head_entry = request_headers_.get(ZipkinCoreConstants::get().B3); ASSERT(b3_head_entry); - const std::string b3 = b3_head_entry->value().c_str(); + const std::string b3(b3_head_entry->value().getStringView()); if (!b3.length()) { throw ExtractorException("Invalid input: empty"); } @@ -226,4 +226,4 @@ SpanContextExtractor::extractSpanContextFromB3SingleFormat(bool is_sampled) { } // namespace Zipkin } // namespace Tracers } // namespace Extensions -} // namespace Envoy \ No newline at end of file +} // namespace Envoy diff --git 
a/source/extensions/tracers/zipkin/zipkin_tracer_impl.cc b/source/extensions/tracers/zipkin/zipkin_tracer_impl.cc index 84b5f26530b1c..a2ccbb989afb9 100644 --- a/source/extensions/tracers/zipkin/zipkin_tracer_impl.cc +++ b/source/extensions/tracers/zipkin/zipkin_tracer_impl.cc @@ -105,12 +105,13 @@ Tracing::SpanPtr Driver::startSpan(const Tracing::Config& config, Http::HeaderMa auto ret_span_context = extractor.extractSpanContext(sampled); if (!ret_span_context.second) { // Create a root Zipkin span. No context was found in the headers. - new_zipkin_span = - tracer.startSpan(config, request_headers.Host()->value().c_str(), start_time); + new_zipkin_span = tracer.startSpan( + config, std::string(request_headers.Host()->value().getStringView()), start_time); new_zipkin_span->setSampled(sampled); } else { - new_zipkin_span = tracer.startSpan(config, request_headers.Host()->value().c_str(), - start_time, ret_span_context.first); + new_zipkin_span = + tracer.startSpan(config, std::string(request_headers.Host()->value().getStringView()), + start_time, ret_span_context.first); } } catch (const ExtractorException& e) { diff --git a/test/common/config/http_subscription_test_harness.h b/test/common/config/http_subscription_test_harness.h index 3eec9b65e749f..35c2e6d3b87f1 100644 --- a/test/common/config/http_subscription_test_harness.h +++ b/test/common/config/http_subscription_test_harness.h @@ -64,12 +64,12 @@ class HttpSubscriptionTestHarness : public SubscriptionTestHarness { Http::AsyncClient::Callbacks& callbacks, const Http::AsyncClient::RequestOptions&) { http_callbacks_ = &callbacks; - EXPECT_EQ("POST", std::string(request->headers().Method()->value().c_str())); + EXPECT_EQ("POST", std::string(request->headers().Method()->value().getStringView())); EXPECT_EQ(Http::Headers::get().ContentTypeValues.Json, - std::string(request->headers().ContentType()->value().c_str())); - EXPECT_EQ("eds_cluster", std::string(request->headers().Host()->value().c_str())); + 
std::string(request->headers().ContentType()->value().getStringView())); + EXPECT_EQ("eds_cluster", std::string(request->headers().Host()->value().getStringView())); EXPECT_EQ("/v2/discovery:endpoints", - std::string(request->headers().Path()->value().c_str())); + std::string(request->headers().Path()->value().getStringView())); std::string expected_request = "{"; if (!version_.empty()) { expected_request += "\"version_info\":\"" + version + "\","; @@ -82,7 +82,7 @@ class HttpSubscriptionTestHarness : public SubscriptionTestHarness { expected_request += "}"; EXPECT_EQ(expected_request, request->bodyAsString()); EXPECT_EQ(fmt::format_int(expected_request.size()).str(), - std::string(request->headers().ContentLength()->value().c_str())); + std::string(request->headers().ContentLength()->value().getStringView())); request_in_progress_ = true; return &http_request_; })); diff --git a/test/common/config/subscription_factory_test.cc b/test/common/config/subscription_factory_test.cc index 6c673162ed332..3497f16a1826e 100644 --- a/test/common/config/subscription_factory_test.cc +++ b/test/common/config/subscription_factory_test.cc @@ -252,10 +252,11 @@ TEST_F(SubscriptionFactoryTest, HttpSubscription) { EXPECT_CALL(cm_.async_client_, send_(_, _, _)) .WillOnce(Invoke([this](Http::MessagePtr& request, Http::AsyncClient::Callbacks&, const Http::AsyncClient::RequestOptions&) { - EXPECT_EQ("POST", std::string(request->headers().Method()->value().c_str())); - EXPECT_EQ("static_cluster", std::string(request->headers().Host()->value().c_str())); + EXPECT_EQ("POST", std::string(request->headers().Method()->value().getStringView())); + EXPECT_EQ("static_cluster", + std::string(request->headers().Host()->value().getStringView())); EXPECT_EQ("/v2/discovery:endpoints", - std::string(request->headers().Path()->value().c_str())); + std::string(request->headers().Path()->value().getStringView())); return &http_request_; })); EXPECT_CALL(http_request_, cancel()); diff --git 
a/test/common/grpc/common_test.cc b/test/common/grpc/common_test.cc index 290328a53941a..8600ec8b9372f 100644 --- a/test/common/grpc/common_test.cc +++ b/test/common/grpc/common_test.cc @@ -79,25 +79,25 @@ TEST(GrpcCommonTest, ToGrpcTimeout) { Http::HeaderString value; Common::toGrpcTimeout(std::chrono::milliseconds(0UL), value); - EXPECT_STREQ("0m", value.c_str()); + EXPECT_EQ("0m", value.getStringView()); Common::toGrpcTimeout(std::chrono::milliseconds(1UL), value); - EXPECT_STREQ("1m", value.c_str()); + EXPECT_EQ("1m", value.getStringView()); Common::toGrpcTimeout(std::chrono::milliseconds(100000000UL), value); - EXPECT_STREQ("100000S", value.c_str()); + EXPECT_EQ("100000S", value.getStringView()); Common::toGrpcTimeout(std::chrono::milliseconds(100000000000UL), value); - EXPECT_STREQ("1666666M", value.c_str()); + EXPECT_EQ("1666666M", value.getStringView()); Common::toGrpcTimeout(std::chrono::milliseconds(9000000000000UL), value); - EXPECT_STREQ("2500000H", value.c_str()); + EXPECT_EQ("2500000H", value.getStringView()); Common::toGrpcTimeout(std::chrono::milliseconds(360000000000000UL), value); - EXPECT_STREQ("99999999H", value.c_str()); + EXPECT_EQ("99999999H", value.getStringView()); Common::toGrpcTimeout(std::chrono::milliseconds(UINT64_MAX), value); - EXPECT_STREQ("99999999H", value.c_str()); + EXPECT_EQ("99999999H", value.getStringView()); } TEST(GrpcCommonTest, ChargeStats) { @@ -135,71 +135,71 @@ TEST(GrpcCommonTest, PrepareHeaders) { Http::MessagePtr message = Common::prepareHeaders("cluster", "service_name", "method_name", absl::nullopt); - EXPECT_STREQ("POST", message->headers().Method()->value().c_str()); - EXPECT_STREQ("/service_name/method_name", message->headers().Path()->value().c_str()); - EXPECT_STREQ("cluster", message->headers().Host()->value().c_str()); - EXPECT_STREQ("application/grpc", message->headers().ContentType()->value().c_str()); + EXPECT_EQ("POST", message->headers().Method()->value().getStringView()); + 
EXPECT_EQ("/service_name/method_name", message->headers().Path()->value().getStringView()); + EXPECT_EQ("cluster", message->headers().Host()->value().getStringView()); + EXPECT_EQ("application/grpc", message->headers().ContentType()->value().getStringView()); } { Http::MessagePtr message = Common::prepareHeaders("cluster", "service_name", "method_name", absl::optional(1)); - EXPECT_STREQ("POST", message->headers().Method()->value().c_str()); - EXPECT_STREQ("/service_name/method_name", message->headers().Path()->value().c_str()); - EXPECT_STREQ("cluster", message->headers().Host()->value().c_str()); - EXPECT_STREQ("application/grpc", message->headers().ContentType()->value().c_str()); - EXPECT_STREQ("1m", message->headers().GrpcTimeout()->value().c_str()); + EXPECT_EQ("POST", message->headers().Method()->value().getStringView()); + EXPECT_EQ("/service_name/method_name", message->headers().Path()->value().getStringView()); + EXPECT_EQ("cluster", message->headers().Host()->value().getStringView()); + EXPECT_EQ("application/grpc", message->headers().ContentType()->value().getStringView()); + EXPECT_EQ("1m", message->headers().GrpcTimeout()->value().getStringView()); } { Http::MessagePtr message = Common::prepareHeaders("cluster", "service_name", "method_name", absl::optional(1)); - EXPECT_STREQ("POST", message->headers().Method()->value().c_str()); - EXPECT_STREQ("/service_name/method_name", message->headers().Path()->value().c_str()); - EXPECT_STREQ("cluster", message->headers().Host()->value().c_str()); - EXPECT_STREQ("application/grpc", message->headers().ContentType()->value().c_str()); - EXPECT_STREQ("1000m", message->headers().GrpcTimeout()->value().c_str()); + EXPECT_EQ("POST", message->headers().Method()->value().getStringView()); + EXPECT_EQ("/service_name/method_name", message->headers().Path()->value().getStringView()); + EXPECT_EQ("cluster", message->headers().Host()->value().getStringView()); + EXPECT_EQ("application/grpc", 
message->headers().ContentType()->value().getStringView()); + EXPECT_EQ("1000m", message->headers().GrpcTimeout()->value().getStringView()); } { Http::MessagePtr message = Common::prepareHeaders("cluster", "service_name", "method_name", absl::optional(1)); - EXPECT_STREQ("POST", message->headers().Method()->value().c_str()); - EXPECT_STREQ("/service_name/method_name", message->headers().Path()->value().c_str()); - EXPECT_STREQ("cluster", message->headers().Host()->value().c_str()); - EXPECT_STREQ("application/grpc", message->headers().ContentType()->value().c_str()); - EXPECT_STREQ("60000m", message->headers().GrpcTimeout()->value().c_str()); + EXPECT_EQ("POST", message->headers().Method()->value().getStringView()); + EXPECT_EQ("/service_name/method_name", message->headers().Path()->value().getStringView()); + EXPECT_EQ("cluster", message->headers().Host()->value().getStringView()); + EXPECT_EQ("application/grpc", message->headers().ContentType()->value().getStringView()); + EXPECT_EQ("60000m", message->headers().GrpcTimeout()->value().getStringView()); } { Http::MessagePtr message = Common::prepareHeaders("cluster", "service_name", "method_name", absl::optional(1)); - EXPECT_STREQ("POST", message->headers().Method()->value().c_str()); - EXPECT_STREQ("/service_name/method_name", message->headers().Path()->value().c_str()); - EXPECT_STREQ("cluster", message->headers().Host()->value().c_str()); - EXPECT_STREQ("application/grpc", message->headers().ContentType()->value().c_str()); - EXPECT_STREQ("3600000m", message->headers().GrpcTimeout()->value().c_str()); + EXPECT_EQ("POST", message->headers().Method()->value().getStringView()); + EXPECT_EQ("/service_name/method_name", message->headers().Path()->value().getStringView()); + EXPECT_EQ("cluster", message->headers().Host()->value().getStringView()); + EXPECT_EQ("application/grpc", message->headers().ContentType()->value().getStringView()); + EXPECT_EQ("3600000m", 
message->headers().GrpcTimeout()->value().getStringView()); } { Http::MessagePtr message = Common::prepareHeaders( "cluster", "service_name", "method_name", absl::optional(100000000)); - EXPECT_STREQ("POST", message->headers().Method()->value().c_str()); - EXPECT_STREQ("/service_name/method_name", message->headers().Path()->value().c_str()); - EXPECT_STREQ("cluster", message->headers().Host()->value().c_str()); - EXPECT_STREQ("application/grpc", message->headers().ContentType()->value().c_str()); - EXPECT_STREQ("99999999H", message->headers().GrpcTimeout()->value().c_str()); + EXPECT_EQ("POST", message->headers().Method()->value().getStringView()); + EXPECT_EQ("/service_name/method_name", message->headers().Path()->value().getStringView()); + EXPECT_EQ("cluster", message->headers().Host()->value().getStringView()); + EXPECT_EQ("application/grpc", message->headers().ContentType()->value().getStringView()); + EXPECT_EQ("99999999H", message->headers().GrpcTimeout()->value().getStringView()); } { Http::MessagePtr message = Common::prepareHeaders("cluster", "service_name", "method_name", absl::optional(100000000000)); - EXPECT_STREQ("POST", message->headers().Method()->value().c_str()); - EXPECT_STREQ("/service_name/method_name", message->headers().Path()->value().c_str()); - EXPECT_STREQ("cluster", message->headers().Host()->value().c_str()); - EXPECT_STREQ("application/grpc", message->headers().ContentType()->value().c_str()); - EXPECT_STREQ("1666666M", message->headers().GrpcTimeout()->value().c_str()); + EXPECT_EQ("POST", message->headers().Method()->value().getStringView()); + EXPECT_EQ("/service_name/method_name", message->headers().Path()->value().getStringView()); + EXPECT_EQ("cluster", message->headers().Host()->value().getStringView()); + EXPECT_EQ("application/grpc", message->headers().ContentType()->value().getStringView()); + EXPECT_EQ("1666666M", message->headers().GrpcTimeout()->value().getStringView()); } } diff --git 
a/test/common/http/async_client_impl_test.cc b/test/common/http/async_client_impl_test.cc index f5ae5fd98e078..c14523bcfa075 100644 --- a/test/common/http/async_client_impl_test.cc +++ b/test/common/http/async_client_impl_test.cc @@ -57,7 +57,7 @@ class AsyncClientImplTest : public testing::Test { bool end_stream) { EXPECT_CALL(callbacks, onHeaders_(_, end_stream)) .WillOnce(Invoke([code](HeaderMap& headers, bool) -> void { - EXPECT_EQ(std::to_string(code), headers.Status()->value().c_str()); + EXPECT_EQ(std::to_string(code), headers.Status()->value().getStringView()); })); } diff --git a/test/common/http/conn_manager_impl_test.cc b/test/common/http/conn_manager_impl_test.cc index 595c415246a83..dd6e61bcccf34 100644 --- a/test/common/http/conn_manager_impl_test.cc +++ b/test/common/http/conn_manager_impl_test.cc @@ -341,7 +341,7 @@ TEST_F(HttpConnectionManagerImplTest, HeaderOnlyRequestAndResponse) { .Times(2) .WillRepeatedly(Invoke([&](HeaderMap& headers, bool) -> FilterHeadersStatus { EXPECT_NE(nullptr, headers.ForwardedFor()); - EXPECT_STREQ("http", headers.ForwardedProto()->value().c_str()); + EXPECT_EQ("http", headers.ForwardedProto()->value().getStringView()); if (headers.Path()->value() == "/healthcheck") { filter->callbacks_->streamInfo().healthCheck(true); } @@ -406,7 +406,7 @@ TEST_F(HttpConnectionManagerImplTest, 100ContinueResponse) { EXPECT_CALL(*filter, decodeHeaders(_, true)) .WillRepeatedly(Invoke([&](HeaderMap& headers, bool) -> FilterHeadersStatus { EXPECT_NE(nullptr, headers.ForwardedFor()); - EXPECT_STREQ("http", headers.ForwardedProto()->value().c_str()); + EXPECT_EQ("http", headers.ForwardedProto()->value().getStringView()); return FilterHeadersStatus::StopIteration; })); @@ -552,7 +552,7 @@ TEST_F(HttpConnectionManagerImplTest, InvalidPathWithDualFilter) { EXPECT_CALL(*filter, encodeHeaders(_, true)); EXPECT_CALL(response_encoder_, encodeHeaders(_, true)) .WillOnce(Invoke([](const HeaderMap& headers, bool) -> void { - EXPECT_STREQ("404", 
headers.Status()->value().c_str()); + EXPECT_EQ("404", headers.Status()->value().getStringView()); })); EXPECT_CALL(*filter, onDestroy()); @@ -589,7 +589,7 @@ TEST_F(HttpConnectionManagerImplTest, PathFailedtoSanitize) { EXPECT_CALL(*filter, encodeHeaders(_, true)); EXPECT_CALL(response_encoder_, encodeHeaders(_, true)) .WillOnce(Invoke([](const HeaderMap& headers, bool) -> void { - EXPECT_STREQ("400", headers.Status()->value().c_str()); + EXPECT_EQ("400", headers.Status()->value().getStringView()); })); EXPECT_CALL(*filter, onDestroy()); @@ -615,7 +615,7 @@ TEST_F(HttpConnectionManagerImplTest, FilterShouldUseSantizedPath) { EXPECT_CALL(*filter, decodeHeaders(_, true)) .WillRepeatedly(Invoke([&](HeaderMap& header_map, bool) -> FilterHeadersStatus { - EXPECT_EQ(normalized_path, header_map.Path()->value().c_str()); + EXPECT_EQ(normalized_path, header_map.Path()->value().getStringView()); return FilterHeadersStatus::StopIteration; })); @@ -658,7 +658,7 @@ TEST_F(HttpConnectionManagerImplTest, RouteShouldUseSantizedPath) { EXPECT_CALL(*route_config_provider_.route_config_, route(_, _)) .WillOnce(Invoke([&](const Http::HeaderMap& header_map, uint64_t) { - EXPECT_EQ(normalized_path, header_map.Path()->value().c_str()); + EXPECT_EQ(normalized_path, header_map.Path()->value().getStringView()); return route; })); EXPECT_CALL(filter_factory_, createFilterChain(_)) @@ -791,7 +791,7 @@ TEST_F(HttpConnectionManagerImplTest, StartAndFinishSpanNormalFlowIngressDecorat // Verify decorator operation response header has been defined. 
EXPECT_CALL(encoder, encodeHeaders(_, true)) .WillOnce(Invoke([](const HeaderMap& headers, bool) -> void { - EXPECT_STREQ("testOp", headers.EnvoyDecoratorOperation()->value().c_str()); + EXPECT_EQ("testOp", headers.EnvoyDecoratorOperation()->value().getStringView()); })); Buffer::OwnedImpl fake_input("1234"); @@ -922,7 +922,7 @@ TEST_F(HttpConnectionManagerImplTest, StartAndFinishSpanNormalFlowEgressDecorato .WillOnce(Invoke([](HeaderMap& headers, bool) -> FilterHeadersStatus { EXPECT_NE(nullptr, headers.EnvoyDecoratorOperation()); // Verify that decorator operation has been set as request header. - EXPECT_STREQ("testOp", headers.EnvoyDecoratorOperation()->value().c_str()); + EXPECT_EQ("testOp", headers.EnvoyDecoratorOperation()->value().getStringView()); return FilterHeadersStatus::StopIteration; })); @@ -1314,7 +1314,7 @@ TEST_F(HttpConnectionManagerImplTest, NoPath) { EXPECT_CALL(encoder, encodeHeaders(_, true)) .WillOnce(Invoke([](const HeaderMap& headers, bool) -> void { - EXPECT_STREQ("404", headers.Status()->value().c_str()); + EXPECT_EQ("404", headers.Status()->value().getStringView()); })); Buffer::OwnedImpl fake_input("1234"); @@ -1365,7 +1365,7 @@ TEST_F(HttpConnectionManagerImplTest, PerStreamIdleTimeoutGlobal) { // 408 direct response after timeout. EXPECT_CALL(response_encoder_, encodeHeaders(_, false)) .WillOnce(Invoke([](const HeaderMap& headers, bool) -> void { - EXPECT_STREQ("408", headers.Status()->value().c_str()); + EXPECT_EQ("408", headers.Status()->value().getStringView()); })); std::string response_body; EXPECT_CALL(response_encoder_, encodeData(_, true)).WillOnce(AddBufferToString(&response_body)); @@ -1447,7 +1447,7 @@ TEST_F(HttpConnectionManagerImplTest, TestStreamIdleAccessLog) { // 408 direct response after timeout. 
EXPECT_CALL(response_encoder_, encodeHeaders(_, false)) .WillOnce(Invoke([](const HeaderMap& headers, bool) -> void { - EXPECT_STREQ("408", headers.Status()->value().c_str()); + EXPECT_EQ("408", headers.Status()->value().getStringView()); })); std::string response_body; @@ -1554,7 +1554,7 @@ TEST_F(HttpConnectionManagerImplTest, PerStreamIdleTimeoutAfterDownstreamHeaders // 408 direct response after timeout. EXPECT_CALL(response_encoder_, encodeHeaders(_, false)) .WillOnce(Invoke([](const HeaderMap& headers, bool) -> void { - EXPECT_STREQ("408", headers.Status()->value().c_str()); + EXPECT_EQ("408", headers.Status()->value().getStringView()); })); std::string response_body; EXPECT_CALL(response_encoder_, encodeData(_, true)).WillOnce(AddBufferToString(&response_body)); @@ -1626,7 +1626,7 @@ TEST_F(HttpConnectionManagerImplTest, PerStreamIdleTimeoutAfterDownstreamHeaders // 408 direct response after timeout. EXPECT_CALL(response_encoder_, encodeHeaders(_, false)) .WillOnce(Invoke([](const HeaderMap& headers, bool) -> void { - EXPECT_STREQ("408", headers.Status()->value().c_str()); + EXPECT_EQ("408", headers.Status()->value().getStringView()); })); std::string response_body; EXPECT_CALL(response_encoder_, encodeData(_, true)).WillOnce(AddBufferToString(&response_body)); @@ -1677,7 +1677,7 @@ TEST_F(HttpConnectionManagerImplTest, PerStreamIdleTimeoutAfterUpstreamHeaders) // 200 upstream response. EXPECT_CALL(response_encoder_, encodeHeaders(_, false)) .WillOnce(Invoke([](const HeaderMap& headers, bool) -> void { - EXPECT_STREQ("200", headers.Status()->value().c_str()); + EXPECT_EQ("200", headers.Status()->value().getStringView()); })); Buffer::OwnedImpl fake_input("1234"); @@ -1744,7 +1744,7 @@ TEST_F(HttpConnectionManagerImplTest, PerStreamIdleTimeoutAfterBidiData) { // 200 upstream response. 
EXPECT_CALL(response_encoder_, encodeHeaders(_, false)) .WillOnce(Invoke([](const HeaderMap& headers, bool) -> void { - EXPECT_STREQ("200", headers.Status()->value().c_str()); + EXPECT_EQ("200", headers.Status()->value().getStringView()); })); std::string response_body; @@ -1809,7 +1809,7 @@ TEST_F(HttpConnectionManagerImplTest, RequestTimeoutCallbackDisarmsAndReturns408 EXPECT_CALL(response_encoder_, encodeHeaders(_, false)) .WillOnce(Invoke([](const HeaderMap& headers, bool) -> void { - EXPECT_STREQ("408", headers.Status()->value().c_str()); + EXPECT_EQ("408", headers.Status()->value().getStringView()); })); EXPECT_CALL(response_encoder_, encodeData(_, true)).WillOnce(AddBufferToString(&response_body)); @@ -1989,7 +1989,7 @@ TEST_F(HttpConnectionManagerImplTest, RejectWebSocketOnNonWebSocketRoute) { EXPECT_CALL(encoder, encodeHeaders(_, true)) .WillOnce(Invoke([](const HeaderMap& headers, bool) -> void { - EXPECT_STREQ("403", headers.Status()->value().c_str()); + EXPECT_EQ("403", headers.Status()->value().getStringView()); })); Buffer::OwnedImpl fake_input("1234"); @@ -2019,7 +2019,7 @@ TEST_F(HttpConnectionManagerImplTest, FooUpgradeDrainClose) { EXPECT_CALL(encoder, encodeHeaders(_, false)) .WillOnce(Invoke([&](const HeaderMap& headers, bool) -> void { EXPECT_NE(nullptr, headers.Connection()); - EXPECT_STREQ("upgrade", headers.Connection()->value().c_str()); + EXPECT_EQ("upgrade", headers.Connection()->value().getStringView()); })); EXPECT_CALL(*filter, setDecoderFilterCallbacks(_)); @@ -2069,7 +2069,7 @@ TEST_F(HttpConnectionManagerImplTest, DrainClose) { EXPECT_CALL(*filter, decodeHeaders(_, true)) .WillOnce(Invoke([](HeaderMap& headers, bool) -> FilterHeadersStatus { EXPECT_NE(nullptr, headers.ForwardedFor()); - EXPECT_STREQ("https", headers.ForwardedProto()->value().c_str()); + EXPECT_EQ("https", headers.ForwardedProto()->value().getStringView()); return FilterHeadersStatus::StopIteration; })); @@ -2128,7 +2128,7 @@ TEST_F(HttpConnectionManagerImplTest, 
ResponseBeforeRequestComplete) { EXPECT_CALL(response_encoder_, encodeHeaders(_, true)) .WillOnce(Invoke([](const HeaderMap& headers, bool) -> void { EXPECT_NE(nullptr, headers.Server()); - EXPECT_STREQ("envoy-server-test", headers.Server()->value().c_str()); + EXPECT_EQ("envoy-server-test", headers.Server()->value().getStringView()); })); EXPECT_CALL(*decoder_filters_[0], onDestroy()); EXPECT_CALL(filter_callbacks_.connection_, @@ -2170,7 +2170,7 @@ TEST_F(HttpConnectionManagerImplTest, ResponseStartBeforeRequestComplete) { EXPECT_CALL(encoder, encodeHeaders(_, false)) .WillOnce(Invoke([](const HeaderMap& headers, bool) -> void { EXPECT_NE(nullptr, headers.Server()); - EXPECT_STREQ("", headers.Server()->value().c_str()); + EXPECT_EQ("", headers.Server()->value().getStringView()); })); filter->callbacks_->encodeHeaders(std::move(response_headers), false); @@ -3261,7 +3261,7 @@ TEST_F(HttpConnectionManagerImplTest, FilterHeadReply) { EXPECT_CALL(*encoder_filters_[0], encodeHeaders(_, true)) .WillOnce(Invoke([&](HeaderMap& headers, bool) -> FilterHeadersStatus { - EXPECT_STREQ("11", headers.ContentLength()->value().c_str()); + EXPECT_EQ("11", headers.ContentLength()->value().getStringView()); return FilterHeadersStatus::Continue; })); EXPECT_CALL(*encoder_filters_[0], encodeComplete()); @@ -3957,7 +3957,7 @@ TEST_F(HttpConnectionManagerImplTest, NoNewStreamWhenOverloaded) { // 503 direct response when overloaded. 
EXPECT_CALL(response_encoder_, encodeHeaders(_, false)) .WillOnce(Invoke([](const HeaderMap& headers, bool) -> void { - EXPECT_STREQ("503", headers.Status()->value().c_str()); + EXPECT_EQ("503", headers.Status()->value().getStringView()); })); std::string response_body; EXPECT_CALL(response_encoder_, encodeData(_, true)).WillOnce(AddBufferToString(&response_body)); @@ -3995,7 +3995,7 @@ TEST_F(HttpConnectionManagerImplTest, DisableKeepAliveWhenOverloaded) { EXPECT_CALL(response_encoder_, encodeHeaders(_, true)) .WillOnce(Invoke([](const HeaderMap& headers, bool) -> void { - EXPECT_STREQ("close", headers.Connection()->value().c_str()); + EXPECT_EQ("close", headers.Connection()->value().getStringView()); })); Buffer::OwnedImpl fake_input("1234"); @@ -4016,7 +4016,7 @@ TEST_F(HttpConnectionManagerImplTest, OverlyLongHeadersRejected) { EXPECT_CALL(response_encoder_, encodeHeaders(_, true)) .WillOnce(Invoke([&response_code](const HeaderMap& headers, bool) -> void { - response_code = headers.Status()->value().c_str(); + response_code = std::string(headers.Status()->value().getStringView()); })); decoder->decodeHeaders(std::move(headers), true); conn_manager_->newStream(response_encoder_); diff --git a/test/common/http/conn_manager_utility_test.cc b/test/common/http/conn_manager_utility_test.cc index d1bc974e8b8c3..37f627e07a5ab 100644 --- a/test/common/http/conn_manager_utility_test.cc +++ b/test/common/http/conn_manager_utility_test.cc @@ -209,7 +209,7 @@ TEST_F(ConnectionManagerUtilityTest, SkipXffAppendPassThruUseRemoteAddress) { EXPECT_EQ((MutateRequestRet{"12.12.12.12:0", false}), callMutateRequestHeaders(headers, Protocol::Http2)); - EXPECT_STREQ("198.51.100.1", headers.ForwardedFor()->value().c_str()); + EXPECT_EQ("198.51.100.1", headers.ForwardedFor()->value().getStringView()); } // Verify internal request and XFF is set when we are using remote address and the address is diff --git a/test/common/http/header_map_impl_fuzz_test.cc 
b/test/common/http/header_map_impl_fuzz_test.cc index 4e8f07658add7..fbf7c621059a1 100644 --- a/test/common/http/header_map_impl_fuzz_test.cc +++ b/test/common/http/header_map_impl_fuzz_test.cc @@ -91,8 +91,8 @@ DEFINE_PROTO_FUZZER(const test::common::http::HeaderMapImplFuzzTestCase& input) auto* header_entry = header_map->get(Http::LowerCaseString(get_and_mutate.key())); if (header_entry != nullptr) { // Do some read-only stuff. - (void)strlen(header_entry->key().c_str()); - (void)strlen(header_entry->value().c_str()); + (void)strlen(std::string(header_entry->key().getStringView()).c_str()); + (void)strlen(std::string(header_entry->value().getStringView()).c_str()); (void)strlen(header_entry->value().buffer()); header_entry->key().empty(); header_entry->value().empty(); diff --git a/test/common/http/header_map_impl_test.cc b/test/common/http/header_map_impl_test.cc index 3962ed6ba0a14..f4fabb61145fd 100644 --- a/test/common/http/header_map_impl_test.cc +++ b/test/common/http/header_map_impl_test.cc @@ -18,8 +18,8 @@ TEST(HeaderStringTest, All) { { LowerCaseString static_string("hello"); HeaderString string(static_string); - EXPECT_STREQ("hello", string.c_str()); - EXPECT_EQ(static_string.get().c_str(), string.c_str()); + EXPECT_EQ("hello", string.getStringView()); + EXPECT_EQ(static_string.get(), string.getStringView()); EXPECT_EQ(5U, string.size()); } @@ -36,8 +36,8 @@ TEST(HeaderStringTest, All) { { std::string static_string("HELLO"); HeaderString string(static_string); - EXPECT_STREQ("HELLO", string.c_str()); - EXPECT_EQ(static_string.c_str(), string.c_str()); + EXPECT_EQ("HELLO", string.getStringView()); + EXPECT_EQ(static_string, string.getStringView()); EXPECT_EQ(5U, string.size()); } @@ -46,9 +46,9 @@ TEST(HeaderStringTest, All) { std::string static_string("HELLO"); HeaderString string1(static_string); HeaderString string2(std::move(string1)); - EXPECT_STREQ("HELLO", string2.c_str()); - EXPECT_EQ(static_string.c_str(), string1.c_str()); // 
NOLINT(bugprone-use-after-move) - EXPECT_EQ(static_string.c_str(), string2.c_str()); + EXPECT_EQ("HELLO", string2.getStringView()); + EXPECT_EQ(static_string, string1.getStringView()); // NOLINT(bugprone-use-after-move) + EXPECT_EQ(static_string, string2.getStringView()); EXPECT_EQ(5U, string1.size()); EXPECT_EQ(5U, string2.size()); } @@ -63,9 +63,9 @@ TEST(HeaderStringTest, All) { EXPECT_EQ(HeaderString::Type::Inline, string.type()); EXPECT_EQ(HeaderString::Type::Inline, string2.type()); string.append("world", 5); - EXPECT_STREQ("world", string.c_str()); + EXPECT_EQ("world", string.getStringView()); EXPECT_EQ(5UL, string.size()); - EXPECT_STREQ("hello", string2.c_str()); + EXPECT_EQ("hello", string2.getStringView()); EXPECT_EQ(5UL, string2.size()); } @@ -80,9 +80,9 @@ TEST(HeaderStringTest, All) { EXPECT_EQ(HeaderString::Type::Inline, string.type()); EXPECT_EQ(HeaderString::Type::Dynamic, string2.type()); string.append("b", 1); - EXPECT_STREQ("b", string.c_str()); + EXPECT_EQ("b", string.getStringView()); EXPECT_EQ(1UL, string.size()); - EXPECT_STREQ(large.c_str(), string2.c_str()); + EXPECT_EQ(large, string2.getStringView()); EXPECT_EQ(4096UL, string2.size()); } @@ -92,7 +92,7 @@ TEST(HeaderStringTest, All) { HeaderString string(static_string); string.setInteger(5); EXPECT_EQ(HeaderString::Type::Inline, string.type()); - EXPECT_STREQ("5", string.c_str()); + EXPECT_EQ("5", string.getStringView()); } // Static to inline string. @@ -101,7 +101,7 @@ TEST(HeaderStringTest, All) { HeaderString string(static_string); string.setCopy(static_string.c_str(), static_string.size()); EXPECT_EQ(HeaderString::Type::Inline, string.type()); - EXPECT_STREQ("HELLO", string.c_str()); + EXPECT_EQ("HELLO", string.getStringView()); } // Static clear() does nothing. 
@@ -111,7 +111,7 @@ TEST(HeaderStringTest, All) { EXPECT_EQ(HeaderString::Type::Reference, string.type()); string.clear(); EXPECT_EQ(HeaderString::Type::Reference, string.type()); - EXPECT_STREQ("HELLO", string.c_str()); + EXPECT_EQ("HELLO", string.getStringView()); } // Static to append. @@ -120,14 +120,14 @@ TEST(HeaderStringTest, All) { HeaderString string(static_string); EXPECT_EQ(HeaderString::Type::Reference, string.type()); string.append("a", 1); - EXPECT_STREQ("HELLOa", string.c_str()); + EXPECT_EQ("HELLOa", string.getStringView()); } // Copy inline { HeaderString string; string.setCopy("hello", 5); - EXPECT_STREQ("hello", string.c_str()); + EXPECT_EQ("hello", string.getStringView()); EXPECT_EQ(5U, string.size()); } @@ -136,8 +136,8 @@ TEST(HeaderStringTest, All) { HeaderString string; std::string large_value(4096, 'a'); string.setCopy(large_value.c_str(), large_value.size()); - EXPECT_STREQ(large_value.c_str(), string.c_str()); - EXPECT_NE(large_value.c_str(), string.c_str()); + EXPECT_EQ(large_value, string.getStringView()); + EXPECT_NE(large_value.c_str(), string.getStringView().data()); EXPECT_EQ(4096U, string.size()); } @@ -148,8 +148,8 @@ TEST(HeaderStringTest, All) { string.setCopy(large_value1.c_str(), large_value1.size()); std::string large_value2(2048, 'b'); string.setCopy(large_value2.c_str(), large_value2.size()); - EXPECT_STREQ(large_value2.c_str(), string.c_str()); - EXPECT_NE(large_value2.c_str(), string.c_str()); + EXPECT_EQ(large_value2, string.getStringView()); + EXPECT_NE(large_value2.c_str(), string.getStringView().data()); EXPECT_EQ(2048U, string.size()); } @@ -160,8 +160,8 @@ TEST(HeaderStringTest, All) { string.setCopy(large_value1.c_str(), large_value1.size()); std::string large_value2(16384, 'b'); string.setCopy(large_value2.c_str(), large_value2.size()); - EXPECT_STREQ(large_value2.c_str(), string.c_str()); - EXPECT_NE(large_value2.c_str(), string.c_str()); + EXPECT_EQ(large_value2, string.getStringView()); + 
EXPECT_NE(large_value2.c_str(), string.getStringView().data()); EXPECT_EQ(16384U, string.size()); } @@ -172,8 +172,8 @@ TEST(HeaderStringTest, All) { string.setCopy(large_value1.c_str(), large_value1.size()); std::string large_value2(16384, 'b'); string.setCopy(large_value2.c_str(), large_value2.size()); - EXPECT_STREQ(large_value2.c_str(), string.c_str()); - EXPECT_NE(large_value2.c_str(), string.c_str()); + EXPECT_EQ(large_value2, string.getStringView()); + EXPECT_NE(large_value2.c_str(), string.getStringView().data()); EXPECT_EQ(16384U, string.size()); } @@ -186,22 +186,22 @@ TEST(HeaderStringTest, All) { string.append("a", 1); EXPECT_EQ(HeaderString::Type::Dynamic, string.type()); test += 'a'; - EXPECT_STREQ(test.c_str(), string.c_str()); + EXPECT_EQ(test, string.getStringView()); } // Append into inline twice, then shift to dynamic. { HeaderString string; string.append("hello", 5); - EXPECT_STREQ("hello", string.c_str()); + EXPECT_EQ("hello", string.getStringView()); EXPECT_EQ(5U, string.size()); string.append("world", 5); - EXPECT_STREQ("helloworld", string.c_str()); + EXPECT_EQ("helloworld", string.getStringView()); EXPECT_EQ(10U, string.size()); std::string large(4096, 'a'); string.append(large.c_str(), large.size()); large = "helloworld" + large; - EXPECT_STREQ(large.c_str(), string.c_str()); + EXPECT_EQ(large, string.getStringView()); EXPECT_EQ(4106U, string.size()); } @@ -214,7 +214,7 @@ TEST(HeaderStringTest, All) { std::string large2 = large + large; string.append(large2.c_str(), large2.size()); large += large2; - EXPECT_STREQ(large.c_str(), string.c_str()); + EXPECT_EQ(large, string.getStringView()); EXPECT_EQ(384U, string.size()); } @@ -228,7 +228,7 @@ TEST(HeaderStringTest, All) { string.append(large2.c_str(), large2.size()); std::string large3(32, 'c'); string.append(large3.c_str(), large3.size()); - EXPECT_STREQ((large + large2 + large3).c_str(), string.c_str()); + EXPECT_EQ((large + large2 + large3), string.getStringView()); EXPECT_EQ(280U, 
string.size()); } @@ -236,7 +236,7 @@ TEST(HeaderStringTest, All) { { HeaderString string; string.setInteger(123456789); - EXPECT_STREQ("123456789", string.c_str()); + EXPECT_EQ("123456789", string.getStringView()); EXPECT_EQ(9U, string.size()); } @@ -246,7 +246,7 @@ TEST(HeaderStringTest, All) { std::string large(128, 'a'); string.append(large.c_str(), large.size()); string.setInteger(123456789); - EXPECT_STREQ("123456789", string.c_str()); + EXPECT_EQ("123456789", string.getStringView()); EXPECT_EQ(9U, string.size()); EXPECT_EQ(HeaderString::Type::Dynamic, string.type()); } @@ -256,17 +256,17 @@ TEST(HeaderStringTest, All) { const std::string static_string = "hello world"; HeaderString string; string.setReference(static_string); - EXPECT_EQ(string.c_str(), static_string.c_str()); + EXPECT_EQ(string.getStringView(), static_string); EXPECT_EQ(11U, string.size()); EXPECT_EQ(HeaderString::Type::Reference, string.type()); const std::string large(128, 'a'); string.setCopy(large.c_str(), large.size()); - EXPECT_NE(string.c_str(), large.c_str()); + EXPECT_NE(string.getStringView().data(), large.c_str()); EXPECT_EQ(HeaderString::Type::Dynamic, string.type()); string.setReference(static_string); - EXPECT_EQ(string.c_str(), static_string.c_str()); + EXPECT_EQ(string.getStringView(), static_string); EXPECT_EQ(11U, string.size()); EXPECT_EQ(HeaderString::Type::Reference, string.type()); } @@ -293,28 +293,28 @@ TEST(HeaderMapImplTest, InlineInsert) { headers.insertHost().value(std::string("hello")); EXPECT_FALSE(headers.empty()); EXPECT_EQ(1, headers.size()); - EXPECT_STREQ(":authority", headers.Host()->key().c_str()); - EXPECT_STREQ("hello", headers.Host()->value().c_str()); - EXPECT_STREQ("hello", headers.get(Headers::get().Host)->value().c_str()); + EXPECT_EQ(":authority", headers.Host()->key().getStringView()); + EXPECT_EQ("hello", headers.Host()->value().getStringView()); + EXPECT_EQ("hello", headers.get(Headers::get().Host)->value().getStringView()); } 
TEST(HeaderMapImplTest, MoveIntoInline) { HeaderMapImpl headers; HeaderString key; - key.setCopy(Headers::get().CacheControl.get().c_str(), Headers::get().CacheControl.get().size()); + key.setCopy(Headers::get().CacheControl.get()); HeaderString value; value.setCopy("hello", 5); headers.addViaMove(std::move(key), std::move(value)); - EXPECT_STREQ("cache-control", headers.CacheControl()->key().c_str()); - EXPECT_STREQ("hello", headers.CacheControl()->value().c_str()); + EXPECT_EQ("cache-control", headers.CacheControl()->key().getStringView()); + EXPECT_EQ("hello", headers.CacheControl()->value().getStringView()); HeaderString key2; key2.setCopy(Headers::get().CacheControl.get().c_str(), Headers::get().CacheControl.get().size()); HeaderString value2; value2.setCopy("there", 5); headers.addViaMove(std::move(key2), std::move(value2)); - EXPECT_STREQ("cache-control", headers.CacheControl()->key().c_str()); - EXPECT_STREQ("hello,there", headers.CacheControl()->value().c_str()); + EXPECT_EQ("cache-control", headers.CacheControl()->key().getStringView()); + EXPECT_EQ("hello,there", headers.CacheControl()->value().getStringView()); } TEST(HeaderMapImplTest, Remove) { @@ -324,7 +324,7 @@ TEST(HeaderMapImplTest, Remove) { LowerCaseString static_key("hello"); std::string ref_value("value"); headers.addReference(static_key, ref_value); - EXPECT_STREQ("value", headers.get(static_key)->value().c_str()); + EXPECT_EQ("value", headers.get(static_key)->value().getStringView()); EXPECT_EQ(HeaderString::Type::Reference, headers.get(static_key)->value().type()); EXPECT_EQ(1UL, headers.size()); EXPECT_FALSE(headers.empty()); @@ -335,7 +335,7 @@ TEST(HeaderMapImplTest, Remove) { // Add and remove by inline. 
headers.insertContentLength().value(5); - EXPECT_STREQ("5", headers.ContentLength()->value().c_str()); + EXPECT_EQ("5", headers.ContentLength()->value().getStringView()); EXPECT_EQ(1UL, headers.size()); EXPECT_FALSE(headers.empty()); headers.removeContentLength(); @@ -345,7 +345,7 @@ TEST(HeaderMapImplTest, Remove) { // Add inline and remove by name. headers.insertContentLength().value(5); - EXPECT_STREQ("5", headers.ContentLength()->value().c_str()); + EXPECT_EQ("5", headers.ContentLength()->value().getStringView()); EXPECT_EQ(1UL, headers.size()); EXPECT_FALSE(headers.empty()); headers.remove(Headers::get().ContentLength); @@ -385,7 +385,7 @@ TEST(HeaderMapImplTest, RemoveRegex) { // Add inline and remove by regex headers.insertContentLength().value(5); - EXPECT_STREQ("5", headers.ContentLength()->value().c_str()); + EXPECT_EQ("5", headers.ContentLength()->value().getStringView()); EXPECT_EQ(1UL, headers.size()); EXPECT_FALSE(headers.empty()); headers.removePrefix(LowerCaseString("content")); @@ -421,7 +421,8 @@ TEST(HeaderMapImplTest, SetRemovesAllValues) { headers.iterate( [](const Http::HeaderEntry& header, void* cb_v) -> HeaderMap::Iterate { - static_cast(cb_v)->Call(header.key().c_str(), header.value().c_str()); + static_cast(cb_v)->Call(std::string(header.key().getStringView()), + std::string(header.value().getStringView())); return HeaderMap::Iterate::Continue; }, &cb); @@ -438,7 +439,8 @@ TEST(HeaderMapImplTest, SetRemovesAllValues) { headers.iterate( [](const Http::HeaderEntry& header, void* cb_v) -> HeaderMap::Iterate { - static_cast(cb_v)->Call(header.key().c_str(), header.value().c_str()); + static_cast(cb_v)->Call(std::string(header.key().getStringView()), + std::string(header.value().getStringView())); return HeaderMap::Iterate::Continue; }, &cb); @@ -452,21 +454,21 @@ TEST(HeaderMapImplTest, DoubleInlineAdd) { const std::string bar("bar"); headers.addReference(Headers::get().ContentLength, foo); headers.addReference(Headers::get().ContentLength, 
bar); - EXPECT_STREQ("foo,bar", headers.ContentLength()->value().c_str()); + EXPECT_EQ("foo,bar", headers.ContentLength()->value().getStringView()); EXPECT_EQ(1UL, headers.size()); } { HeaderMapImpl headers; headers.addReferenceKey(Headers::get().ContentLength, "foo"); headers.addReferenceKey(Headers::get().ContentLength, "bar"); - EXPECT_STREQ("foo,bar", headers.ContentLength()->value().c_str()); + EXPECT_EQ("foo,bar", headers.ContentLength()->value().getStringView()); EXPECT_EQ(1UL, headers.size()); } { HeaderMapImpl headers; headers.addReferenceKey(Headers::get().ContentLength, 5); headers.addReferenceKey(Headers::get().ContentLength, 6); - EXPECT_STREQ("5,6", headers.ContentLength()->value().c_str()); + EXPECT_EQ("5,6", headers.ContentLength()->value().getStringView()); EXPECT_EQ(1UL, headers.size()); } { @@ -474,7 +476,7 @@ TEST(HeaderMapImplTest, DoubleInlineAdd) { const std::string foo("foo"); headers.addReference(Headers::get().ContentLength, foo); headers.addReferenceKey(Headers::get().ContentLength, 6); - EXPECT_STREQ("foo,6", headers.ContentLength()->value().c_str()); + EXPECT_EQ("foo,6", headers.ContentLength()->value().getStringView()); EXPECT_EQ(1UL, headers.size()); } } @@ -483,7 +485,7 @@ TEST(HeaderMapImplTest, DoubleInlineSet) { HeaderMapImpl headers; headers.setReferenceKey(Headers::get().ContentType, "blah"); headers.setReferenceKey(Headers::get().ContentType, "text/html"); - EXPECT_STREQ("text/html", headers.ContentType()->value().c_str()); + EXPECT_EQ("text/html", headers.ContentType()->value().getStringView()); EXPECT_EQ(1UL, headers.size()); } @@ -491,20 +493,20 @@ TEST(HeaderMapImplTest, AddReferenceKey) { HeaderMapImpl headers; LowerCaseString foo("hello"); headers.addReferenceKey(foo, "world"); - EXPECT_NE("world", headers.get(foo)->value().c_str()); - EXPECT_STREQ("world", headers.get(foo)->value().c_str()); + EXPECT_NE("world", headers.get(foo)->value().getStringView().data()); + EXPECT_EQ("world", 
headers.get(foo)->value().getStringView()); } TEST(HeaderMapImplTest, SetReferenceKey) { HeaderMapImpl headers; LowerCaseString foo("hello"); headers.setReferenceKey(foo, "world"); - EXPECT_NE("world", headers.get(foo)->value().c_str()); - EXPECT_STREQ("world", headers.get(foo)->value().c_str()); + EXPECT_NE("world", headers.get(foo)->value().getStringView().data()); + EXPECT_EQ("world", headers.get(foo)->value().getStringView()); headers.setReferenceKey(foo, "monde"); - EXPECT_NE("monde", headers.get(foo)->value().c_str()); - EXPECT_STREQ("monde", headers.get(foo)->value().c_str()); + EXPECT_NE("monde", headers.get(foo)->value().getStringView().data()); + EXPECT_EQ("monde", headers.get(foo)->value().getStringView()); } TEST(HeaderMapImplTest, AddCopy) { @@ -516,16 +518,16 @@ TEST(HeaderMapImplTest, AddCopy) { const HeaderString& value = headers.get(*lcKeyPtr)->value(); - EXPECT_STREQ("world", value.c_str()); + EXPECT_EQ("world", value.getStringView()); EXPECT_EQ(5UL, value.size()); lcKeyPtr.reset(); const HeaderString& value2 = headers.get(LowerCaseString("hello"))->value(); - EXPECT_STREQ("world", value2.c_str()); + EXPECT_EQ("world", value2.getStringView()); EXPECT_EQ(5UL, value2.size()); - EXPECT_EQ(value.c_str(), value2.c_str()); + EXPECT_EQ(value.getStringView(), value2.getStringView()); EXPECT_EQ(1UL, headers.size()); // Repeat with an int value. 
@@ -543,14 +545,14 @@ TEST(HeaderMapImplTest, AddCopy) { const HeaderString& value3 = headers.get(*lcKeyPtr)->value(); - EXPECT_STREQ("42", value3.c_str()); + EXPECT_EQ("42", value3.getStringView()); EXPECT_EQ(2UL, value3.size()); lcKeyPtr.reset(); const HeaderString& value4 = headers.get(LowerCaseString("hello"))->value(); - EXPECT_STREQ("42", value4.c_str()); + EXPECT_EQ("42", value4.getStringView()); EXPECT_EQ(2UL, value4.size()); EXPECT_EQ(1UL, headers.size()); @@ -558,22 +560,22 @@ TEST(HeaderMapImplTest, AddCopy) { LowerCaseString lcKey3(std::string("he") + "ll" + "o"); EXPECT_STREQ("hello", lcKey3.get().c_str()); - EXPECT_STREQ("42", headers.get(lcKey3)->value().c_str()); + EXPECT_EQ("42", headers.get(lcKey3)->value().getStringView()); EXPECT_EQ(2UL, headers.get(lcKey3)->value().size()); LowerCaseString cache_control("cache-control"); headers.addCopy(cache_control, "max-age=1345"); - EXPECT_STREQ("max-age=1345", headers.get(cache_control)->value().c_str()); - EXPECT_STREQ("max-age=1345", headers.CacheControl()->value().c_str()); + EXPECT_EQ("max-age=1345", headers.get(cache_control)->value().getStringView()); + EXPECT_EQ("max-age=1345", headers.CacheControl()->value().getStringView()); headers.addCopy(cache_control, "public"); - EXPECT_STREQ("max-age=1345,public", headers.get(cache_control)->value().c_str()); + EXPECT_EQ("max-age=1345,public", headers.get(cache_control)->value().getStringView()); headers.addCopy(cache_control, ""); - EXPECT_STREQ("max-age=1345,public", headers.get(cache_control)->value().c_str()); + EXPECT_EQ("max-age=1345,public", headers.get(cache_control)->value().getStringView()); headers.addCopy(cache_control, 123); - EXPECT_STREQ("max-age=1345,public,123", headers.get(cache_control)->value().c_str()); + EXPECT_EQ("max-age=1345,public,123", headers.get(cache_control)->value().getStringView()); headers.addCopy(cache_control, std::numeric_limits::max()); - EXPECT_STREQ("max-age=1345,public,123,18446744073709551615", - 
headers.get(cache_control)->value().c_str()); + EXPECT_EQ("max-age=1345,public,123,18446744073709551615", + headers.get(cache_control)->value().getStringView()); } TEST(HeaderMapImplTest, Equality) { @@ -593,7 +595,7 @@ TEST(HeaderMapImplTest, LargeCharInHeader) { LowerCaseString static_key("\x90hello"); std::string ref_value("value"); headers.addReference(static_key, ref_value); - EXPECT_STREQ("value", headers.get(static_key)->value().c_str()); + EXPECT_EQ("value", headers.get(static_key)->value().getStringView()); } TEST(HeaderMapImplTest, Iterate) { @@ -613,7 +615,8 @@ TEST(HeaderMapImplTest, Iterate) { EXPECT_CALL(cb, Call("foo", "bar")); headers.iterate( [](const Http::HeaderEntry& header, void* cb_v) -> HeaderMap::Iterate { - static_cast(cb_v)->Call(header.key().c_str(), header.value().c_str()); + static_cast(cb_v)->Call(std::string(header.key().getStringView()), + std::string(header.value().getStringView())); return HeaderMap::Iterate::Continue; }, &cb); @@ -635,8 +638,9 @@ TEST(HeaderMapImplTest, IterateReverse) { // no "hello" headers.iterateReverse( [](const Http::HeaderEntry& header, void* cb_v) -> HeaderMap::Iterate { - static_cast(cb_v)->Call(header.key().c_str(), header.value().c_str()); - if ("foo" != std::string{header.key().c_str()}) { + static_cast(cb_v)->Call(std::string(header.key().getStringView()), + std::string(header.value().getStringView())); + if (header.key().getStringView() != "foo") { return HeaderMap::Iterate::Continue; } else { return HeaderMap::Iterate::Break; @@ -661,7 +665,7 @@ TEST(HeaderMapImplTest, Lookup) { { const HeaderEntry* entry; EXPECT_EQ(HeaderMap::Lookup::Found, headers.lookup(Headers::get().ContentLength, &entry)); - EXPECT_STREQ("5", entry->value().c_str()); + EXPECT_EQ("5", entry->value().getStringView()); } // Lookup returns HeaderMap::Lookup::NotFound if a predefined inline header does not exist. 
@@ -675,17 +679,17 @@ TEST(HeaderMapImplTest, Lookup) { TEST(HeaderMapImplTest, Get) { { const TestHeaderMapImpl headers{{":path", "/"}, {"hello", "world"}}; - EXPECT_STREQ("/", headers.get(LowerCaseString(":path"))->value().c_str()); - EXPECT_STREQ("world", headers.get(LowerCaseString("hello"))->value().c_str()); + EXPECT_EQ("/", headers.get(LowerCaseString(":path"))->value().getStringView()); + EXPECT_EQ("world", headers.get(LowerCaseString("hello"))->value().getStringView()); EXPECT_EQ(nullptr, headers.get(LowerCaseString("foo"))); } { TestHeaderMapImpl headers{{":path", "/"}, {"hello", "world"}}; headers.get(LowerCaseString(":path"))->value(std::string("/new_path")); - EXPECT_STREQ("/new_path", headers.get(LowerCaseString(":path"))->value().c_str()); + EXPECT_EQ("/new_path", headers.get(LowerCaseString(":path"))->value().getStringView()); headers.get(LowerCaseString("hello"))->value(std::string("world2")); - EXPECT_STREQ("world2", headers.get(LowerCaseString("hello"))->value().c_str()); + EXPECT_EQ("world2", headers.get(LowerCaseString("hello"))->value().getStringView()); EXPECT_EQ(nullptr, headers.get(LowerCaseString("foo"))); } } @@ -720,7 +724,7 @@ TEST(HeaderMapImplTest, TestAppendHeader) { HeaderString value4(empty); HeaderMapImpl::appendToHeader(value4, " "); value4.setInteger(0); - EXPECT_STREQ("0", value4.c_str()); + EXPECT_EQ("0", value4.getStringView()); EXPECT_EQ(1U, value4.size()); } } @@ -768,7 +772,8 @@ TEST(HeaderMapImplTest, PseudoHeaderOrder) { headers.iterate( [](const Http::HeaderEntry& header, void* cb_v) -> HeaderMap::Iterate { - static_cast(cb_v)->Call(header.key().c_str(), header.value().c_str()); + static_cast(cb_v)->Call(std::string(header.key().getStringView()), + std::string(header.value().getStringView())); return HeaderMap::Iterate::Continue; }, &cb); @@ -783,7 +788,8 @@ TEST(HeaderMapImplTest, PseudoHeaderOrder) { headers.iterate( [](const Http::HeaderEntry& header, void* cb_v) -> HeaderMap::Iterate { - 
static_cast(cb_v)->Call(header.key().c_str(), header.value().c_str()); + static_cast(cb_v)->Call(std::string(header.key().getStringView()), + std::string(header.value().getStringView())); return HeaderMap::Iterate::Continue; }, &cb); @@ -799,7 +805,8 @@ TEST(HeaderMapImplTest, PseudoHeaderOrder) { headers.iterate( [](const Http::HeaderEntry& header, void* cb_v) -> HeaderMap::Iterate { - static_cast(cb_v)->Call(header.key().c_str(), header.value().c_str()); + static_cast(cb_v)->Call(std::string(header.key().getStringView()), + std::string(header.value().getStringView())); return HeaderMap::Iterate::Continue; }, &cb); @@ -814,7 +821,8 @@ TEST(HeaderMapImplTest, PseudoHeaderOrder) { headers.iterate( [](const Http::HeaderEntry& header, void* cb_v) -> HeaderMap::Iterate { - static_cast(cb_v)->Call(header.key().c_str(), header.value().c_str()); + static_cast(cb_v)->Call(std::string(header.key().getStringView()), + std::string(header.value().getStringView())); return HeaderMap::Iterate::Continue; }, &cb); @@ -830,7 +838,8 @@ TEST(HeaderMapImplTest, PseudoHeaderOrder) { headers.iterate( [](const Http::HeaderEntry& header, void* cb_v) -> HeaderMap::Iterate { - static_cast(cb_v)->Call(header.key().c_str(), header.value().c_str()); + static_cast(cb_v)->Call(std::string(header.key().getStringView()), + std::string(header.value().getStringView())); return HeaderMap::Iterate::Continue; }, &cb); @@ -847,7 +856,8 @@ TEST(HeaderMapImplTest, PseudoHeaderOrder) { headers.iterate( [](const Http::HeaderEntry& header, void* cb_v) -> HeaderMap::Iterate { - static_cast(cb_v)->Call(header.key().c_str(), header.value().c_str()); + static_cast(cb_v)->Call(std::string(header.key().getStringView()), + std::string(header.value().getStringView())); return HeaderMap::Iterate::Continue; }, &cb); @@ -863,7 +873,8 @@ TEST(HeaderMapImplTest, PseudoHeaderOrder) { headers.iterate( [](const Http::HeaderEntry& header, void* cb_v) -> HeaderMap::Iterate { - static_cast(cb_v)->Call(header.key().c_str(), 
header.value().c_str()); + static_cast(cb_v)->Call(std::string(header.key().getStringView()), + std::string(header.value().getStringView())); return HeaderMap::Iterate::Continue; }, &cb); @@ -882,7 +893,8 @@ TEST(HeaderMapImplTest, PseudoHeaderOrder) { headers.iterate( [](const Http::HeaderEntry& header, void* cb_v) -> HeaderMap::Iterate { - static_cast(cb_v)->Call(header.key().c_str(), header.value().c_str()); + static_cast(cb_v)->Call(std::string(header.key().getStringView()), + std::string(header.value().getStringView())); return HeaderMap::Iterate::Continue; }, &cb); @@ -905,7 +917,8 @@ TEST(HeaderMapImplTest, PseudoHeaderOrder) { headers.iterate( [](const Http::HeaderEntry& header, void* cb_v) -> HeaderMap::Iterate { - static_cast(cb_v)->Call(header.key().c_str(), header.value().c_str()); + static_cast(cb_v)->Call(std::string(header.key().getStringView()), + std::string(header.value().getStringView())); return HeaderMap::Iterate::Continue; }, &cb); @@ -928,7 +941,8 @@ TEST(HeaderMapImplTest, PseudoHeaderOrder) { headers.iterate( [](const Http::HeaderEntry& header, void* cb_v) -> HeaderMap::Iterate { - static_cast(cb_v)->Call(header.key().c_str(), header.value().c_str()); + static_cast(cb_v)->Call(std::string(header.key().getStringView()), + std::string(header.value().getStringView())); return HeaderMap::Iterate::Continue; }, &cb); @@ -942,13 +956,13 @@ TEST(HeaderMapImplTest, TestHeaderMapImplyCopy) { TestHeaderMapImpl foo; foo.addCopy(LowerCaseString("foo"), "bar"); auto headers = std::make_unique(foo); - EXPECT_STREQ("bar", headers->get(LowerCaseString("foo"))->value().c_str()); + EXPECT_EQ("bar", headers->get(LowerCaseString("foo"))->value().getStringView()); TestHeaderMapImpl baz{{"foo", "baz"}}; baz = *headers; - EXPECT_STREQ("bar", baz.get(LowerCaseString("foo"))->value().c_str()); + EXPECT_EQ("bar", baz.get(LowerCaseString("foo"))->value().getStringView()); const TestHeaderMapImpl& baz2 = baz; baz = baz2; - EXPECT_STREQ("bar", 
baz.get(LowerCaseString("foo"))->value().c_str()); + EXPECT_EQ("bar", baz.get(LowerCaseString("foo"))->value().getStringView()); } } // namespace Http diff --git a/test/common/http/header_utility_test.cc b/test/common/http/header_utility_test.cc index 0d8c82d134245..0163e9ea8a800 100644 --- a/test/common/http/header_utility_test.cc +++ b/test/common/http/header_utility_test.cc @@ -412,8 +412,8 @@ TEST(HeaderAddTest, HeaderAdd) { headers_to_add.iterate( [](const Http::HeaderEntry& entry, void* context) -> Http::HeaderMap::Iterate { TestHeaderMapImpl* headers = static_cast(context); - Http::LowerCaseString lower_key{entry.key().c_str()}; - EXPECT_STREQ(entry.value().c_str(), headers->get(lower_key)->value().c_str()); + Http::LowerCaseString lower_key{std::string(entry.key().getStringView())}; + EXPECT_EQ(entry.value().getStringView(), headers->get(lower_key)->value().getStringView()); return Http::HeaderMap::Iterate::Continue; }, &headers); diff --git a/test/common/http/utility_test.cc b/test/common/http/utility_test.cc index a0abcd3e8645c..e2e354c1da9a4 100644 --- a/test/common/http/utility_test.cc +++ b/test/common/http/utility_test.cc @@ -467,12 +467,12 @@ TEST(HttpUtility, SendLocalGrpcReply) { EXPECT_CALL(callbacks, encodeHeaders_(_, true)) .WillOnce(Invoke([&](const HeaderMap& headers, bool) -> void { - EXPECT_STREQ(headers.Status()->value().c_str(), "200"); + EXPECT_EQ(headers.Status()->value().getStringView(), "200"); EXPECT_NE(headers.GrpcStatus(), nullptr); - EXPECT_EQ(headers.GrpcStatus()->value().c_str(), + EXPECT_EQ(headers.GrpcStatus()->value().getStringView(), std::to_string(enumToInt(Grpc::Status::GrpcStatus::Unknown))); EXPECT_NE(headers.GrpcMessage(), nullptr); - EXPECT_STREQ(headers.GrpcMessage()->value().c_str(), "large"); + EXPECT_EQ(headers.GrpcMessage()->value().getStringView(), "large"); })); Utility::sendLocalReply(true, callbacks, is_reset, Http::Code::PayloadTooLarge, "large", absl::nullopt, false); @@ -484,7 +484,7 @@ TEST(HttpUtility, 
RateLimitedGrpcStatus) { EXPECT_CALL(callbacks, encodeHeaders_(_, true)) .WillOnce(Invoke([&](const HeaderMap& headers, bool) -> void { EXPECT_NE(headers.GrpcStatus(), nullptr); - EXPECT_EQ(headers.GrpcStatus()->value().c_str(), + EXPECT_EQ(headers.GrpcStatus()->value().getStringView(), std::to_string(enumToInt(Grpc::Status::GrpcStatus::Unavailable))); })); Utility::sendLocalReply(true, callbacks, false, Http::Code::TooManyRequests, "", absl::nullopt, @@ -493,7 +493,7 @@ TEST(HttpUtility, RateLimitedGrpcStatus) { EXPECT_CALL(callbacks, encodeHeaders_(_, true)) .WillOnce(Invoke([&](const HeaderMap& headers, bool) -> void { EXPECT_NE(headers.GrpcStatus(), nullptr); - EXPECT_EQ(headers.GrpcStatus()->value().c_str(), + EXPECT_EQ(headers.GrpcStatus()->value().getStringView(), std::to_string(enumToInt(Grpc::Status::GrpcStatus::ResourceExhausted))); })); Utility::sendLocalReply( @@ -519,8 +519,8 @@ TEST(HttpUtility, SendLocalReplyHeadRequest) { bool is_reset = false; EXPECT_CALL(callbacks, encodeHeaders_(_, true)) .WillOnce(Invoke([&](const HeaderMap& headers, bool) -> void { - EXPECT_STREQ(headers.ContentLength()->value().c_str(), - fmt::format("{}", strlen("large")).c_str()); + EXPECT_EQ(headers.ContentLength()->value().getStringView(), + fmt::format("{}", strlen("large"))); })); Utility::sendLocalReply(false, callbacks, is_reset, Http::Code::PayloadTooLarge, "large", absl::nullopt, true); @@ -569,8 +569,8 @@ TEST(HttpUtility, TestPrepareHeaders) { Http::MessagePtr message = Utility::prepareHeaders(http_uri); - EXPECT_STREQ("/x/y/z", message->headers().Path()->value().c_str()); - EXPECT_STREQ("dns.name", message->headers().Host()->value().c_str()); + EXPECT_EQ("/x/y/z", message->headers().Path()->value().getStringView()); + EXPECT_EQ("dns.name", message->headers().Host()->value().getStringView()); } TEST(HttpUtility, QueryParamsToString) { diff --git a/test/common/router/header_formatter_test.cc b/test/common/router/header_formatter_test.cc index 
13d47e42d3da7..cbc37857c76e8 100644 --- a/test/common/router/header_formatter_test.cc +++ b/test/common/router/header_formatter_test.cc @@ -676,15 +676,15 @@ match: { prefix: "/new_endpoint" } EXPECT_EQ("123456000, 1, 12, 123, 1234, 12345, 123456, 1234560, 12345600, 123456000", header_map.get_("x-request-start-range")); - typedef std::map CountMap; + typedef absl::flat_hash_map CountMap; CountMap counts; header_map.iterate( [](const Http::HeaderEntry& header, void* cb_v) -> Http::HeaderMap::Iterate { CountMap* m = static_cast(cb_v); - std::string key = std::string{header.key().c_str()}; + absl::string_view key = header.key().getStringView(); CountMap::iterator i = m->find(key); if (i == m->end()) { - m->insert({key, 1}); + m->insert({std::string(key), 1}); } else { i->second++; } diff --git a/test/common/router/router_test.cc b/test/common/router/router_test.cc index b51a005c111bc..431b80aecc9e4 100644 --- a/test/common/router/router_test.cc +++ b/test/common/router/router_test.cc @@ -491,7 +491,7 @@ TEST_F(RouterTest, AddCookie) { EXPECT_CALL(callbacks_, encodeHeaders_(_, _)) .WillOnce(Invoke([&](const Http::HeaderMap& headers, const bool) -> void { - EXPECT_EQ(std::string{headers.get(Http::Headers::get().SetCookie)->value().c_str()}, + EXPECT_EQ(std::string{headers.get(Http::Headers::get().SetCookie)->value().getStringView()}, "foo=\"" + cookie_value + "\"; Max-Age=1337; HttpOnly"); })); expectResponseTimerCreate(); @@ -538,7 +538,7 @@ TEST_F(RouterTest, AddCookieNoDuplicate) { EXPECT_CALL(callbacks_, encodeHeaders_(_, _)) .WillOnce(Invoke([&](const Http::HeaderMap& headers, const bool) -> void { - EXPECT_STREQ(headers.get(Http::Headers::get().SetCookie)->value().c_str(), "foo=baz"); + EXPECT_EQ(headers.get(Http::Headers::get().SetCookie)->value().getStringView(), "foo=baz"); })); expectResponseTimerCreate(); @@ -592,9 +592,9 @@ TEST_F(RouterTest, AddMultipleCookies) { headers.iterate( [](const Http::HeaderEntry& header, void* context) -> Http::HeaderMap::Iterate 
{ - if (header.key().c_str() == Http::Headers::get().SetCookie.get().c_str()) { + if (header.key() == Http::Headers::get().SetCookie.get()) { static_cast*>(context)->Call( - std::string(header.value().c_str())); + std::string(header.value().getStringView())); } return Http::HeaderMap::Iterate::Continue; }, diff --git a/test/common/router/shadow_writer_impl_test.cc b/test/common/router/shadow_writer_impl_test.cc index d4b8682fa7697..245c67be85b31 100644 --- a/test/common/router/shadow_writer_impl_test.cc +++ b/test/common/router/shadow_writer_impl_test.cc @@ -34,7 +34,7 @@ class ShadowWriterImplTest : public testing::Test { Invoke([&](Http::MessagePtr& inner_message, Http::AsyncClient::Callbacks& callbacks, const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* { EXPECT_EQ(message, inner_message); - EXPECT_EQ(shadowed_host, message->headers().Host()->value().c_str()); + EXPECT_EQ(shadowed_host, message->headers().Host()->value().getStringView()); callback_ = &callbacks; return &request; })); diff --git a/test/common/upstream/health_checker_impl_test.cc b/test/common/upstream/health_checker_impl_test.cc index aca25bf4bc82b..bff564d59491f 100644 --- a/test/common/upstream/health_checker_impl_test.cc +++ b/test/common/upstream/health_checker_impl_test.cc @@ -850,9 +850,10 @@ TEST_F(HttpHealthCheckerImplTest, ZeroRetryInterval) { EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_)); EXPECT_CALL(test_sessions_[0]->request_encoder_, encodeHeaders(_, true)) .WillOnce(Invoke([&](const Http::HeaderMap& headers, bool) { - EXPECT_EQ(headers.Host()->value().c_str(), host); - EXPECT_EQ(headers.Path()->value().c_str(), path); - EXPECT_EQ(headers.Scheme()->value().c_str(), Http::Headers::get().SchemeValues.Http); + EXPECT_EQ(headers.Host()->value().getStringView(), host); + EXPECT_EQ(headers.Path()->value().getStringView(), path); + EXPECT_EQ(headers.Scheme()->value().getStringView(), + Http::Headers::get().SchemeValues.Http); })); 
health_checker_->start(); @@ -882,9 +883,10 @@ TEST_F(HttpHealthCheckerImplTest, SuccessServiceCheck) { EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_)); EXPECT_CALL(test_sessions_[0]->request_encoder_, encodeHeaders(_, true)) .WillOnce(Invoke([&](const Http::HeaderMap& headers, bool) { - EXPECT_EQ(headers.Host()->value().c_str(), host); - EXPECT_EQ(headers.Path()->value().c_str(), path); - EXPECT_EQ(headers.Scheme()->value().c_str(), Http::Headers::get().SchemeValues.Http); + EXPECT_EQ(headers.Host()->value().getStringView(), host); + EXPECT_EQ(headers.Path()->value().getStringView(), path); + EXPECT_EQ(headers.Scheme()->value().getStringView(), + Http::Headers::get().SchemeValues.Http); })); health_checker_->start(); @@ -916,8 +918,8 @@ TEST_F(HttpHealthCheckerImplTest, SuccessServiceCheckWithCustomHostValue) { EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_)); EXPECT_CALL(test_sessions_[0]->request_encoder_, encodeHeaders(_, true)) .WillOnce(Invoke([&](const Http::HeaderMap& headers, bool) { - EXPECT_EQ(headers.Host()->value().c_str(), host); - EXPECT_EQ(headers.Path()->value().c_str(), path); + EXPECT_EQ(headers.Host()->value().getStringView(), host); + EXPECT_EQ(headers.Path()->value().getStringView(), path); })); health_checker_->start(); @@ -977,23 +979,23 @@ TEST_F(HttpHealthCheckerImplTest, SuccessServiceCheckWithAdditionalHeaders) { EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_)); EXPECT_CALL(test_sessions_[0]->request_encoder_, encodeHeaders(_, true)) .WillRepeatedly(Invoke([&](const Http::HeaderMap& headers, bool) { - EXPECT_EQ(headers.get(header_ok)->value().c_str(), value_ok); - EXPECT_EQ(headers.get(header_cool)->value().c_str(), value_cool); - EXPECT_EQ(headers.get(header_awesome)->value().c_str(), value_awesome); + EXPECT_EQ(headers.get(header_ok)->value().getStringView(), value_ok); + EXPECT_EQ(headers.get(header_cool)->value().getStringView(), value_cool); + 
EXPECT_EQ(headers.get(header_awesome)->value().getStringView(), value_awesome); - EXPECT_EQ(headers.UserAgent()->value().c_str(), value_user_agent); - EXPECT_EQ(headers.get(upstream_metadata)->value().c_str(), value_upstream_metadata); + EXPECT_EQ(headers.UserAgent()->value().getStringView(), value_user_agent); + EXPECT_EQ(headers.get(upstream_metadata)->value().getStringView(), value_upstream_metadata); - EXPECT_EQ(headers.get(protocol)->value().c_str(), value_protocol); - EXPECT_EQ(headers.get(downstream_remote_address_without_port)->value().c_str(), + EXPECT_EQ(headers.get(protocol)->value().getStringView(), value_protocol); + EXPECT_EQ(headers.get(downstream_remote_address_without_port)->value().getStringView(), value_downstream_remote_address_without_port); - EXPECT_EQ(headers.get(downstream_local_address)->value().c_str(), + EXPECT_EQ(headers.get(downstream_local_address)->value().getStringView(), value_downstream_local_address); - EXPECT_EQ(headers.get(downstream_local_address_without_port)->value().c_str(), + EXPECT_EQ(headers.get(downstream_local_address_without_port)->value().getStringView(), value_downstream_local_address_without_port); - EXPECT_NE(headers.get(start_time)->value().c_str(), current_start_time); - current_start_time = headers.get(start_time)->value().c_str(); + EXPECT_NE(headers.get(start_time)->value().getStringView(), current_start_time); + current_start_time = std::string(headers.get(start_time)->value().getStringView()); })); health_checker_->start(); @@ -1780,8 +1782,8 @@ TEST_F(HttpHealthCheckerImplTest, SuccessServiceCheckWithAltPort) { EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_)); EXPECT_CALL(test_sessions_[0]->request_encoder_, encodeHeaders(_, true)) .WillOnce(Invoke([&](const Http::HeaderMap& headers, bool) { - EXPECT_EQ(headers.Host()->value().c_str(), host); - EXPECT_EQ(headers.Path()->value().c_str(), path); + EXPECT_EQ(headers.Host()->value().getStringView(), host); + 
EXPECT_EQ(headers.Path()->value().getStringView(), path); })); health_checker_->start(); @@ -2855,11 +2857,13 @@ class GrpcHealthCheckerImplTestBase { EXPECT_CALL(test_sessions_[0]->request_encoder_, encodeHeaders(_, false)) .WillOnce(Invoke([&](const Http::HeaderMap& headers, bool) { EXPECT_EQ(Http::Headers::get().ContentTypeValues.Grpc, - headers.ContentType()->value().c_str()); - EXPECT_EQ(std::string("/grpc.health.v1.Health/Check"), headers.Path()->value().c_str()); - EXPECT_EQ(Http::Headers::get().SchemeValues.Http, headers.Scheme()->value().c_str()); + headers.ContentType()->value().getStringView()); + EXPECT_EQ(std::string("/grpc.health.v1.Health/Check"), + headers.Path()->value().getStringView()); + EXPECT_EQ(Http::Headers::get().SchemeValues.Http, + headers.Scheme()->value().getStringView()); EXPECT_NE(nullptr, headers.Method()); - EXPECT_EQ(expected_host, headers.Host()->value().c_str()); + EXPECT_EQ(expected_host, headers.Host()->value().getStringView()); })); EXPECT_CALL(test_sessions_[0]->request_encoder_, encodeData(_, true)) .WillOnce(Invoke([&](Buffer::Instance& data, bool) { diff --git a/test/extensions/access_loggers/http_grpc/grpc_access_log_integration_test.cc b/test/extensions/access_loggers/http_grpc/grpc_access_log_integration_test.cc index c759bdd8856b7..0e3188e776839 100644 --- a/test/extensions/access_loggers/http_grpc/grpc_access_log_integration_test.cc +++ b/test/extensions/access_loggers/http_grpc/grpc_access_log_integration_test.cc @@ -67,10 +67,11 @@ class AccessLogIntegrationTest : public Grpc::GrpcClientIntegrationParamTest, AssertionResult waitForAccessLogRequest(const std::string& expected_request_msg_yaml) { envoy::service::accesslog::v2::StreamAccessLogsMessage request_msg; VERIFY_ASSERTION(access_log_request_->waitForGrpcMessage(*dispatcher_, request_msg)); - EXPECT_STREQ("POST", access_log_request_->headers().Method()->value().c_str()); - EXPECT_STREQ("/envoy.service.accesslog.v2.AccessLogService/StreamAccessLogs", - 
access_log_request_->headers().Path()->value().c_str()); - EXPECT_STREQ("application/grpc", access_log_request_->headers().ContentType()->value().c_str()); + EXPECT_EQ("POST", access_log_request_->headers().Method()->value().getStringView()); + EXPECT_EQ("/envoy.service.accesslog.v2.AccessLogService/StreamAccessLogs", + access_log_request_->headers().Path()->value().getStringView()); + EXPECT_EQ("application/grpc", + access_log_request_->headers().ContentType()->value().getStringView()); envoy::service::accesslog::v2::StreamAccessLogsMessage expected_request_msg; MessageUtil::loadFromYaml(expected_request_msg_yaml, expected_request_msg); @@ -140,7 +141,7 @@ TEST_P(AccessLogIntegrationTest, BasicAccessLogFlow) { BufferingStreamDecoderPtr response = IntegrationUtil::makeSingleRequest( lookupPort("http"), "GET", "/notfound", "", downstream_protocol_, version_); EXPECT_TRUE(response->complete()); - EXPECT_STREQ("404", response->headers().Status()->value().c_str()); + EXPECT_EQ("404", response->headers().Status()->value().getStringView()); ASSERT_TRUE(waitForAccessLogRequest(R"EOF( http_logs: log_entry: @@ -178,7 +179,7 @@ TEST_P(AccessLogIntegrationTest, BasicAccessLogFlow) { response = IntegrationUtil::makeSingleRequest(lookupPort("http"), "GET", "/notfound", "", downstream_protocol_, version_); EXPECT_TRUE(response->complete()); - EXPECT_STREQ("404", response->headers().Status()->value().c_str()); + EXPECT_EQ("404", response->headers().Status()->value().getStringView()); ASSERT_TRUE(waitForAccessLogStream()); ASSERT_TRUE(waitForAccessLogRequest(fmt::format(R"EOF( identifier: diff --git a/test/extensions/filters/http/buffer/buffer_filter_integration_test.cc b/test/extensions/filters/http/buffer/buffer_filter_integration_test.cc index 49761fac28f5b..bb7dd5920f86c 100644 --- a/test/extensions/filters/http/buffer/buffer_filter_integration_test.cc +++ b/test/extensions/filters/http/buffer/buffer_filter_integration_test.cc @@ -53,7 +53,7 @@ TEST_P(BufferIntegrationTest, 
RouterRequestBufferLimitExceeded) { response->waitForEndStream(); ASSERT_TRUE(response->complete()); - EXPECT_STREQ("413", response->headers().Status()->value().c_str()); + EXPECT_EQ("413", response->headers().Status()->value().getStringView()); } ConfigHelper::HttpModifierFunction overrideConfig(const std::string& json_config) { @@ -94,7 +94,7 @@ TEST_P(BufferIntegrationTest, RouteDisabled) { response->waitForEndStream(); ASSERT_TRUE(response->complete()); - EXPECT_STREQ("200", response->headers().Status()->value().c_str()); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); } TEST_P(BufferIntegrationTest, RouteOverride) { @@ -120,7 +120,7 @@ TEST_P(BufferIntegrationTest, RouteOverride) { response->waitForEndStream(); ASSERT_TRUE(response->complete()); - EXPECT_STREQ("200", response->headers().Status()->value().c_str()); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); } } // namespace diff --git a/test/extensions/filters/http/common/aws/signer_impl_test.cc b/test/extensions/filters/http/common/aws/signer_impl_test.cc index 67f990b3228b9..fe4991c66d9d5 100644 --- a/test/extensions/filters/http/common/aws/signer_impl_test.cc +++ b/test/extensions/filters/http/common/aws/signer_impl_test.cc @@ -82,12 +82,12 @@ TEST_F(SignerImplTest, SignDateHeader) { addPath("/"); signer_.sign(*message_); EXPECT_EQ(nullptr, message_->headers().get(SignatureHeaders::get().ContentSha256)); - EXPECT_STREQ("20180102T030400Z", - message_->headers().get(SignatureHeaders::get().Date)->value().c_str()); - EXPECT_STREQ("AWS4-HMAC-SHA256 Credential=akid/20180102/region/service/aws4_request, " - "SignedHeaders=x-amz-date, " - "Signature=1310784f67248cab70d98b9404d601f30d8fe20bd1820560cce224f4131dc1cc", - message_->headers().Authorization()->value().c_str()); + EXPECT_EQ("20180102T030400Z", + message_->headers().get(SignatureHeaders::get().Date)->value().getStringView()); + EXPECT_EQ("AWS4-HMAC-SHA256 
Credential=akid/20180102/region/service/aws4_request, " + "SignedHeaders=x-amz-date, " + "Signature=1310784f67248cab70d98b9404d601f30d8fe20bd1820560cce224f4131dc1cc", + message_->headers().Authorization()->value().getStringView()); } // Verify we sign the security token header if the token is present in the credentials @@ -96,12 +96,13 @@ TEST_F(SignerImplTest, SignSecurityTokenHeader) { addMethod("GET"); addPath("/"); signer_.sign(*message_); - EXPECT_STREQ("token", - message_->headers().get(SignatureHeaders::get().SecurityToken)->value().c_str()); - EXPECT_STREQ("AWS4-HMAC-SHA256 Credential=akid/20180102/region/service/aws4_request, " - "SignedHeaders=x-amz-date;x-amz-security-token, " - "Signature=ff1d9fa7e54a72677b5336df047bb1f1493f86b92099973bf62da3af852d1679", - message_->headers().Authorization()->value().c_str()); + EXPECT_EQ( + "token", + message_->headers().get(SignatureHeaders::get().SecurityToken)->value().getStringView()); + EXPECT_EQ("AWS4-HMAC-SHA256 Credential=akid/20180102/region/service/aws4_request, " + "SignedHeaders=x-amz-date;x-amz-security-token, " + "Signature=ff1d9fa7e54a72677b5336df047bb1f1493f86b92099973bf62da3af852d1679", + message_->headers().Authorization()->value().getStringView()); } // Verify we sign the content header as the hashed empty string if the body is empty @@ -110,12 +111,13 @@ TEST_F(SignerImplTest, SignEmptyContentHeader) { addMethod("GET"); addPath("/"); signer_.sign(*message_, true); - EXPECT_STREQ(SignatureConstants::get().HashedEmptyString.c_str(), - message_->headers().get(SignatureHeaders::get().ContentSha256)->value().c_str()); - EXPECT_STREQ("AWS4-HMAC-SHA256 Credential=akid/20180102/region/service/aws4_request, " - "SignedHeaders=x-amz-content-sha256;x-amz-date, " - "Signature=4ee6aa9355259c18133f150b139ea9aeb7969c9408ad361b2151f50a516afe42", - message_->headers().Authorization()->value().c_str()); + EXPECT_EQ( + SignatureConstants::get().HashedEmptyString, + 
message_->headers().get(SignatureHeaders::get().ContentSha256)->value().getStringView()); + EXPECT_EQ("AWS4-HMAC-SHA256 Credential=akid/20180102/region/service/aws4_request, " + "SignedHeaders=x-amz-content-sha256;x-amz-date, " + "Signature=4ee6aa9355259c18133f150b139ea9aeb7969c9408ad361b2151f50a516afe42", + message_->headers().Authorization()->value().getStringView()); } // Verify we sign the content header correctly when we have a body @@ -125,12 +127,13 @@ TEST_F(SignerImplTest, SignContentHeader) { addPath("/"); setBody("test1234"); signer_.sign(*message_, true); - EXPECT_STREQ("937e8d5fbb48bd4949536cd65b8d35c426b80d2f830c5c308e2cdec422ae2244", - message_->headers().get(SignatureHeaders::get().ContentSha256)->value().c_str()); - EXPECT_STREQ("AWS4-HMAC-SHA256 Credential=akid/20180102/region/service/aws4_request, " - "SignedHeaders=x-amz-content-sha256;x-amz-date, " - "Signature=4eab89c36f45f2032d6010ba1adab93f8510ddd6afe540821f3a05bb0253e27b", - message_->headers().Authorization()->value().c_str()); + EXPECT_EQ( + "937e8d5fbb48bd4949536cd65b8d35c426b80d2f830c5c308e2cdec422ae2244", + message_->headers().get(SignatureHeaders::get().ContentSha256)->value().getStringView()); + EXPECT_EQ("AWS4-HMAC-SHA256 Credential=akid/20180102/region/service/aws4_request, " + "SignedHeaders=x-amz-content-sha256;x-amz-date, " + "Signature=4eab89c36f45f2032d6010ba1adab93f8510ddd6afe540821f3a05bb0253e27b", + message_->headers().Authorization()->value().getStringView()); } // Verify we sign some extra headers @@ -142,10 +145,10 @@ TEST_F(SignerImplTest, SignExtraHeaders) { addHeader("b", "b_value"); addHeader("c", "c_value"); signer_.sign(*message_); - EXPECT_STREQ("AWS4-HMAC-SHA256 Credential=akid/20180102/region/service/aws4_request, " - "SignedHeaders=a;b;c;x-amz-date, " - "Signature=d5e025e1cf0d5af0d83110bc2ef1cafd2d9dca1dea9d7767f58308da64aa6558", - message_->headers().Authorization()->value().c_str()); + EXPECT_EQ("AWS4-HMAC-SHA256 
Credential=akid/20180102/region/service/aws4_request, " + "SignedHeaders=a;b;c;x-amz-date, " + "Signature=d5e025e1cf0d5af0d83110bc2ef1cafd2d9dca1dea9d7767f58308da64aa6558", + message_->headers().Authorization()->value().getStringView()); } // Verify signing a host header @@ -155,10 +158,10 @@ TEST_F(SignerImplTest, SignHostHeader) { addPath("/"); addHeader("host", "www.example.com"); signer_.sign(*message_); - EXPECT_STREQ("AWS4-HMAC-SHA256 Credential=akid/20180102/region/service/aws4_request, " - "SignedHeaders=host;x-amz-date, " - "Signature=60216ee44dd651322ea10cc6747308dd30e582aaa773f6c1b1354e486385c021", - message_->headers().Authorization()->value().c_str()); + EXPECT_EQ("AWS4-HMAC-SHA256 Credential=akid/20180102/region/service/aws4_request, " + "SignedHeaders=host;x-amz-date, " + "Signature=60216ee44dd651322ea10cc6747308dd30e582aaa773f6c1b1354e486385c021", + message_->headers().Authorization()->value().getStringView()); } } // namespace @@ -166,4 +169,4 @@ TEST_F(SignerImplTest, SignHostHeader) { } // namespace Common } // namespace HttpFilters } // namespace Extensions -} // namespace Envoy \ No newline at end of file +} // namespace Envoy diff --git a/test/extensions/filters/http/grpc_json_transcoder/grpc_json_transcoder_integration_test.cc b/test/extensions/filters/http/grpc_json_transcoder/grpc_json_transcoder_integration_test.cc index 6f8450b441d49..9117f17be78ac 100644 --- a/test/extensions/filters/http/grpc_json_transcoder/grpc_json_transcoder_integration_test.cc +++ b/test/extensions/filters/http/grpc_json_transcoder/grpc_json_transcoder_integration_test.cc @@ -136,8 +136,9 @@ class GrpcJsonTranscoderIntegrationTest response_headers.iterate( [](const Http::HeaderEntry& entry, void* context) -> Http::HeaderMap::Iterate { IntegrationStreamDecoder* response = static_cast(context); - Http::LowerCaseString lower_key{entry.key().c_str()}; - EXPECT_STREQ(entry.value().c_str(), response->headers().get(lower_key)->value().c_str()); + Http::LowerCaseString 
lower_key{std::string(entry.key().getStringView())}; + EXPECT_EQ(entry.value().getStringView(), + response->headers().get(lower_key)->value().getStringView()); return Http::HeaderMap::Iterate::Continue; }, response.get()); diff --git a/test/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter_test.cc b/test/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter_test.cc index b6af82749c8db..1854aaf2948b4 100644 --- a/test/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter_test.cc +++ b/test/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter_test.cc @@ -571,7 +571,7 @@ TEST_F(GrpcJsonTranscoderFilterTest, TranscodingUnaryError) { EXPECT_CALL(decoder_callbacks_, encodeHeaders_(_, false)) .WillOnce(Invoke([](Http::HeaderMap& headers, bool end_stream) { - EXPECT_STREQ("400", headers.Status()->value().c_str()); + EXPECT_EQ("400", headers.Status()->value().getStringView()); EXPECT_FALSE(end_stream); })); EXPECT_CALL(decoder_callbacks_, encodeData(_, true)); diff --git a/test/extensions/filters/http/grpc_web/grpc_web_filter_test.cc b/test/extensions/filters/http/grpc_web/grpc_web_filter_test.cc index 70e6f51708b9a..bd52b270a517b 100644 --- a/test/extensions/filters/http/grpc_web/grpc_web_filter_test.cc +++ b/test/extensions/filters/http/grpc_web/grpc_web_filter_test.cc @@ -84,7 +84,7 @@ class GrpcWebFilterTest : public testing::TestWithParamvalue().c_str(), code); + StringUtil::atoull(std::string(headers.Status()->value().getStringView()).c_str(), code); EXPECT_EQ(static_cast(expected_code), code); })); EXPECT_CALL(decoder_callbacks_, encodeData(_, _)) @@ -94,12 +94,13 @@ class GrpcWebFilterTest : public testing::TestWithParamvalue().c_str()); + request_headers.ContentType()->value().getStringView()); // Ensure we never send content-length upstream EXPECT_EQ(nullptr, request_headers.ContentLength()); - EXPECT_EQ(Http::Headers::get().TEValues.Trailers, request_headers.TE()->value().c_str()); + 
EXPECT_EQ(Http::Headers::get().TEValues.Trailers, + request_headers.TE()->value().getStringView()); EXPECT_EQ(Http::Headers::get().GrpcAcceptEncodingValues.Default, - request_headers.GrpcAcceptEncoding()->value().c_str()); + request_headers.GrpcAcceptEncoding()->value().getStringView()); } GrpcWebFilter filter_; @@ -118,7 +119,7 @@ TEST_F(GrpcWebFilterTest, SupportedContentTypes) { request_headers.addCopy(Http::Headers::get().ContentType, content_type); EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers, false)); EXPECT_EQ(Http::Headers::get().ContentTypeValues.Grpc, - request_headers.ContentType()->value().c_str()); + request_headers.ContentType()->value().getStringView()); } } @@ -282,8 +283,8 @@ TEST_P(GrpcWebFilterTest, Unary) { request_trailers.addCopy(Http::Headers::get().GrpcStatus, "0"); request_trailers.addCopy(Http::Headers::get().GrpcMessage, "ok"); EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.decodeTrailers(request_trailers)); - EXPECT_STREQ("0", request_trailers.GrpcStatus()->value().c_str()); - EXPECT_STREQ("ok", request_trailers.GrpcMessage()->value().c_str()); + EXPECT_EQ("0", request_trailers.GrpcStatus()->value().getStringView()); + EXPECT_EQ("ok", request_trailers.GrpcMessage()->value().getStringView()); // Tests response headers. 
Http::TestHeaderMapImpl response_headers; @@ -292,10 +293,10 @@ TEST_P(GrpcWebFilterTest, Unary) { EXPECT_EQ("200", response_headers.get_(Http::Headers::get().Status.get())); if (accept_binary_response()) { EXPECT_EQ(Http::Headers::get().ContentTypeValues.GrpcWebProto, - response_headers.ContentType()->value().c_str()); + response_headers.ContentType()->value().getStringView()); } else if (accept_text_response()) { EXPECT_EQ(Http::Headers::get().ContentTypeValues.GrpcWebTextProto, - response_headers.ContentType()->value().c_str()); + response_headers.ContentType()->value().getStringView()); } else { FAIL() << "Unsupported gRPC-Web request accept: " << request_accept(); } @@ -326,7 +327,7 @@ TEST_P(GrpcWebFilterTest, Unary) { EXPECT_EQ(std::string(B64_MESSAGE, B64_MESSAGE_SIZE), encoded_buffer.toString()); } else { FAIL() << "Unsupported gRPC-Web response content-type: " - << response_headers.ContentType()->value().c_str(); + << response_headers.ContentType()->value().getStringView(); } // Tests response trailers. 
@@ -343,7 +344,7 @@ TEST_P(GrpcWebFilterTest, Unary) { EXPECT_EQ(std::string(TRAILERS, TRAILERS_SIZE), Base64::decode(trailers_buffer.toString())); } else { FAIL() << "Unsupported gRPC-Web response content-type: " - << response_headers.ContentType()->value().c_str(); + << response_headers.ContentType()->value().getStringView(); } } diff --git a/test/extensions/filters/http/gzip/gzip_filter_integration_test.cc b/test/extensions/filters/http/gzip/gzip_filter_integration_test.cc index 4dae4bf226af0..09e210a39e60b 100644 --- a/test/extensions/filters/http/gzip/gzip_filter_integration_test.cc +++ b/test/extensions/filters/http/gzip/gzip_filter_integration_test.cc @@ -36,13 +36,13 @@ class GzipIntegrationTest : public testing::TestWithParamcomplete()); EXPECT_EQ(0U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_STREQ("200", response->headers().Status()->value().c_str()); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); ASSERT_TRUE(response->headers().ContentEncoding() != nullptr); EXPECT_EQ(Http::Headers::get().ContentEncodingValues.Gzip, - response->headers().ContentEncoding()->value().c_str()); + response->headers().ContentEncoding()->value().getStringView()); ASSERT_TRUE(response->headers().TransferEncoding() != nullptr); EXPECT_EQ(Http::Headers::get().TransferEncodingValues.Chunked, - response->headers().TransferEncoding()->value().c_str()); + response->headers().TransferEncoding()->value().getStringView()); Buffer::OwnedImpl decompressed_response{}; const Buffer::OwnedImpl compressed_response{response->body()}; @@ -61,7 +61,7 @@ class GzipIntegrationTest : public testing::TestWithParamcomplete()); EXPECT_EQ(0U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_STREQ("200", response->headers().Status()->value().c_str()); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); ASSERT_TRUE(response->headers().ContentEncoding() == nullptr); ASSERT_EQ(content_length, 
response->body().size()); EXPECT_EQ(response->body(), std::string(content_length, 'a')); @@ -173,8 +173,8 @@ TEST_P(GzipIntegrationTest, UpstreamResponseAlreadyEncoded) { EXPECT_TRUE(upstream_request_->complete()); EXPECT_EQ(0U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_STREQ("200", response->headers().Status()->value().c_str()); - ASSERT_STREQ("br", response->headers().ContentEncoding()->value().c_str()); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + ASSERT_EQ("br", response->headers().ContentEncoding()->value().getStringView()); EXPECT_EQ(128U, response->body().size()); } @@ -197,7 +197,7 @@ TEST_P(GzipIntegrationTest, NotEnoughContentLength) { EXPECT_TRUE(upstream_request_->complete()); EXPECT_EQ(0U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_STREQ("200", response->headers().Status()->value().c_str()); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); ASSERT_TRUE(response->headers().ContentEncoding() == nullptr); EXPECT_EQ(10U, response->body().size()); } @@ -220,7 +220,7 @@ TEST_P(GzipIntegrationTest, EmptyResponse) { EXPECT_TRUE(upstream_request_->complete()); EXPECT_EQ(0U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_STREQ("204", response->headers().Status()->value().c_str()); + EXPECT_EQ("204", response->headers().Status()->value().getStringView()); ASSERT_TRUE(response->headers().ContentEncoding() == nullptr); EXPECT_EQ(0U, response->body().size()); } @@ -275,9 +275,9 @@ TEST_P(GzipIntegrationTest, AcceptanceFullConfigChunkedResponse) { EXPECT_TRUE(upstream_request_->complete()); EXPECT_EQ(0U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_STREQ("200", response->headers().Status()->value().c_str()); - ASSERT_STREQ("gzip", response->headers().ContentEncoding()->value().c_str()); - ASSERT_STREQ("chunked", response->headers().TransferEncoding()->value().c_str()); + 
EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + ASSERT_EQ("gzip", response->headers().ContentEncoding()->value().getStringView()); + ASSERT_EQ("chunked", response->headers().TransferEncoding()->value().getStringView()); } /** @@ -299,8 +299,8 @@ TEST_P(GzipIntegrationTest, AcceptanceFullConfigVeryHeader) { EXPECT_TRUE(upstream_request_->complete()); EXPECT_EQ(0U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_STREQ("200", response->headers().Status()->value().c_str()); - ASSERT_STREQ("gzip", response->headers().ContentEncoding()->value().c_str()); - ASSERT_STREQ("Cookie, Accept-Encoding", response->headers().Vary()->value().c_str()); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + ASSERT_EQ("gzip", response->headers().ContentEncoding()->value().getStringView()); + ASSERT_EQ("Cookie, Accept-Encoding", response->headers().Vary()->value().getStringView()); } } // namespace Envoy diff --git a/test/extensions/filters/http/health_check/health_check_test.cc b/test/extensions/filters/http/health_check/health_check_test.cc index cd4d05df1dd2c..bb1552ebe5369 100644 --- a/test/extensions/filters/http/health_check/health_check_test.cc +++ b/test/extensions/filters/http/health_check/health_check_test.cc @@ -111,7 +111,7 @@ TEST_F(HealthCheckFilterNoPassThroughTest, NotHcRequest) { Buffer::OwnedImpl body; EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(body, false)); EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->encodeTrailers(service_response)); - EXPECT_STREQ("true", service_response.EnvoyImmediateHealthCheckFail()->value().c_str()); + EXPECT_EQ("true", service_response.EnvoyImmediateHealthCheckFail()->value().getStringView()); } TEST_F(HealthCheckFilterNoPassThroughTest, ComputedHealth) { @@ -216,7 +216,8 @@ TEST_F(HealthCheckFilterNoPassThroughTest, HealthCheckFailedCallbackCalled) { .Times(1) .WillRepeatedly(Invoke([&](Http::HeaderMap& headers, bool end_stream) 
{ filter_->encodeHeaders(headers, end_stream); - EXPECT_STREQ("cluster_name", headers.EnvoyUpstreamHealthCheckedCluster()->value().c_str()); + EXPECT_EQ("cluster_name", + headers.EnvoyUpstreamHealthCheckedCluster()->value().getStringView()); EXPECT_EQ(nullptr, headers.EnvoyImmediateHealthCheckFail()); })); @@ -240,8 +241,8 @@ TEST_F(HealthCheckFilterPassThroughTest, Ok) { Http::TestHeaderMapImpl service_hc_respnose{{":status", "200"}}; EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(service_hc_respnose, true)); - EXPECT_STREQ("cluster_name", - service_hc_respnose.EnvoyUpstreamHealthCheckedCluster()->value().c_str()); + EXPECT_EQ("cluster_name", + service_hc_respnose.EnvoyUpstreamHealthCheckedCluster()->value().getStringView()); } TEST_F(HealthCheckFilterPassThroughTest, OkWithContinue) { @@ -260,8 +261,8 @@ TEST_F(HealthCheckFilterPassThroughTest, OkWithContinue) { EXPECT_EQ(Http::FilterMetadataStatus::Continue, filter_->encodeMetadata(metadata_map)); Http::TestHeaderMapImpl service_hc_respnose{{":status", "200"}}; EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(service_hc_respnose, true)); - EXPECT_STREQ("cluster_name", - service_hc_respnose.EnvoyUpstreamHealthCheckedCluster()->value().c_str()); + EXPECT_EQ("cluster_name", + service_hc_respnose.EnvoyUpstreamHealthCheckedCluster()->value().getStringView()); } TEST_F(HealthCheckFilterPassThroughTest, Failed) { @@ -290,7 +291,8 @@ TEST_F(HealthCheckFilterCachingTest, CachedServiceUnavailableCallbackCalled) { .Times(1) .WillRepeatedly(Invoke([&](Http::HeaderMap& headers, bool end_stream) { filter_->encodeHeaders(headers, end_stream); - EXPECT_STREQ("cluster_name", headers.EnvoyUpstreamHealthCheckedCluster()->value().c_str()); + EXPECT_EQ("cluster_name", + headers.EnvoyUpstreamHealthCheckedCluster()->value().getStringView()); })); EXPECT_CALL(callbacks_.stream_info_, @@ -311,7 +313,8 @@ TEST_F(HealthCheckFilterCachingTest, CachedOkCallbackNotCalled) { .Times(1) 
.WillRepeatedly(Invoke([&](Http::HeaderMap& headers, bool end_stream) { filter_->encodeHeaders(headers, end_stream); - EXPECT_STREQ("cluster_name", headers.EnvoyUpstreamHealthCheckedCluster()->value().c_str()); + EXPECT_EQ("cluster_name", + headers.EnvoyUpstreamHealthCheckedCluster()->value().getStringView()); })); EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, @@ -338,7 +341,8 @@ TEST_F(HealthCheckFilterCachingTest, All) { .Times(1) .WillRepeatedly(Invoke([&](Http::HeaderMap& headers, bool end_stream) { filter_->encodeHeaders(headers, end_stream); - EXPECT_STREQ("cluster_name", headers.EnvoyUpstreamHealthCheckedCluster()->value().c_str()); + EXPECT_EQ("cluster_name", + headers.EnvoyUpstreamHealthCheckedCluster()->value().getStringView()); })); EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, filter_->decodeHeaders(request_headers_, true)); @@ -371,7 +375,8 @@ TEST_F(HealthCheckFilterCachingTest, DegradedHeader) { .Times(1) .WillRepeatedly(Invoke([&](Http::HeaderMap& headers, bool end_stream) { filter_->encodeHeaders(headers, end_stream); - EXPECT_STREQ("cluster_name", headers.EnvoyUpstreamHealthCheckedCluster()->value().c_str()); + EXPECT_EQ("cluster_name", + headers.EnvoyUpstreamHealthCheckedCluster()->value().getStringView()); })); EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, filter_->decodeHeaders(request_headers_, true)); diff --git a/test/extensions/filters/http/jwt_authn/filter_integration_test.cc b/test/extensions/filters/http/jwt_authn/filter_integration_test.cc index c7c5d0ad2f026..4dade512bf24b 100644 --- a/test/extensions/filters/http/jwt_authn/filter_integration_test.cc +++ b/test/extensions/filters/http/jwt_authn/filter_integration_test.cc @@ -111,7 +111,7 @@ TEST_P(LocalJwksIntegrationTest, WithGoodToken) { upstream_request_->encodeHeaders(Http::TestHeaderMapImpl{{":status", "200"}}, true); response->waitForEndStream(); ASSERT_TRUE(response->complete()); - EXPECT_STREQ("200", response->headers().Status()->value().c_str()); + 
EXPECT_EQ("200", response->headers().Status()->value().getStringView()); } // With local Jwks, this test verifies a request is rejected with an expired Jwt token. @@ -131,7 +131,7 @@ TEST_P(LocalJwksIntegrationTest, ExpiredToken) { response->waitForEndStream(); ASSERT_TRUE(response->complete()); - EXPECT_STREQ("401", response->headers().Status()->value().c_str()); + EXPECT_EQ("401", response->headers().Status()->value().getStringView()); } TEST_P(LocalJwksIntegrationTest, MissingToken) { @@ -149,7 +149,7 @@ TEST_P(LocalJwksIntegrationTest, MissingToken) { response->waitForEndStream(); ASSERT_TRUE(response->complete()); - EXPECT_STREQ("401", response->headers().Status()->value().c_str()); + EXPECT_EQ("401", response->headers().Status()->value().getStringView()); } TEST_P(LocalJwksIntegrationTest, ExpiredTokenHeadReply) { @@ -168,9 +168,9 @@ TEST_P(LocalJwksIntegrationTest, ExpiredTokenHeadReply) { response->waitForEndStream(); ASSERT_TRUE(response->complete()); - EXPECT_STREQ("401", response->headers().Status()->value().c_str()); - EXPECT_STRNE("0", response->headers().ContentLength()->value().c_str()); - EXPECT_STREQ("", response->body().c_str()); + EXPECT_EQ("401", response->headers().Status()->value().getStringView()); + EXPECT_NE("0", response->headers().ContentLength()->value().getStringView()); + EXPECT_THAT(response->body(), ::testing::IsEmpty()); } // This test verifies a request is passed with a path that don't match any requirements. 
@@ -192,7 +192,7 @@ TEST_P(LocalJwksIntegrationTest, NoRequiresPath) { response->waitForEndStream(); ASSERT_TRUE(response->complete()); - EXPECT_STREQ("200", response->headers().Status()->value().c_str()); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); } // This test verifies JwtRequirement specified from filer state rules @@ -269,7 +269,7 @@ TEST_P(LocalJwksIntegrationTest, FilterStateRequirement) { response->waitForEndStream(); ASSERT_TRUE(response->complete()); - EXPECT_EQ(test.expected_status, response->headers().Status()->value().c_str()); + EXPECT_EQ(test.expected_status, response->headers().Status()->value().getStringView()); } } @@ -364,7 +364,7 @@ TEST_P(RemoteJwksIntegrationTest, WithGoodToken) { response->waitForEndStream(); ASSERT_TRUE(response->complete()); - EXPECT_STREQ("200", response->headers().Status()->value().c_str()); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); cleanup(); } @@ -389,7 +389,7 @@ TEST_P(RemoteJwksIntegrationTest, FetchFailedJwks) { response->waitForEndStream(); ASSERT_TRUE(response->complete()); - EXPECT_STREQ("401", response->headers().Status()->value().c_str()); + EXPECT_EQ("401", response->headers().Status()->value().getStringView()); cleanup(); } diff --git a/test/extensions/filters/http/lua/lua_integration_test.cc b/test/extensions/filters/http/lua/lua_integration_test.cc index 3b7d9b5adc9e1..8e72a0aef4f6f 100644 --- a/test/extensions/filters/http/lua/lua_integration_test.cc +++ b/test/extensions/filters/http/lua/lua_integration_test.cc @@ -156,32 +156,34 @@ name: envoy.lua encoder.encodeData(request_data2, true); waitForNextUpstreamRequest(); - EXPECT_STREQ("10", upstream_request_->headers() - .get(Http::LowerCaseString("request_body_size")) + EXPECT_EQ("10", upstream_request_->headers() + .get(Http::LowerCaseString("request_body_size")) + ->value() + .getStringView()); + + EXPECT_EQ("bar", upstream_request_->headers() + 
.get(Http::LowerCaseString("request_metadata_foo")) + ->value() + .getStringView()); + + EXPECT_EQ("bat", upstream_request_->headers() + .get(Http::LowerCaseString("request_metadata_baz")) + ->value() + .getStringView()); + EXPECT_EQ("false", upstream_request_->headers() + .get(Http::LowerCaseString("request_secure")) ->value() - .c_str()); - - EXPECT_STREQ("bar", upstream_request_->headers() - .get(Http::LowerCaseString("request_metadata_foo")) - ->value() - .c_str()); - - EXPECT_STREQ("bat", upstream_request_->headers() - .get(Http::LowerCaseString("request_metadata_baz")) - ->value() - .c_str()); - EXPECT_STREQ( - "false", - upstream_request_->headers().get(Http::LowerCaseString("request_secure"))->value().c_str()); - - EXPECT_STREQ( - "HTTP/1.1", - upstream_request_->headers().get(Http::LowerCaseString("request_protocol"))->value().c_str()); + .getStringView()); + + EXPECT_EQ("HTTP/1.1", upstream_request_->headers() + .get(Http::LowerCaseString("request_protocol")) + ->value() + .getStringView()); - EXPECT_STREQ("bar", upstream_request_->headers() - .get(Http::LowerCaseString("request_dynamic_metadata_value")) - ->value() - .c_str()); + EXPECT_EQ("bar", upstream_request_->headers() + .get(Http::LowerCaseString("request_dynamic_metadata_value")) + ->value() + .getStringView()); Http::TestHeaderMapImpl response_headers{{":status", "200"}, {"foo", "bar"}}; upstream_request_->encodeHeaders(response_headers, false); @@ -192,16 +194,21 @@ name: envoy.lua response->waitForEndStream(); - EXPECT_STREQ( - "7", response->headers().get(Http::LowerCaseString("response_body_size"))->value().c_str()); - EXPECT_STREQ( - "bar", - response->headers().get(Http::LowerCaseString("response_metadata_foo"))->value().c_str()); - EXPECT_STREQ( - "bat", - response->headers().get(Http::LowerCaseString("response_metadata_baz"))->value().c_str()); - EXPECT_STREQ("HTTP/1.1", - response->headers().get(Http::LowerCaseString("request_protocol"))->value().c_str()); + EXPECT_EQ("7", 
response->headers() + .get(Http::LowerCaseString("response_body_size")) + ->value() + .getStringView()); + EXPECT_EQ("bar", response->headers() + .get(Http::LowerCaseString("response_metadata_foo")) + ->value() + .getStringView()); + EXPECT_EQ("bat", response->headers() + .get(Http::LowerCaseString("response_metadata_baz")) + ->value() + .getStringView()); + EXPECT_EQ( + "HTTP/1.1", + response->headers().get(Http::LowerCaseString("request_protocol"))->value().getStringView()); EXPECT_EQ(nullptr, response->headers().get(Http::LowerCaseString("foo"))); cleanup(); @@ -249,13 +256,14 @@ name: envoy.lua lua_request_->encodeData(response_data1, true); waitForNextUpstreamRequest(); - EXPECT_STREQ( - "bar", - upstream_request_->headers().get(Http::LowerCaseString("upstream_foo"))->value().c_str()); - EXPECT_STREQ("4", upstream_request_->headers() - .get(Http::LowerCaseString("upstream_body_size")) - ->value() - .c_str()); + EXPECT_EQ("bar", upstream_request_->headers() + .get(Http::LowerCaseString("upstream_foo")) + ->value() + .getStringView()); + EXPECT_EQ("4", upstream_request_->headers() + .get(Http::LowerCaseString("upstream_body_size")) + ->value() + .getStringView()); upstream_request_->encodeHeaders(default_response_headers_, true); response->waitForEndStream(); @@ -308,7 +316,7 @@ name: envoy.lua cleanup(); EXPECT_TRUE(response->complete()); - EXPECT_STREQ("403", response->headers().Status()->value().c_str()); + EXPECT_EQ("403", response->headers().Status()->value().getStringView()); EXPECT_EQ("nope", response->body()); } @@ -341,7 +349,7 @@ name: envoy.lua cleanup(); EXPECT_TRUE(response->complete()); - EXPECT_STREQ("200", response->headers().Status()->value().c_str()); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); } // Should survive from 30 calls when calling streamInfo():dynamicMetadata(). 
This is a regression @@ -374,7 +382,7 @@ name: envoy.lua response->waitForEndStream(); EXPECT_TRUE(response->complete()); - EXPECT_STREQ("200", response->headers().Status()->value().c_str()); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); } cleanup(); diff --git a/test/extensions/filters/http/rbac/rbac_filter_integration_test.cc b/test/extensions/filters/http/rbac/rbac_filter_integration_test.cc index d79359d41a067..345e7c32199d9 100644 --- a/test/extensions/filters/http/rbac/rbac_filter_integration_test.cc +++ b/test/extensions/filters/http/rbac/rbac_filter_integration_test.cc @@ -57,7 +57,7 @@ TEST_P(RBACIntegrationTest, Allowed) { response->waitForEndStream(); ASSERT_TRUE(response->complete()); - EXPECT_STREQ("200", response->headers().Status()->value().c_str()); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); } TEST_P(RBACIntegrationTest, Denied) { @@ -77,7 +77,7 @@ TEST_P(RBACIntegrationTest, Denied) { 1024); response->waitForEndStream(); ASSERT_TRUE(response->complete()); - EXPECT_STREQ("403", response->headers().Status()->value().c_str()); + EXPECT_EQ("403", response->headers().Status()->value().getStringView()); } TEST_P(RBACIntegrationTest, DeniedWithPrefixRule) { @@ -104,7 +104,7 @@ TEST_P(RBACIntegrationTest, DeniedWithPrefixRule) { response->waitForEndStream(); ASSERT_TRUE(response->complete()); - EXPECT_STREQ("200", response->headers().Status()->value().c_str()); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); } TEST_P(RBACIntegrationTest, RbacPrefixRuleUseNormalizePath) { @@ -129,7 +129,7 @@ TEST_P(RBACIntegrationTest, RbacPrefixRuleUseNormalizePath) { response->waitForEndStream(); ASSERT_TRUE(response->complete()); - EXPECT_STREQ("403", response->headers().Status()->value().c_str()); + EXPECT_EQ("403", response->headers().Status()->value().getStringView()); } TEST_P(RBACIntegrationTest, DeniedHeadReply) { @@ -149,10 +149,10 @@ TEST_P(RBACIntegrationTest, 
DeniedHeadReply) { 1024); response->waitForEndStream(); ASSERT_TRUE(response->complete()); - EXPECT_STREQ("403", response->headers().Status()->value().c_str()); + EXPECT_EQ("403", response->headers().Status()->value().getStringView()); ASSERT_TRUE(response->headers().ContentLength()); - EXPECT_STRNE("0", response->headers().ContentLength()->value().c_str()); - EXPECT_STREQ("", response->body().c_str()); + EXPECT_NE("0", response->headers().ContentLength()->value().getStringView()); + EXPECT_THAT(response->body(), ::testing::IsEmpty()); } TEST_P(RBACIntegrationTest, RouteOverride) { @@ -188,7 +188,7 @@ TEST_P(RBACIntegrationTest, RouteOverride) { response->waitForEndStream(); ASSERT_TRUE(response->complete()); - EXPECT_STREQ("200", response->headers().Status()->value().c_str()); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); } } // namespace diff --git a/test/extensions/filters/http/squash/squash_filter_integration_test.cc b/test/extensions/filters/http/squash/squash_filter_integration_test.cc index ab2ee89f96b77..c6850028195da 100644 --- a/test/extensions/filters/http/squash/squash_filter_integration_test.cc +++ b/test/extensions/filters/http/squash/squash_filter_integration_test.cc @@ -133,8 +133,8 @@ TEST_P(SquashFilterIntegrationTest, TestHappyPath) { response->waitForEndStream(); - EXPECT_STREQ("POST", create_stream->headers().Method()->value().c_str()); - EXPECT_STREQ("/api/v2/debugattachment/", create_stream->headers().Path()->value().c_str()); + EXPECT_EQ("POST", create_stream->headers().Method()->value().getStringView()); + EXPECT_EQ("/api/v2/debugattachment/", create_stream->headers().Path()->value().getStringView()); // Make sure the env var was replaced ProtobufWkt::Struct actualbody; MessageUtil::loadFromJson(create_stream->body().toString(), actualbody); @@ -146,10 +146,11 @@ TEST_P(SquashFilterIntegrationTest, TestHappyPath) { EXPECT_TRUE(MessageDifferencer::Equals(expectedbody, actualbody)); // The second request should be 
for the created object - EXPECT_STREQ("GET", get_stream->headers().Method()->value().c_str()); - EXPECT_STREQ("/api/v2/debugattachment/oF8iVdiJs5", get_stream->headers().Path()->value().c_str()); + EXPECT_EQ("GET", get_stream->headers().Method()->value().getStringView()); + EXPECT_EQ("/api/v2/debugattachment/oF8iVdiJs5", + get_stream->headers().Path()->value().getStringView()); EXPECT_TRUE(response->complete()); - EXPECT_STREQ("200", response->headers().Status()->value().c_str()); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); } TEST_P(SquashFilterIntegrationTest, ErrorAttaching) { @@ -163,7 +164,7 @@ TEST_P(SquashFilterIntegrationTest, ErrorAttaching) { response->waitForEndStream(); EXPECT_TRUE(response->complete()); - EXPECT_STREQ("200", response->headers().Status()->value().c_str()); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); } TEST_P(SquashFilterIntegrationTest, TimeoutAttaching) { @@ -179,7 +180,7 @@ TEST_P(SquashFilterIntegrationTest, TimeoutAttaching) { response->waitForEndStream(); EXPECT_TRUE(response->complete()); - EXPECT_STREQ("200", response->headers().Status()->value().c_str()); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); } TEST_P(SquashFilterIntegrationTest, ErrorNoSquashServer) { @@ -190,7 +191,7 @@ TEST_P(SquashFilterIntegrationTest, ErrorNoSquashServer) { response->waitForEndStream(); EXPECT_TRUE(response->complete()); - EXPECT_STREQ("200", response->headers().Status()->value().c_str()); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); } TEST_P(SquashFilterIntegrationTest, BadCreateResponse) { @@ -202,7 +203,7 @@ TEST_P(SquashFilterIntegrationTest, BadCreateResponse) { response->waitForEndStream(); EXPECT_TRUE(response->complete()); - EXPECT_STREQ("200", response->headers().Status()->value().c_str()); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); } TEST_P(SquashFilterIntegrationTest, BadGetResponse) { @@ 
-216,7 +217,7 @@ TEST_P(SquashFilterIntegrationTest, BadGetResponse) { response->waitForEndStream(); EXPECT_TRUE(response->complete()); - EXPECT_STREQ("200", response->headers().Status()->value().c_str()); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); } } // namespace Envoy diff --git a/test/extensions/filters/http/tap/tap_filter_integration_test.cc b/test/extensions/filters/http/tap/tap_filter_integration_test.cc index 524451ca9ff99..47706214017b3 100644 --- a/test/extensions/filters/http/tap/tap_filter_integration_test.cc +++ b/test/extensions/filters/http/tap/tap_filter_integration_test.cc @@ -86,7 +86,7 @@ class TapIntegrationTest : public testing::TestWithParammakeRequestWithBody(admin_request_headers, admin_request_yaml); admin_response_->waitForHeaders(); - EXPECT_STREQ("200", admin_response_->headers().Status()->value().c_str()); + EXPECT_EQ("200", admin_response_->headers().Status()->value().getStringView()); EXPECT_FALSE(admin_response_->complete()); } diff --git a/test/extensions/stats_sinks/metrics_service/metrics_service_integration_test.cc b/test/extensions/stats_sinks/metrics_service/metrics_service_integration_test.cc index 5dc5984308b28..3afded9772b7f 100644 --- a/test/extensions/stats_sinks/metrics_service/metrics_service_integration_test.cc +++ b/test/extensions/stats_sinks/metrics_service/metrics_service_integration_test.cc @@ -76,11 +76,11 @@ class MetricsServiceIntegrationTest : public Grpc::GrpcClientIntegrationParamTes while (!(known_counter_exists && known_gauge_exists && known_histogram_exists)) { envoy::service::metrics::v2::StreamMetricsMessage request_msg; VERIFY_ASSERTION(metrics_service_request_->waitForGrpcMessage(*dispatcher_, request_msg)); - EXPECT_STREQ("POST", metrics_service_request_->headers().Method()->value().c_str()); - EXPECT_STREQ("/envoy.service.metrics.v2.MetricsService/StreamMetrics", - metrics_service_request_->headers().Path()->value().c_str()); - EXPECT_STREQ("application/grpc", - 
metrics_service_request_->headers().ContentType()->value().c_str()); + EXPECT_EQ("POST", metrics_service_request_->headers().Method()->value().getStringView()); + EXPECT_EQ("/envoy.service.metrics.v2.MetricsService/StreamMetrics", + metrics_service_request_->headers().Path()->value().getStringView()); + EXPECT_EQ("application/grpc", + metrics_service_request_->headers().ContentType()->value().getStringView()); EXPECT_TRUE(request_msg.envoy_metrics_size() > 0); const Protobuf::RepeatedPtrField<::io::prometheus::client::MetricFamily>& envoy_metrics = request_msg.envoy_metrics(); diff --git a/test/extensions/tracers/datadog/datadog_tracer_impl_test.cc b/test/extensions/tracers/datadog/datadog_tracer_impl_test.cc index 2ddc885a59290..224f09c62c557 100644 --- a/test/extensions/tracers/datadog/datadog_tracer_impl_test.cc +++ b/test/extensions/tracers/datadog/datadog_tracer_impl_test.cc @@ -133,8 +133,9 @@ TEST_F(DatadogDriverTest, FlushSpansTimer) { const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* { callback = &callbacks; - EXPECT_STREQ("fake_cluster", message->headers().Host()->value().c_str()); - EXPECT_STREQ("application/msgpack", message->headers().ContentType()->value().c_str()); + EXPECT_EQ("fake_cluster", message->headers().Host()->value().getStringView()); + EXPECT_EQ("application/msgpack", + message->headers().ContentType()->value().getStringView()); return &request; })); diff --git a/test/extensions/tracers/lightstep/lightstep_tracer_impl_test.cc b/test/extensions/tracers/lightstep/lightstep_tracer_impl_test.cc index 75a34c1acf9a8..baf365a636241 100644 --- a/test/extensions/tracers/lightstep/lightstep_tracer_impl_test.cc +++ b/test/extensions/tracers/lightstep/lightstep_tracer_impl_test.cc @@ -168,10 +168,11 @@ TEST_F(LightStepDriverTest, FlushSeveralSpans) { const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* { callback = &callbacks; - EXPECT_STREQ("/lightstep.collector.CollectorService/Report", - 
message->headers().Path()->value().c_str()); - EXPECT_STREQ("fake_cluster", message->headers().Host()->value().c_str()); - EXPECT_STREQ("application/grpc", message->headers().ContentType()->value().c_str()); + EXPECT_EQ("/lightstep.collector.CollectorService/Report", + message->headers().Path()->value().getStringView()); + EXPECT_EQ("fake_cluster", message->headers().Host()->value().getStringView()); + EXPECT_EQ("application/grpc", + message->headers().ContentType()->value().getStringView()); return &request; })); @@ -230,10 +231,11 @@ TEST_F(LightStepDriverTest, FlushOneFailure) { const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* { callback = &callbacks; - EXPECT_STREQ("/lightstep.collector.CollectorService/Report", - message->headers().Path()->value().c_str()); - EXPECT_STREQ("fake_cluster", message->headers().Host()->value().c_str()); - EXPECT_STREQ("application/grpc", message->headers().ContentType()->value().c_str()); + EXPECT_EQ("/lightstep.collector.CollectorService/Report", + message->headers().Path()->value().getStringView()); + EXPECT_EQ("fake_cluster", message->headers().Host()->value().getStringView()); + EXPECT_EQ("application/grpc", + message->headers().ContentType()->value().getStringView()); return &request; })); @@ -274,10 +276,11 @@ TEST_F(LightStepDriverTest, FlushOneInvalidResponse) { const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* { callback = &callbacks; - EXPECT_STREQ("/lightstep.collector.CollectorService/Report", - message->headers().Path()->value().c_str()); - EXPECT_STREQ("fake_cluster", message->headers().Host()->value().c_str()); - EXPECT_STREQ("application/grpc", message->headers().ContentType()->value().c_str()); + EXPECT_EQ("/lightstep.collector.CollectorService/Report", + message->headers().Path()->value().getStringView()); + EXPECT_EQ("fake_cluster", message->headers().Host()->value().getStringView()); + EXPECT_EQ("application/grpc", + 
message->headers().ContentType()->value().getStringView()); return &request; })); @@ -352,10 +355,11 @@ TEST_F(LightStepDriverTest, FlushOneSpanGrpcFailure) { const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* { callback = &callbacks; - EXPECT_STREQ("/lightstep.collector.CollectorService/Report", - message->headers().Path()->value().c_str()); - EXPECT_STREQ("fake_cluster", message->headers().Host()->value().c_str()); - EXPECT_STREQ("application/grpc", message->headers().ContentType()->value().c_str()); + EXPECT_EQ("/lightstep.collector.CollectorService/Report", + message->headers().Path()->value().getStringView()); + EXPECT_EQ("fake_cluster", message->headers().Host()->value().getStringView()); + EXPECT_EQ("application/grpc", + message->headers().ContentType()->value().getStringView()); return &request; })); @@ -429,7 +433,7 @@ TEST_F(LightStepDriverTest, SerializeAndDeserializeContext) { {Tracing::Reason::Sampling, true}); EXPECT_EQ(1U, stats_.counter("tracing.opentracing.span_context_extraction_error").value()); - std::string injected_ctx = request_headers_.OtSpanContext()->value().c_str(); + std::string injected_ctx(request_headers_.OtSpanContext()->value().getStringView()); EXPECT_FALSE(injected_ctx.empty()); // Supply empty context. @@ -440,7 +444,7 @@ TEST_F(LightStepDriverTest, SerializeAndDeserializeContext) { EXPECT_EQ(nullptr, request_headers_.OtSpanContext()); span->injectContext(request_headers_); - injected_ctx = request_headers_.OtSpanContext()->value().c_str(); + injected_ctx = std::string(request_headers_.OtSpanContext()->value().getStringView()); EXPECT_FALSE(injected_ctx.empty()); // Context can be parsed fine. 
@@ -454,7 +458,7 @@ TEST_F(LightStepDriverTest, SerializeAndDeserializeContext) { config_, request_headers_, operation_name_, start_time_, {Tracing::Reason::Sampling, true}); request_headers_.removeOtSpanContext(); span_with_parent->injectContext(request_headers_); - injected_ctx = request_headers_.OtSpanContext()->value().c_str(); + injected_ctx = std::string(request_headers_.OtSpanContext()->value().getStringView()); EXPECT_FALSE(injected_ctx.empty()); } } @@ -476,8 +480,10 @@ TEST_F(LightStepDriverTest, SpawnChild) { childViaHeaders->injectContext(base1); childViaSpawn->injectContext(base2); - std::string base1_context = Base64::decode(base1.OtSpanContext()->value().c_str()); - std::string base2_context = Base64::decode(base2.OtSpanContext()->value().c_str()); + std::string base1_context = + Base64::decode(std::string(base1.OtSpanContext()->value().getStringView())); + std::string base2_context = + Base64::decode(std::string(base2.OtSpanContext()->value().getStringView())); EXPECT_FALSE(base1_context.empty()); EXPECT_FALSE(base2_context.empty()); diff --git a/test/extensions/tracers/zipkin/zipkin_tracer_impl_test.cc b/test/extensions/tracers/zipkin/zipkin_tracer_impl_test.cc index e538860b509f1..c918f38172073 100644 --- a/test/extensions/tracers/zipkin/zipkin_tracer_impl_test.cc +++ b/test/extensions/tracers/zipkin/zipkin_tracer_impl_test.cc @@ -145,9 +145,10 @@ TEST_F(ZipkinDriverTest, FlushSeveralSpans) { const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* { callback = &callbacks; - EXPECT_STREQ("/api/v1/spans", message->headers().Path()->value().c_str()); - EXPECT_STREQ("fake_cluster", message->headers().Host()->value().c_str()); - EXPECT_STREQ("application/json", message->headers().ContentType()->value().c_str()); + EXPECT_EQ("/api/v1/spans", message->headers().Path()->value().getStringView()); + EXPECT_EQ("fake_cluster", message->headers().Host()->value().getStringView()); + EXPECT_EQ("application/json", + 
message->headers().ContentType()->value().getStringView()); return &request; })); @@ -195,9 +196,10 @@ TEST_F(ZipkinDriverTest, FlushOneSpanReportFailure) { const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* { callback = &callbacks; - EXPECT_STREQ("/api/v1/spans", message->headers().Path()->value().c_str()); - EXPECT_STREQ("fake_cluster", message->headers().Host()->value().c_str()); - EXPECT_STREQ("application/json", message->headers().ContentType()->value().c_str()); + EXPECT_EQ("/api/v1/spans", message->headers().Path()->value().getStringView()); + EXPECT_EQ("fake_cluster", message->headers().Host()->value().getStringView()); + EXPECT_EQ("application/json", + message->headers().ContentType()->value().getStringView()); return &request; })); @@ -633,9 +635,9 @@ TEST_F(ZipkinDriverTest, DuplicatedHeader) { Tracing::SpanPtr span = driver_->startSpan(config_, request_headers_, operation_name_, start_time_, {Tracing::Reason::Sampling, false}); - typedef std::function DupCallback; - DupCallback dup_callback = [](const std::string& key) -> bool { - static std::unordered_map dup; + typedef std::function DupCallback; + DupCallback dup_callback = [](absl::string_view key) -> bool { + static absl::flat_hash_map dup; if (dup.find(key) == dup.end()) { dup[key] = true; return false; @@ -647,7 +649,7 @@ TEST_F(ZipkinDriverTest, DuplicatedHeader) { span->injectContext(request_headers_); request_headers_.iterate( [](const Http::HeaderEntry& header, void* cb) -> Http::HeaderMap::Iterate { - EXPECT_FALSE(static_cast(cb)->operator()(header.key().c_str())); + EXPECT_FALSE(static_cast(cb)->operator()(header.key().getStringView())); return Http::HeaderMap::Iterate::Continue; }, &dup_callback); diff --git a/test/extensions/transport_sockets/tls/integration/ssl_integration_test.cc b/test/extensions/transport_sockets/tls/integration/ssl_integration_test.cc index e95899d2cfdb1..f15651ecfe026 100644 --- 
a/test/extensions/transport_sockets/tls/integration/ssl_integration_test.cc +++ b/test/extensions/transport_sockets/tls/integration/ssl_integration_test.cc @@ -170,7 +170,7 @@ TEST_P(SslIntegrationTest, AdminCertEndpoint) { BufferingStreamDecoderPtr response = IntegrationUtil::makeSingleRequest( lookupPort("admin"), "GET", "/certs", "", downstreamProtocol(), version_); EXPECT_TRUE(response->complete()); - EXPECT_STREQ("200", response->headers().Status()->value().c_str()); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); } // Validate certificate selection across different certificate types and client TLS versions. @@ -410,7 +410,7 @@ TEST_P(SslTapIntegrationTest, TwoRequestsWithBinaryProto) { EXPECT_TRUE(upstream_request_->complete()); EXPECT_EQ(128, upstream_request_->bodyLength()); ASSERT_TRUE(response->complete()); - EXPECT_STREQ("200", response->headers().Status()->value().c_str()); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); EXPECT_EQ(256, response->body().size()); checkStats(); envoy::api::v2::core::Address expected_local_address; @@ -448,7 +448,7 @@ TEST_P(SslTapIntegrationTest, TwoRequestsWithBinaryProto) { EXPECT_TRUE(upstream_request_->complete()); EXPECT_EQ(128, upstream_request_->bodyLength()); ASSERT_TRUE(response->complete()); - EXPECT_STREQ("200", response->headers().Status()->value().c_str()); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); EXPECT_EQ(256, response->body().size()); checkStats(); codec_client_->close(); diff --git a/test/fuzz/utility.h b/test/fuzz/utility.h index afa0e24415bb1..28d6b73f96a90 100644 --- a/test/fuzz/utility.h +++ b/test/fuzz/utility.h @@ -42,8 +42,8 @@ inline test::fuzz::Headers toHeaders(const Http::HeaderMap& headers) { headers.iterate( [](const Http::HeaderEntry& header, void* ctxt) -> Http::HeaderMap::Iterate { auto* fuzz_header = static_cast(ctxt)->add_headers(); - fuzz_header->set_key(header.key().c_str()); - 
fuzz_header->set_value(header.value().c_str()); + fuzz_header->set_key(std::string(header.key().getStringView())); + fuzz_header->set_value(std::string(header.value().getStringView())); return Http::HeaderMap::Iterate::Continue; }, &fuzz_headers); diff --git a/test/integration/cds_integration_test.cc b/test/integration/cds_integration_test.cc index f70fc5d175834..9ebce9094c624 100644 --- a/test/integration/cds_integration_test.cc +++ b/test/integration/cds_integration_test.cc @@ -149,7 +149,7 @@ TEST_P(CdsIntegrationTest, CdsClusterUpDownUp) { BufferingStreamDecoderPtr response = IntegrationUtil::makeSingleRequest( lookupPort("http"), "GET", "/cluster1", "", downstream_protocol_, version_, "foo.com"); ASSERT_TRUE(response->complete()); - EXPECT_STREQ("503", response->headers().Status()->value().c_str()); + EXPECT_EQ("503", response->headers().Status()->value().getStringView()); cleanupUpstreamAndDownstream(); codec_client_->waitForDisconnect(); @@ -261,7 +261,7 @@ TEST_P(DeltaCdsIntegrationTest, CdsClusterUpDownUp) { BufferingStreamDecoderPtr response = IntegrationUtil::makeSingleRequest( lookupPort("http"), "GET", "/cluster1", "", downstream_protocol_, version_, "foo.com"); ASSERT_TRUE(response->complete()); - EXPECT_STREQ("503", response->headers().Status()->value().c_str()); + EXPECT_EQ("503", response->headers().Status()->value().getStringView()); cleanupUpstreamAndDownstream(); codec_client_->waitForDisconnect(); diff --git a/test/integration/hds_integration_test.cc b/test/integration/hds_integration_test.cc index dd8fe59451747..b8acab50c4424 100644 --- a/test/integration/hds_integration_test.cc +++ b/test/integration/hds_integration_test.cc @@ -79,19 +79,19 @@ class HdsIntegrationTest : public testing::TestWithParamwaitForEndStream(*dispatcher_)); host_upstream_->set_allow_unexpected_disconnects(true); - EXPECT_STREQ(host_stream_->headers().Path()->value().c_str(), "/healthcheck"); - EXPECT_STREQ(host_stream_->headers().Method()->value().c_str(), "GET"); - 
EXPECT_STREQ(host_stream_->headers().Host()->value().c_str(), "anna"); + EXPECT_EQ(host_stream_->headers().Path()->value().getStringView(), "/healthcheck"); + EXPECT_EQ(host_stream_->headers().Method()->value().getStringView(), "GET"); + EXPECT_EQ(host_stream_->headers().Host()->value().getStringView(), "anna"); - if (cluster2 != "") { + if (!cluster2.empty()) { ASSERT_TRUE(host2_upstream_->waitForHttpConnection(*dispatcher_, host2_fake_connection_)); ASSERT_TRUE(host2_fake_connection_->waitForNewStream(*dispatcher_, host2_stream_)); ASSERT_TRUE(host2_stream_->waitForEndStream(*dispatcher_)); host2_upstream_->set_allow_unexpected_disconnects(true); - EXPECT_STREQ(host2_stream_->headers().Path()->value().c_str(), "/healthcheck"); - EXPECT_STREQ(host2_stream_->headers().Method()->value().c_str(), "GET"); - EXPECT_STREQ(host2_stream_->headers().Host()->value().c_str(), cluster2.c_str()); + EXPECT_EQ(host2_stream_->headers().Path()->value().getStringView(), "/healthcheck"); + EXPECT_EQ(host2_stream_->headers().Method()->value().getStringView(), "GET"); + EXPECT_EQ(host2_stream_->headers().Host()->value().getStringView(), cluster2); } } diff --git a/test/integration/http2_integration_test.cc b/test/integration/http2_integration_test.cc index c8275e762b8c2..44da6c4eab022 100644 --- a/test/integration/http2_integration_test.cc +++ b/test/integration/http2_integration_test.cc @@ -422,10 +422,10 @@ TEST_P(Http2IntegrationTest, GrpcRouterNotFound) { lookupPort("http"), "POST", "/service/notfound", "", downstream_protocol_, version_, "host", Http::Headers::get().ContentTypeValues.Grpc); ASSERT_TRUE(response->complete()); - EXPECT_STREQ("200", response->headers().Status()->value().c_str()); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); EXPECT_EQ(Http::Headers::get().ContentTypeValues.Grpc, - response->headers().ContentType()->value().c_str()); - EXPECT_STREQ("12", response->headers().GrpcStatus()->value().c_str()); + 
response->headers().ContentType()->value().getStringView()); + EXPECT_EQ("12", response->headers().GrpcStatus()->value().getStringView()); } TEST_P(Http2IntegrationTest, GrpcRetry) { testGrpcRetry(); } @@ -477,7 +477,7 @@ TEST_P(Http2IntegrationTest, GoAway) { codec_client_->close(); EXPECT_TRUE(response->complete()); - EXPECT_STREQ("200", response->headers().Status()->value().c_str()); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); } TEST_P(Http2IntegrationTest, Trailers) { testTrailers(1024, 2048); } @@ -511,9 +511,9 @@ TEST_P(Http2IntegrationTest, GrpcRequestTimeout) { {"content-type", "application/grpc"}}); response->waitForEndStream(); EXPECT_TRUE(response->complete()); - EXPECT_STREQ("200", response->headers().Status()->value().c_str()); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); EXPECT_NE(response->headers().GrpcStatus(), nullptr); - EXPECT_STREQ("14", response->headers().GrpcStatus()->value().c_str()); // Service Unavailable + EXPECT_EQ("14", response->headers().GrpcStatus()->value().getStringView()); // Service Unavailable EXPECT_LT(0, test_server_->counter("cluster.cluster_0.upstream_rq_timeout")->value()); } @@ -580,7 +580,7 @@ TEST_P(Http2IntegrationTest, IdleTimeoutWithSimultaneousRequests) { EXPECT_TRUE(upstream_request2->complete()); EXPECT_EQ(request2_bytes, upstream_request2->bodyLength()); EXPECT_TRUE(response2->complete()); - EXPECT_STREQ("200", response2->headers().Status()->value().c_str()); + EXPECT_EQ("200", response2->headers().Status()->value().getStringView()); EXPECT_EQ(request2_bytes, response2->body().size()); // Validate that idle time is not kicked in. 
@@ -594,7 +594,7 @@ TEST_P(Http2IntegrationTest, IdleTimeoutWithSimultaneousRequests) { EXPECT_TRUE(upstream_request1->complete()); EXPECT_EQ(request1_bytes, upstream_request1->bodyLength()); EXPECT_TRUE(response1->complete()); - EXPECT_STREQ("200", response1->headers().Status()->value().c_str()); + EXPECT_EQ("200", response1->headers().Status()->value().getStringView()); EXPECT_EQ(request1_bytes, response1->body().size()); // Do not send any requests and validate idle timeout kicks in after both the requests are done. @@ -700,7 +700,7 @@ void Http2IntegrationTest::simultaneousRequest(int32_t request1_bytes, int32_t r EXPECT_TRUE(upstream_request2->complete()); EXPECT_EQ(request2_bytes, upstream_request2->bodyLength()); EXPECT_TRUE(response2->complete()); - EXPECT_STREQ("200", response2->headers().Status()->value().c_str()); + EXPECT_EQ("200", response2->headers().Status()->value().getStringView()); EXPECT_EQ(request2_bytes, response2->body().size()); // Respond to request 1 @@ -710,7 +710,7 @@ void Http2IntegrationTest::simultaneousRequest(int32_t request1_bytes, int32_t r EXPECT_TRUE(upstream_request1->complete()); EXPECT_EQ(request1_bytes, upstream_request1->bodyLength()); EXPECT_TRUE(response1->complete()); - EXPECT_STREQ("200", response1->headers().Status()->value().c_str()); + EXPECT_EQ("200", response1->headers().Status()->value().getStringView()); EXPECT_EQ(request2_bytes, response1->body().size()); // Cleanup both downstream and upstream @@ -897,10 +897,10 @@ TEST_P(Http2RingHashIntegrationTest, CookieRoutingNoCookieNoTtl) { {":scheme", "http"}, {":authority", "host"}}, [&](IntegrationStreamDecoder& response) { - EXPECT_STREQ("200", response.headers().Status()->value().c_str()); + EXPECT_EQ("200", response.headers().Status()->value().getStringView()); EXPECT_TRUE(response.headers().get(Http::Headers::get().SetCookie) == nullptr); - served_by.insert( - response.headers().get(Http::LowerCaseString("x-served-by"))->value().c_str()); + 
served_by.insert(std::string( + response.headers().get(Http::LowerCaseString("x-served-by"))->value().getStringView())); }); EXPECT_EQ(served_by.size(), num_upstreams_); } @@ -927,8 +927,9 @@ TEST_P(Http2RingHashIntegrationTest, CookieRoutingNoCookieWithNonzeroTtlSet) { {":scheme", "http"}, {":authority", "host"}}, [&](IntegrationStreamDecoder& response) { - EXPECT_STREQ("200", response.headers().Status()->value().c_str()); - std::string value = response.headers().get(Http::Headers::get().SetCookie)->value().c_str(); + EXPECT_EQ("200", response.headers().Status()->value().getStringView()); + std::string value( + response.headers().get(Http::Headers::get().SetCookie)->value().getStringView()); set_cookies.insert(value); EXPECT_THAT(value, MatchesRegex("foo=.*; Max-Age=15; HttpOnly")); }); @@ -957,8 +958,9 @@ TEST_P(Http2RingHashIntegrationTest, CookieRoutingNoCookieWithZeroTtlSet) { {":scheme", "http"}, {":authority", "host"}}, [&](IntegrationStreamDecoder& response) { - EXPECT_STREQ("200", response.headers().Status()->value().c_str()); - std::string value = response.headers().get(Http::Headers::get().SetCookie)->value().c_str(); + EXPECT_EQ("200", response.headers().Status()->value().getStringView()); + std::string value( + response.headers().get(Http::Headers::get().SetCookie)->value().getStringView()); set_cookies.insert(value); EXPECT_THAT(value, MatchesRegex("^foo=.*$")); }); @@ -987,10 +989,10 @@ TEST_P(Http2RingHashIntegrationTest, CookieRoutingWithCookieNoTtl) { {":scheme", "http"}, {":authority", "host"}}, [&](IntegrationStreamDecoder& response) { - EXPECT_STREQ("200", response.headers().Status()->value().c_str()); + EXPECT_EQ("200", response.headers().Status()->value().getStringView()); EXPECT_TRUE(response.headers().get(Http::Headers::get().SetCookie) == nullptr); - served_by.insert( - response.headers().get(Http::LowerCaseString("x-served-by"))->value().c_str()); + served_by.insert(std::string( + 
response.headers().get(Http::LowerCaseString("x-served-by"))->value().getStringView())); }); EXPECT_EQ(served_by.size(), 1); } @@ -1018,10 +1020,10 @@ TEST_P(Http2RingHashIntegrationTest, CookieRoutingWithCookieWithTtlSet) { {":scheme", "http"}, {":authority", "host"}}, [&](IntegrationStreamDecoder& response) { - EXPECT_STREQ("200", response.headers().Status()->value().c_str()); + EXPECT_EQ("200", response.headers().Status()->value().getStringView()); EXPECT_TRUE(response.headers().get(Http::Headers::get().SetCookie) == nullptr); - served_by.insert( - response.headers().get(Http::LowerCaseString("x-served-by"))->value().c_str()); + served_by.insert(std::string( + response.headers().get(Http::LowerCaseString("x-served-by"))->value().getStringView())); }); EXPECT_EQ(served_by.size(), 1); } diff --git a/test/integration/http2_upstream_integration_test.cc b/test/integration/http2_upstream_integration_test.cc index a872a3fc3f23b..0dd3ab6a66463 100644 --- a/test/integration/http2_upstream_integration_test.cc +++ b/test/integration/http2_upstream_integration_test.cc @@ -175,7 +175,7 @@ void Http2UpstreamIntegrationTest::simultaneousRequest(uint32_t request1_bytes, EXPECT_TRUE(upstream_request2->complete()); EXPECT_EQ(request2_bytes, upstream_request2->bodyLength()); EXPECT_TRUE(response2->complete()); - EXPECT_STREQ("200", response2->headers().Status()->value().c_str()); + EXPECT_EQ("200", response2->headers().Status()->value().getStringView()); EXPECT_EQ(response2_bytes, response2->body().size()); // Respond to request 1 @@ -185,7 +185,7 @@ void Http2UpstreamIntegrationTest::simultaneousRequest(uint32_t request1_bytes, EXPECT_TRUE(upstream_request1->complete()); EXPECT_EQ(request1_bytes, upstream_request1->bodyLength()); EXPECT_TRUE(response1->complete()); - EXPECT_STREQ("200", response1->headers().Status()->value().c_str()); + EXPECT_EQ("200", response1->headers().Status()->value().getStringView()); EXPECT_EQ(response1_bytes, response1->body().size()); } @@ -230,11 
+230,11 @@ void Http2UpstreamIntegrationTest::manySimultaneousRequests(uint32_t request_byt responses[i]->waitForEndStream(); if (i % 2 != 0) { EXPECT_TRUE(responses[i]->complete()); - EXPECT_STREQ("200", responses[i]->headers().Status()->value().c_str()); + EXPECT_EQ("200", responses[i]->headers().Status()->value().getStringView()); EXPECT_EQ(response_bytes[i], responses[i]->body().length()); } else { // Upstream stream reset. - EXPECT_STREQ("503", responses[i]->headers().Status()->value().c_str()); + EXPECT_EQ("503", responses[i]->headers().Status()->value().getStringView()); } } } diff --git a/test/integration/http_integration.cc b/test/integration/http_integration.cc index 4c9bb7b550946..d6cc94c337e23 100644 --- a/test/integration/http_integration.cc +++ b/test/integration/http_integration.cc @@ -340,7 +340,7 @@ void HttpIntegrationTest::checkSimpleRequestSuccess(uint64_t expected_request_si EXPECT_EQ(expected_request_size, upstream_request_->bodyLength()); ASSERT_TRUE(response->complete()); - EXPECT_STREQ("200", response->headers().Status()->value().c_str()); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); EXPECT_EQ(expected_response_size, response->body().size()); } @@ -394,7 +394,7 @@ void HttpIntegrationTest::testRouterNotFound() { BufferingStreamDecoderPtr response = IntegrationUtil::makeSingleRequest( lookupPort("http"), "GET", "/notfound", "", downstream_protocol_, version_); ASSERT_TRUE(response->complete()); - EXPECT_STREQ("404", response->headers().Status()->value().c_str()); + EXPECT_EQ("404", response->headers().Status()->value().getStringView()); } // Change the default route to be restrictive, and send a POST to an alternate route. 
@@ -405,7 +405,7 @@ void HttpIntegrationTest::testRouterNotFoundWithBody() { BufferingStreamDecoderPtr response = IntegrationUtil::makeSingleRequest( lookupPort("http"), "POST", "/notfound", "foo", downstream_protocol_, version_); ASSERT_TRUE(response->complete()); - EXPECT_STREQ("404", response->headers().Status()->value().c_str()); + EXPECT_EQ("404", response->headers().Status()->value().getStringView()); } void HttpIntegrationTest::testRouterUpstreamDisconnectBeforeRequestComplete() { @@ -433,7 +433,7 @@ void HttpIntegrationTest::testRouterUpstreamDisconnectBeforeRequestComplete() { EXPECT_EQ(0U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_STREQ("503", response->headers().Status()->value().c_str()); + EXPECT_EQ("503", response->headers().Status()->value().getStringView()); EXPECT_EQ("upstream connect error or disconnect/reset before headers. reset reason: connection " "termination", response->body()); @@ -461,7 +461,7 @@ void HttpIntegrationTest::testRouterUpstreamDisconnectBeforeResponseComplete( EXPECT_EQ(0U, upstream_request_->bodyLength()); EXPECT_FALSE(response->complete()); - EXPECT_STREQ("200", response->headers().Status()->value().c_str()); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); EXPECT_EQ(0U, response->body().size()); } @@ -523,7 +523,7 @@ void HttpIntegrationTest::testRouterDownstreamDisconnectBeforeResponseComplete( EXPECT_EQ(0U, upstream_request_->bodyLength()); EXPECT_FALSE(response->complete()); - EXPECT_STREQ("200", response->headers().Status()->value().c_str()); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); EXPECT_EQ(512U, response->body().size()); } @@ -557,7 +557,7 @@ void HttpIntegrationTest::testRouterUpstreamResponseBeforeRequestComplete() { EXPECT_EQ(0U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_STREQ("200", response->headers().Status()->value().c_str()); + EXPECT_EQ("200", 
response->headers().Status()->value().getStringView()); EXPECT_EQ(512U, response->body().size()); } @@ -590,7 +590,7 @@ void HttpIntegrationTest::testRetry() { EXPECT_EQ(1024U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_STREQ("200", response->headers().Status()->value().c_str()); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); EXPECT_EQ(512U, response->body().size()); } @@ -614,7 +614,10 @@ void HttpIntegrationTest::testRetryAttemptCountHeader() { waitForNextUpstreamRequest(); upstream_request_->encodeHeaders(Http::TestHeaderMapImpl{{":status", "503"}}, false); - EXPECT_EQ(atoi(upstream_request_->headers().EnvoyAttemptCount()->value().c_str()), 1); + EXPECT_EQ( + atoi(std::string(upstream_request_->headers().EnvoyAttemptCount()->value().getStringView()) + .c_str()), + 1); if (fake_upstreams_[0]->httpType() == FakeHttpConnection::Type::HTTP1) { ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect()); @@ -623,7 +626,10 @@ void HttpIntegrationTest::testRetryAttemptCountHeader() { ASSERT_TRUE(upstream_request_->waitForReset()); } waitForNextUpstreamRequest(); - EXPECT_EQ(atoi(upstream_request_->headers().EnvoyAttemptCount()->value().c_str()), 2); + EXPECT_EQ( + atoi(std::string(upstream_request_->headers().EnvoyAttemptCount()->value().getStringView()) + .c_str()), + 2); upstream_request_->encodeHeaders(default_response_headers_, false); upstream_request_->encodeData(512, true); @@ -632,7 +638,7 @@ void HttpIntegrationTest::testRetryAttemptCountHeader() { EXPECT_EQ(1024U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_STREQ("200", response->headers().Status()->value().c_str()); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); EXPECT_EQ(512U, response->body().size()); } @@ -673,7 +679,7 @@ void HttpIntegrationTest::testGrpcRetry() { EXPECT_EQ(1024U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_STREQ("200", 
response->headers().Status()->value().c_str()); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); EXPECT_EQ(512U, response->body().size()); if (fake_upstreams_[0]->httpType() == FakeHttpConnection::Type::HTTP2) { EXPECT_THAT(*response->trailers(), HeaderMapEqualRef(&response_trailers)); @@ -706,8 +712,8 @@ void HttpIntegrationTest::testEnvoyHandling100Continue(bool additional_continue_ if (via.empty()) { EXPECT_EQ(nullptr, upstream_request_->headers().get(Http::Headers::get().Via)); } else { - EXPECT_STREQ(via.c_str(), - upstream_request_->headers().get(Http::Headers::get().Via)->value().c_str()); + EXPECT_EQ(via, + upstream_request_->headers().get(Http::Headers::get().Via)->value().getStringView()); } if (additional_continue_from_upstream) { @@ -721,13 +727,13 @@ void HttpIntegrationTest::testEnvoyHandling100Continue(bool additional_continue_ response->waitForEndStream(); ASSERT_TRUE(response->complete()); ASSERT(response->continue_headers() != nullptr); - EXPECT_STREQ("100", response->continue_headers()->Status()->value().c_str()); + EXPECT_EQ("100", response->continue_headers()->Status()->value().getStringView()); EXPECT_EQ(nullptr, response->continue_headers()->Via()); - EXPECT_STREQ("200", response->headers().Status()->value().c_str()); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); if (via.empty()) { EXPECT_EQ(nullptr, response->headers().Via()); } else { - EXPECT_STREQ(via.c_str(), response->headers().Via()->value().c_str()); + EXPECT_EQ(via.c_str(), response->headers().Via()->value().getStringView()); } } @@ -788,9 +794,9 @@ void HttpIntegrationTest::testEnvoyProxying100Continue(bool continue_before_upst response->waitForEndStream(); EXPECT_TRUE(response->complete()); ASSERT(response->continue_headers() != nullptr); - EXPECT_STREQ("100", response->continue_headers()->Status()->value().c_str()); + EXPECT_EQ("100", response->continue_headers()->Status()->value().getStringView()); - EXPECT_STREQ("200", 
response->headers().Status()->value().c_str()); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); } void HttpIntegrationTest::testTwoRequests(bool network_backup) { @@ -822,7 +828,7 @@ void HttpIntegrationTest::testTwoRequests(bool network_backup) { EXPECT_TRUE(upstream_request_->complete()); EXPECT_EQ(1024U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_STREQ("200", response->headers().Status()->value().c_str()); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); EXPECT_EQ(512U, response->body().size()); // Request 2. @@ -835,7 +841,7 @@ void HttpIntegrationTest::testTwoRequests(bool network_backup) { EXPECT_TRUE(upstream_request_->complete()); EXPECT_EQ(512U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_STREQ("200", response->headers().Status()->value().c_str()); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); EXPECT_EQ(1024U, response->body().size()); } @@ -864,7 +870,7 @@ void HttpIntegrationTest::testLargeRequestHeaders(uint32_t size, uint32_t max_si if (downstream_protocol_ == Http::CodecClient::Type::HTTP1) { codec_client_->waitForDisconnect(); EXPECT_TRUE(response->complete()); - EXPECT_STREQ("431", response->headers().Status()->value().c_str()); + EXPECT_EQ("431", response->headers().Status()->value().getStringView()); } else { response->waitForReset(); codec_client_->close(); @@ -872,7 +878,7 @@ void HttpIntegrationTest::testLargeRequestHeaders(uint32_t size, uint32_t max_si } else { auto response = sendRequestAndWaitForResponse(big_headers, 0, default_response_headers_, 0); EXPECT_TRUE(response->complete()); - EXPECT_STREQ("200", response->headers().Status()->value().c_str()); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); } } @@ -914,7 +920,7 @@ void HttpIntegrationTest::testDownstreamResetBeforeResponseComplete() { EXPECT_EQ(0U, upstream_request_->bodyLength()); 
EXPECT_FALSE(response->complete()); - EXPECT_STREQ("200", response->headers().Status()->value().c_str()); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); EXPECT_EQ(512U, response->body().size()); } @@ -946,7 +952,7 @@ void HttpIntegrationTest::testTrailers(uint64_t request_size, uint64_t response_ } EXPECT_TRUE(response->complete()); - EXPECT_STREQ("200", response->headers().Status()->value().c_str()); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); EXPECT_EQ(response_size, response->body().size()); if (fake_upstreams_[0]->httpType() == FakeHttpConnection::Type::HTTP2) { EXPECT_THAT(*response->trailers(), HeaderMapEqualRef(&response_trailers)); diff --git a/test/integration/idle_timeout_integration_test.cc b/test/integration/idle_timeout_integration_test.cc index edaa2e363cc83..f23dec2b295a6 100644 --- a/test/integration/idle_timeout_integration_test.cc +++ b/test/integration/idle_timeout_integration_test.cc @@ -170,7 +170,7 @@ TEST_P(IdleTimeoutIntegrationTest, PerStreamIdleTimeoutAfterDownstreamHeaders) { EXPECT_FALSE(upstream_request_->complete()); EXPECT_EQ(0U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_STREQ("408", response->headers().Status()->value().c_str()); + EXPECT_EQ("408", response->headers().Status()->value().getStringView()); EXPECT_EQ("stream timeout", response->body()); } @@ -183,9 +183,9 @@ TEST_P(IdleTimeoutIntegrationTest, PerStreamIdleTimeoutHeadRequestAfterDownstrea EXPECT_FALSE(upstream_request_->complete()); EXPECT_EQ(0U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_STREQ("408", response->headers().Status()->value().c_str()); - EXPECT_STREQ(fmt::format("{}", strlen("stream timeout")).c_str(), - response->headers().ContentLength()->value().c_str()); + EXPECT_EQ("408", response->headers().Status()->value().getStringView()); + EXPECT_EQ(fmt::format("{}", strlen("stream timeout")), + 
response->headers().ContentLength()->value().getStringView()); EXPECT_EQ("", response->body()); } @@ -200,7 +200,7 @@ TEST_P(IdleTimeoutIntegrationTest, GlobalPerStreamIdleTimeoutAfterDownstreamHead EXPECT_FALSE(upstream_request_->complete()); EXPECT_EQ(0U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_STREQ("408", response->headers().Status()->value().c_str()); + EXPECT_EQ("408", response->headers().Status()->value().getStringView()); EXPECT_EQ("stream timeout", response->body()); } @@ -217,7 +217,7 @@ TEST_P(IdleTimeoutIntegrationTest, PerStreamIdleTimeoutAfterDownstreamHeadersAnd EXPECT_FALSE(upstream_request_->complete()); EXPECT_EQ(1U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_STREQ("408", response->headers().Status()->value().c_str()); + EXPECT_EQ("408", response->headers().Status()->value().getStringView()); EXPECT_EQ("stream timeout", response->body()); } @@ -233,7 +233,7 @@ TEST_P(IdleTimeoutIntegrationTest, PerStreamIdleTimeoutAfterUpstreamHeaders) { EXPECT_FALSE(upstream_request_->complete()); EXPECT_EQ(0U, upstream_request_->bodyLength()); EXPECT_FALSE(response->complete()); - EXPECT_STREQ("200", response->headers().Status()->value().c_str()); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); EXPECT_EQ("", response->body()); } @@ -266,7 +266,7 @@ TEST_P(IdleTimeoutIntegrationTest, PerStreamIdleTimeoutAfterBidiData) { EXPECT_TRUE(upstream_request_->complete()); EXPECT_EQ(1U, upstream_request_->bodyLength()); EXPECT_FALSE(response->complete()); - EXPECT_STREQ("200", response->headers().Status()->value().c_str()); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); EXPECT_EQ("aa", response->body()); } @@ -296,7 +296,7 @@ TEST_P(IdleTimeoutIntegrationTest, RequestTimeoutTriggersOnBodilessPost) { EXPECT_FALSE(upstream_request_->complete()); EXPECT_EQ(0U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_STREQ("408", 
response->headers().Status()->value().c_str()); + EXPECT_EQ("408", response->headers().Status()->value().getStringView()); EXPECT_EQ("request timeout", response->body()); } @@ -312,7 +312,7 @@ TEST_P(IdleTimeoutIntegrationTest, RequestTimeoutUnconfiguredDoesNotTriggerOnBod EXPECT_FALSE(upstream_request_->complete()); EXPECT_EQ(0U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_STREQ("408", response->headers().Status()->value().c_str()); + EXPECT_EQ("408", response->headers().Status()->value().getStringView()); EXPECT_NE("request timeout", response->body()); } @@ -370,7 +370,7 @@ TEST_P(IdleTimeoutIntegrationTest, RequestTimeoutIsNotDisarmedByEncode100Continu EXPECT_FALSE(upstream_request_->complete()); EXPECT_EQ(0U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_STREQ("408", response->headers().Status()->value().c_str()); + EXPECT_EQ("408", response->headers().Status()->value().getStringView()); EXPECT_EQ("request timeout", response->body()); } diff --git a/test/integration/integration_admin_test.cc b/test/integration/integration_admin_test.cc index 2ce9f3b73b347..688d6fcd8be70 100644 --- a/test/integration/integration_admin_test.cc +++ b/test/integration/integration_admin_test.cc @@ -28,27 +28,27 @@ TEST_P(IntegrationAdminTest, HealthCheck) { BufferingStreamDecoderPtr response = IntegrationUtil::makeSingleRequest( lookupPort("http"), "POST", "/healthcheck", "", downstreamProtocol(), version_); EXPECT_TRUE(response->complete()); - EXPECT_STREQ("200", response->headers().Status()->value().c_str()); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); response = IntegrationUtil::makeSingleRequest(lookupPort("admin"), "POST", "/healthcheck/fail", "", downstreamProtocol(), version_); EXPECT_TRUE(response->complete()); - EXPECT_STREQ("200", response->headers().Status()->value().c_str()); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); response = 
IntegrationUtil::makeSingleRequest(lookupPort("http"), "GET", "/healthcheck", "", downstreamProtocol(), version_); EXPECT_TRUE(response->complete()); - EXPECT_STREQ("503", response->headers().Status()->value().c_str()); + EXPECT_EQ("503", response->headers().Status()->value().getStringView()); response = IntegrationUtil::makeSingleRequest(lookupPort("admin"), "POST", "/healthcheck/ok", "", downstreamProtocol(), version_); EXPECT_TRUE(response->complete()); - EXPECT_STREQ("200", response->headers().Status()->value().c_str()); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); response = IntegrationUtil::makeSingleRequest(lookupPort("http"), "GET", "/healthcheck", "", downstreamProtocol(), version_); EXPECT_TRUE(response->complete()); - EXPECT_STREQ("200", response->headers().Status()->value().c_str()); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); } TEST_P(IntegrationAdminTest, HealthCheckWithBufferFilter) { @@ -58,7 +58,7 @@ TEST_P(IntegrationAdminTest, HealthCheckWithBufferFilter) { BufferingStreamDecoderPtr response = IntegrationUtil::makeSingleRequest( lookupPort("http"), "GET", "/healthcheck", "", downstreamProtocol(), version_); EXPECT_TRUE(response->complete()); - EXPECT_STREQ("200", response->headers().Status()->value().c_str()); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); } TEST_P(IntegrationAdminTest, AdminLogging) { @@ -67,25 +67,25 @@ TEST_P(IntegrationAdminTest, AdminLogging) { BufferingStreamDecoderPtr response = IntegrationUtil::makeSingleRequest( lookupPort("admin"), "POST", "/logging", "", downstreamProtocol(), version_); EXPECT_TRUE(response->complete()); - EXPECT_STREQ("200", response->headers().Status()->value().c_str()); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); // Bad level response = IntegrationUtil::makeSingleRequest(lookupPort("admin"), "POST", "/logging?level=blah", "", downstreamProtocol(), version_); 
EXPECT_TRUE(response->complete()); - EXPECT_STREQ("404", response->headers().Status()->value().c_str()); + EXPECT_EQ("404", response->headers().Status()->value().getStringView()); // Bad logger response = IntegrationUtil::makeSingleRequest(lookupPort("admin"), "POST", "/logging?blah=info", "", downstreamProtocol(), version_); EXPECT_TRUE(response->complete()); - EXPECT_STREQ("404", response->headers().Status()->value().c_str()); + EXPECT_EQ("404", response->headers().Status()->value().getStringView()); // This is going to stomp over custom log levels that are set on the command line. response = IntegrationUtil::makeSingleRequest( lookupPort("admin"), "POST", "/logging?level=warning", "", downstreamProtocol(), version_); EXPECT_TRUE(response->complete()); - EXPECT_STREQ("200", response->headers().Status()->value().c_str()); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); for (const Logger::Logger& logger : Logger::Registry::loggers()) { EXPECT_EQ("warning", logger.levelString()); } @@ -93,7 +93,7 @@ TEST_P(IntegrationAdminTest, AdminLogging) { response = IntegrationUtil::makeSingleRequest( lookupPort("admin"), "POST", "/logging?assert=trace", "", downstreamProtocol(), version_); EXPECT_TRUE(response->complete()); - EXPECT_STREQ("200", response->headers().Status()->value().c_str()); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); EXPECT_EQ(spdlog::level::trace, Logger::Registry::getLog(Logger::Id::assert).level()); spdlog::string_view_t level_name = spdlog::level::level_string_views[default_log_level_]; @@ -101,7 +101,7 @@ TEST_P(IntegrationAdminTest, AdminLogging) { fmt::format("/logging?level={}", level_name), "", downstreamProtocol(), version_); EXPECT_TRUE(response->complete()); - EXPECT_STREQ("200", response->headers().Status()->value().c_str()); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); for (const Logger::Logger& logger : Logger::Registry::loggers()) { EXPECT_EQ(level_name, 
logger.levelString()); } @@ -109,12 +109,12 @@ TEST_P(IntegrationAdminTest, AdminLogging) { namespace { -const char* ContentType(const BufferingStreamDecoderPtr& response) { +std::string ContentType(const BufferingStreamDecoderPtr& response) { const Http::HeaderEntry* entry = response->headers().ContentType(); if (entry == nullptr) { return "(null)"; } - return entry->value().c_str(); + return std::string(entry->value().getStringView()); } } // namespace @@ -125,84 +125,84 @@ TEST_P(IntegrationAdminTest, Admin) { BufferingStreamDecoderPtr response = IntegrationUtil::makeSingleRequest( lookupPort("admin"), "GET", "/notfound", "", downstreamProtocol(), version_); EXPECT_TRUE(response->complete()); - EXPECT_STREQ("404", response->headers().Status()->value().c_str()); - EXPECT_STREQ("text/plain; charset=UTF-8", ContentType(response)); + EXPECT_EQ("404", response->headers().Status()->value().getStringView()); + EXPECT_EQ("text/plain; charset=UTF-8", ContentType(response)); EXPECT_NE(std::string::npos, response->body().find("invalid path. 
admin commands are:")) << response->body(); response = IntegrationUtil::makeSingleRequest(lookupPort("admin"), "GET", "/help", "", downstreamProtocol(), version_); EXPECT_TRUE(response->complete()); - EXPECT_STREQ("200", response->headers().Status()->value().c_str()); - EXPECT_STREQ("text/plain; charset=UTF-8", ContentType(response)); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("text/plain; charset=UTF-8", ContentType(response)); EXPECT_NE(std::string::npos, response->body().find("admin commands are:")) << response->body(); response = IntegrationUtil::makeSingleRequest(lookupPort("admin"), "GET", "/", "", downstreamProtocol(), version_); EXPECT_TRUE(response->complete()); - EXPECT_STREQ("200", response->headers().Status()->value().c_str()); - EXPECT_STREQ("text/html; charset=UTF-8", ContentType(response)); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("text/html; charset=UTF-8", ContentType(response)); EXPECT_NE(std::string::npos, response->body().find("Envoy Admin")) << response->body(); response = IntegrationUtil::makeSingleRequest(lookupPort("admin"), "GET", "/server_info", "", downstreamProtocol(), version_); EXPECT_TRUE(response->complete()); - EXPECT_STREQ("200", response->headers().Status()->value().c_str()); - EXPECT_STREQ("application/json", ContentType(response)); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("application/json", ContentType(response)); response = IntegrationUtil::makeSingleRequest(lookupPort("admin"), "GET", "/stats", "", downstreamProtocol(), version_); EXPECT_TRUE(response->complete()); - EXPECT_STREQ("200", response->headers().Status()->value().c_str()); - EXPECT_STREQ("text/plain; charset=UTF-8", ContentType(response)); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("text/plain; charset=UTF-8", ContentType(response)); response = 
IntegrationUtil::makeSingleRequest(lookupPort("admin"), "GET", "/stats?usedonly", "", downstreamProtocol(), version_); EXPECT_TRUE(response->complete()); - EXPECT_STREQ("200", response->headers().Status()->value().c_str()); - EXPECT_STREQ("text/plain; charset=UTF-8", ContentType(response)); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("text/plain; charset=UTF-8", ContentType(response)); // Testing a filter with no matches response = IntegrationUtil::makeSingleRequest(lookupPort("admin"), "GET", "/stats?filter=foo", "", downstreamProtocol(), version_); EXPECT_TRUE(response->complete()); - EXPECT_STREQ("200", response->headers().Status()->value().c_str()); - EXPECT_STREQ("text/plain; charset=UTF-8", ContentType(response)); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("text/plain; charset=UTF-8", ContentType(response)); // Testing a filter with matches response = IntegrationUtil::makeSingleRequest(lookupPort("admin"), "GET", "/stats?filter=server", "", downstreamProtocol(), version_); EXPECT_TRUE(response->complete()); - EXPECT_STREQ("200", response->headers().Status()->value().c_str()); - EXPECT_STREQ("text/plain; charset=UTF-8", ContentType(response)); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("text/plain; charset=UTF-8", ContentType(response)); response = IntegrationUtil::makeSingleRequest(lookupPort("admin"), "GET", "/stats?filter=server&usedonly", "", downstreamProtocol(), version_); EXPECT_TRUE(response->complete()); - EXPECT_STREQ("200", response->headers().Status()->value().c_str()); - EXPECT_STREQ("text/plain; charset=UTF-8", ContentType(response)); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("text/plain; charset=UTF-8", ContentType(response)); response = IntegrationUtil::makeSingleRequest(lookupPort("admin"), "GET", "/stats?format=json&usedonly", "", downstreamProtocol(), version_); 
EXPECT_TRUE(response->complete()); - EXPECT_STREQ("200", response->headers().Status()->value().c_str()); - EXPECT_STREQ("application/json", ContentType(response)); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("application/json", ContentType(response)); validateStatsJson(response->body(), 0); response = IntegrationUtil::makeSingleRequest(lookupPort("admin"), "GET", "/stats?format=blah", "", downstreamProtocol(), version_); EXPECT_TRUE(response->complete()); - EXPECT_STREQ("404", response->headers().Status()->value().c_str()); - EXPECT_STREQ("text/plain; charset=UTF-8", ContentType(response)); + EXPECT_EQ("404", response->headers().Status()->value().getStringView()); + EXPECT_EQ("text/plain; charset=UTF-8", ContentType(response)); response = IntegrationUtil::makeSingleRequest(lookupPort("admin"), "GET", "/stats?format=json", "", downstreamProtocol(), version_); EXPECT_TRUE(response->complete()); - EXPECT_STREQ("application/json", ContentType(response)); - EXPECT_STREQ("200", response->headers().Status()->value().c_str()); + EXPECT_EQ("application/json", ContentType(response)); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); validateStatsJson(response->body(), 1); // Filtering stats by a regex with one match should return just that match. 
@@ -210,8 +210,8 @@ TEST_P(IntegrationAdminTest, Admin) { "/stats?format=json&filter=^server\\.version$", "", downstreamProtocol(), version_); EXPECT_TRUE(response->complete()); - EXPECT_STREQ("application/json", ContentType(response)); - EXPECT_STREQ("200", response->headers().Status()->value().c_str()); + EXPECT_EQ("application/json", ContentType(response)); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); validateStatsJson(response->body(), 0); EXPECT_THAT(response->body(), testing::Eq("{\"stats\":[{\"name\":\"server.version\",\"value\":0}]}")); @@ -221,8 +221,8 @@ TEST_P(IntegrationAdminTest, Admin) { "/stats?format=json&filter=server\\.version", "", downstreamProtocol(), version_); EXPECT_TRUE(response->complete()); - EXPECT_STREQ("application/json", ContentType(response)); - EXPECT_STREQ("200", response->headers().Status()->value().c_str()); + EXPECT_EQ("application/json", ContentType(response)); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); validateStatsJson(response->body(), 0); EXPECT_THAT(response->body(), testing::Eq("{\"stats\":[{\"name\":\"server.version\",\"value\":0}]}")); @@ -233,15 +233,15 @@ TEST_P(IntegrationAdminTest, Admin) { "/stats?format=json&filter=not_intended_to_appear", "", downstreamProtocol(), version_); EXPECT_TRUE(response->complete()); - EXPECT_STREQ("application/json", ContentType(response)); - EXPECT_STREQ("200", response->headers().Status()->value().c_str()); + EXPECT_EQ("application/json", ContentType(response)); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); validateStatsJson(response->body(), 0); EXPECT_THAT(response->body(), testing::Eq("{\"stats\":[]}")); response = IntegrationUtil::makeSingleRequest( lookupPort("admin"), "GET", "/stats?format=prometheus", "", downstreamProtocol(), version_); EXPECT_TRUE(response->complete()); - EXPECT_STREQ("200", response->headers().Status()->value().c_str()); + EXPECT_EQ("200", 
response->headers().Status()->value().getStringView()); EXPECT_THAT(response->body(), testing::HasSubstr( "envoy_http_downstream_rq_xx{envoy_response_code_class=\"4\",envoy_http_conn_" @@ -260,7 +260,7 @@ TEST_P(IntegrationAdminTest, Admin) { response = IntegrationUtil::makeSingleRequest(lookupPort("admin"), "GET", "/stats/prometheus", "", downstreamProtocol(), version_); EXPECT_TRUE(response->complete()); - EXPECT_STREQ("200", response->headers().Status()->value().c_str()); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); EXPECT_THAT(response->body(), testing::HasSubstr( "envoy_http_downstream_rq_xx{envoy_response_code_class=\"4\",envoy_http_conn_" @@ -279,51 +279,51 @@ TEST_P(IntegrationAdminTest, Admin) { response = IntegrationUtil::makeSingleRequest(lookupPort("admin"), "GET", "/clusters", "", downstreamProtocol(), version_); EXPECT_TRUE(response->complete()); - EXPECT_STREQ("200", response->headers().Status()->value().c_str()); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); EXPECT_THAT(response->body(), testing::HasSubstr("added_via_api")); - EXPECT_STREQ("text/plain; charset=UTF-8", ContentType(response)); + EXPECT_EQ("text/plain; charset=UTF-8", ContentType(response)); response = IntegrationUtil::makeSingleRequest(lookupPort("admin"), "POST", "/cpuprofiler", "", downstreamProtocol(), version_); EXPECT_TRUE(response->complete()); - EXPECT_STREQ("400", response->headers().Status()->value().c_str()); - EXPECT_STREQ("text/plain; charset=UTF-8", ContentType(response)); + EXPECT_EQ("400", response->headers().Status()->value().getStringView()); + EXPECT_EQ("text/plain; charset=UTF-8", ContentType(response)); response = IntegrationUtil::makeSingleRequest(lookupPort("admin"), "GET", "/hot_restart_version", "", downstreamProtocol(), version_); EXPECT_TRUE(response->complete()); - EXPECT_STREQ("200", response->headers().Status()->value().c_str()); - EXPECT_STREQ("text/plain; charset=UTF-8", ContentType(response)); + 
EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("text/plain; charset=UTF-8", ContentType(response)); response = IntegrationUtil::makeSingleRequest(lookupPort("admin"), "POST", "/reset_counters", "", downstreamProtocol(), version_); EXPECT_TRUE(response->complete()); - EXPECT_STREQ("200", response->headers().Status()->value().c_str()); - EXPECT_STREQ("text/plain; charset=UTF-8", ContentType(response)); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("text/plain; charset=UTF-8", ContentType(response)); response = IntegrationUtil::makeSingleRequest(lookupPort("admin"), "GET", "/certs", "", downstreamProtocol(), version_); EXPECT_TRUE(response->complete()); - EXPECT_STREQ("200", response->headers().Status()->value().c_str()); - EXPECT_STREQ("application/json", ContentType(response)); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("application/json", ContentType(response)); response = IntegrationUtil::makeSingleRequest(lookupPort("admin"), "GET", "/runtime", "", downstreamProtocol(), version_); EXPECT_TRUE(response->complete()); - EXPECT_STREQ("200", response->headers().Status()->value().c_str()); - EXPECT_STREQ("application/json", ContentType(response)); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("application/json", ContentType(response)); response = IntegrationUtil::makeSingleRequest(lookupPort("admin"), "GET", "/runtime?format=json", "", downstreamProtocol(), version_); EXPECT_TRUE(response->complete()); - EXPECT_STREQ("200", response->headers().Status()->value().c_str()); - EXPECT_STREQ("application/json", ContentType(response)); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("application/json", ContentType(response)); response = IntegrationUtil::makeSingleRequest(lookupPort("admin"), "GET", "/listeners", "", downstreamProtocol(), version_); 
EXPECT_TRUE(response->complete()); - EXPECT_STREQ("200", response->headers().Status()->value().c_str()); - EXPECT_STREQ("application/json", ContentType(response)); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("application/json", ContentType(response)); Json::ObjectSharedPtr json = Json::Factory::loadFromString(response->body()); std::vector listener_info = json->asObjectArray(); @@ -339,8 +339,8 @@ TEST_P(IntegrationAdminTest, Admin) { response = IntegrationUtil::makeSingleRequest(lookupPort("admin"), "GET", "/config_dump", "", downstreamProtocol(), version_); EXPECT_TRUE(response->complete()); - EXPECT_STREQ("200", response->headers().Status()->value().c_str()); - EXPECT_STREQ("application/json", ContentType(response)); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("application/json", ContentType(response)); json = Json::Factory::loadFromString(response->body()); size_t index = 0; const std::string expected_types[] = { @@ -386,7 +386,7 @@ TEST_P(IntegrationAdminTest, AdminOnDestroyCallbacks) { lookupPort("admin"), "GET", "/foo/bar", "", downstreamProtocol(), version_); EXPECT_TRUE(response->complete()); - EXPECT_STREQ("200", response->headers().Status()->value().c_str()); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); // Check that the added callback was invoked. 
EXPECT_EQ(test, false); @@ -405,15 +405,15 @@ TEST_P(IntegrationAdminTest, AdminCpuProfilerStart) { lookupPort("admin"), "POST", "/cpuprofiler?enable=y", "", downstreamProtocol(), version_); EXPECT_TRUE(response->complete()); #ifdef PROFILER_AVAILABLE - EXPECT_STREQ("200", response->headers().Status()->value().c_str()); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); #else - EXPECT_STREQ("500", response->headers().Status()->value().c_str()); + EXPECT_EQ("500", response->headers().Status()->value().getStringView()); #endif response = IntegrationUtil::makeSingleRequest( lookupPort("admin"), "POST", "/cpuprofiler?enable=n", "", downstreamProtocol(), version_); EXPECT_TRUE(response->complete()); - EXPECT_STREQ("200", response->headers().Status()->value().c_str()); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); } class IntegrationAdminIpv4Ipv6Test : public testing::Test, public HttpIntegrationTest { @@ -442,7 +442,7 @@ TEST_F(IntegrationAdminIpv4Ipv6Test, Ipv4Ipv6Listen) { BufferingStreamDecoderPtr response = IntegrationUtil::makeSingleRequest( lookupPort("admin"), "GET", "/server_info", "", downstreamProtocol(), version_); EXPECT_TRUE(response->complete()); - EXPECT_STREQ("200", response->headers().Status()->value().c_str()); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); } } @@ -467,7 +467,7 @@ class StatsMatcherIntegrationTest response_ = IntegrationUtil::makeSingleRequest(lookupPort("admin"), "GET", "/stats", "", downstreamProtocol(), version_); ASSERT_TRUE(response_->complete()); - EXPECT_STREQ("200", response_->headers().Status()->value().c_str()); + EXPECT_EQ("200", response_->headers().Status()->value().getStringView()); } BufferingStreamDecoderPtr response_; diff --git a/test/integration/integration_test.cc b/test/integration/integration_test.cc index dd7406282aba9..0611b2da8e989 100644 --- a/test/integration/integration_test.cc +++ b/test/integration/integration_test.cc @@ 
-84,12 +84,12 @@ TEST_P(IntegrationTest, RouterDirectResponse) { BufferingStreamDecoderPtr response = IntegrationUtil::makeSingleRequest( lookupPort("http"), "GET", "/", "", downstream_protocol_, version_, "direct.example.com"); ASSERT_TRUE(response->complete()); - EXPECT_STREQ("200", response->headers().Status()->value().c_str()); - EXPECT_STREQ("example-value", response->headers() - .get(Envoy::Http::LowerCaseString("x-additional-header")) - ->value() - .c_str()); - EXPECT_STREQ("text/html", response->headers().ContentType()->value().c_str()); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("example-value", response->headers() + .get(Envoy::Http::LowerCaseString("x-additional-header")) + ->value() + .getStringView()); + EXPECT_EQ("text/html", response->headers().ContentType()->value().getStringView()); EXPECT_EQ(body, response->body()); } @@ -191,7 +191,7 @@ TEST_P(IntegrationTest, UpstreamDisconnectWithTwoRequests) { EXPECT_TRUE(upstream_request_->complete()); EXPECT_TRUE(response->complete()); - EXPECT_STREQ("200", response->headers().Status()->value().c_str()); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); test_server_->waitForCounterGe("cluster.cluster_0.upstream_cx_total", 1); test_server_->waitForCounterGe("cluster.cluster_0.upstream_rq_200", 1); @@ -206,7 +206,7 @@ TEST_P(IntegrationTest, UpstreamDisconnectWithTwoRequests) { EXPECT_TRUE(upstream_request_->complete()); EXPECT_TRUE(response2->complete()); - EXPECT_STREQ("200", response2->headers().Status()->value().c_str()); + EXPECT_EQ("200", response2->headers().Status()->value().getStringView()); test_server_->waitForCounterGe("cluster.cluster_0.upstream_cx_total", 2); test_server_->waitForCounterGe("cluster.cluster_0.upstream_rq_200", 2); } @@ -352,9 +352,11 @@ TEST_P(IntegrationTest, TestInlineHeaders) { EXPECT_EQ(upstream_headers->Host()->value(), "foo.com"); EXPECT_EQ(upstream_headers->CacheControl()->value(), "public,123"); 
ASSERT_TRUE(upstream_headers->get(Envoy::Http::LowerCaseString("foo")) != nullptr); - EXPECT_STREQ("bar", upstream_headers->get(Envoy::Http::LowerCaseString("foo"))->value().c_str()); + EXPECT_EQ("bar", + upstream_headers->get(Envoy::Http::LowerCaseString("foo"))->value().getStringView()); ASSERT_TRUE(upstream_headers->get(Envoy::Http::LowerCaseString("eep")) != nullptr); - EXPECT_STREQ("baz", upstream_headers->get(Envoy::Http::LowerCaseString("eep"))->value().c_str()); + EXPECT_EQ("baz", + upstream_headers->get(Envoy::Http::LowerCaseString("eep"))->value().getStringView()); } // Verify for HTTP/1.0 a keep-alive header results in no connection: close. @@ -387,7 +389,7 @@ TEST_P(IntegrationTest, NoHost) { response->waitForEndStream(); ASSERT_TRUE(response->complete()); - EXPECT_STREQ("400", response->headers().Status()->value().c_str()); + EXPECT_EQ("400", response->headers().Status()->value().getStringView()); } TEST_P(IntegrationTest, BadPath) { @@ -490,7 +492,7 @@ TEST_P(IntegrationTest, UpstreamProtocolError) { codec_client_->waitForDisconnect(); EXPECT_TRUE(response->complete()); - EXPECT_STREQ("503", response->headers().Status()->value().c_str()); + EXPECT_EQ("503", response->headers().Status()->value().getStringView()); } TEST_P(IntegrationTest, TestHead) { @@ -670,7 +672,7 @@ TEST_P(IntegrationTest, TestDelayedConnectionTeardownOnGracefulClose) { response->waitForEndStream(); EXPECT_TRUE(response->complete()); - EXPECT_STREQ("413", response->headers().Status()->value().c_str()); + EXPECT_EQ("413", response->headers().Status()->value().getStringView()); // With no delayed close processing, Envoy will close the connection immediately after flushing // and this should instead return true. 
EXPECT_FALSE(codec_client_->waitForDisconnect(std::chrono::milliseconds(500))); @@ -776,7 +778,7 @@ TEST_P(IntegrationTest, NoConnectionPoolsFree) { response->waitForEndStream(); - EXPECT_STREQ("503", response->headers().Status()->value().c_str()); + EXPECT_EQ("503", response->headers().Status()->value().getStringView()); test_server_->waitForCounterGe("cluster.cluster_0.upstream_rq_503", 1); EXPECT_EQ(test_server_->counter("cluster.cluster_0.upstream_cx_pool_overflow")->value(), 1); diff --git a/test/integration/load_stats_integration_test.cc b/test/integration/load_stats_integration_test.cc index 8d722aa94a38a..a7ebcc237a35f 100644 --- a/test/integration/load_stats_integration_test.cc +++ b/test/integration/load_stats_integration_test.cc @@ -248,10 +248,11 @@ class LoadStatsIntegrationTest : public testing::TestWithParamheaders().Method()->value().c_str()); - EXPECT_STREQ("/envoy.service.load_stats.v2.LoadReportingService/StreamLoadStats", - loadstats_stream_->headers().Path()->value().c_str()); - EXPECT_STREQ("application/grpc", loadstats_stream_->headers().ContentType()->value().c_str()); + EXPECT_EQ("POST", loadstats_stream_->headers().Method()->value().getStringView()); + EXPECT_EQ("/envoy.service.load_stats.v2.LoadReportingService/StreamLoadStats", + loadstats_stream_->headers().Path()->value().getStringView()); + EXPECT_EQ("application/grpc", + loadstats_stream_->headers().ContentType()->value().getStringView()); } while (!TestUtility::assertRepeatedPtrFieldEqual(expected_cluster_stats, loadstats_request.cluster_stats())); } @@ -274,8 +275,8 @@ class LoadStatsIntegrationTest : public testing::TestWithParambodyLength()); ASSERT_TRUE(response_->complete()); - EXPECT_STREQ(std::to_string(response_code).c_str(), - response_->headers().Status()->value().c_str()); + EXPECT_EQ(std::to_string(response_code), + response_->headers().Status()->value().getStringView()); EXPECT_EQ(response_size_, response_->body().size()); } @@ -583,7 +584,7 @@ 
TEST_P(LoadStatsIntegrationTest, Dropped) { initiateClientConnection(); response_->waitForEndStream(); ASSERT_TRUE(response_->complete()); - EXPECT_STREQ("503", response_->headers().Status()->value().c_str()); + EXPECT_EQ("503", response_->headers().Status()->value().getStringView()); cleanupUpstreamAndDownstream(); waitForLoadStatsRequest({}, 1); diff --git a/test/integration/overload_integration_test.cc b/test/integration/overload_integration_test.cc index 27c9818f4a949..f3084936c7025 100644 --- a/test/integration/overload_integration_test.cc +++ b/test/integration/overload_integration_test.cc @@ -72,7 +72,7 @@ TEST_P(OverloadIntegrationTest, CloseStreamsWhenOverloaded) { response->waitForEndStream(); EXPECT_TRUE(response->complete()); - EXPECT_STREQ("503", response->headers().Status()->value().c_str()); + EXPECT_EQ("503", response->headers().Status()->value().getStringView()); EXPECT_EQ("envoy overloaded", response->body()); codec_client_->close(); @@ -81,7 +81,7 @@ TEST_P(OverloadIntegrationTest, CloseStreamsWhenOverloaded) { response->waitForEndStream(); EXPECT_TRUE(response->complete()); - EXPECT_STREQ("503", response->headers().Status()->value().c_str()); + EXPECT_EQ("503", response->headers().Status()->value().getStringView()); EXPECT_EQ("envoy overloaded", response->body()); codec_client_->close(); @@ -95,7 +95,7 @@ TEST_P(OverloadIntegrationTest, CloseStreamsWhenOverloaded) { EXPECT_TRUE(upstream_request_->complete()); EXPECT_EQ(0U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_STREQ("200", response->headers().Status()->value().c_str()); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); EXPECT_EQ(0U, response->body().size()); } @@ -118,8 +118,8 @@ TEST_P(OverloadIntegrationTest, DisableKeepaliveWhenOverloaded) { codec_client_->waitForDisconnect(); EXPECT_TRUE(response->complete()); - EXPECT_STREQ("200", response->headers().Status()->value().c_str()); - EXPECT_STREQ("close", 
response->headers().Connection()->value().c_str()); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("close", response->headers().Connection()->value().getStringView()); // Deactivate overload state and check that keepalive is not disabled updateResource(0.7); @@ -129,7 +129,7 @@ TEST_P(OverloadIntegrationTest, DisableKeepaliveWhenOverloaded) { response = sendRequestAndWaitForResponse(request_headers, 1, default_response_headers_, 1); EXPECT_TRUE(response->complete()); - EXPECT_STREQ("200", response->headers().Status()->value().c_str()); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); EXPECT_EQ(nullptr, response->headers().Connection()); } @@ -156,7 +156,7 @@ TEST_P(OverloadIntegrationTest, StopAcceptingConnectionsWhenOverloaded) { response->waitForEndStream(); EXPECT_TRUE(response->complete()); - EXPECT_STREQ("503", response->headers().Status()->value().c_str()); + EXPECT_EQ("503", response->headers().Status()->value().getStringView()); EXPECT_EQ("envoy overloaded", response->body()); codec_client_->close(); } diff --git a/test/integration/protocol_integration_test.cc b/test/integration/protocol_integration_test.cc index 5515391c010a8..e3171f40ce0b1 100644 --- a/test/integration/protocol_integration_test.cc +++ b/test/integration/protocol_integration_test.cc @@ -113,7 +113,7 @@ TEST_P(DownstreamProtocolIntegrationTest, RouterClusterNotFound404) { BufferingStreamDecoderPtr response = IntegrationUtil::makeSingleRequest( lookupPort("http"), "GET", "/unknown", "", downstream_protocol_, version_, "foo.com"); ASSERT_TRUE(response->complete()); - EXPECT_STREQ("404", response->headers().Status()->value().c_str()); + EXPECT_EQ("404", response->headers().Status()->value().getStringView()); } // Add a route that uses unknown cluster (expect 503 Service Unavailable). 
@@ -128,7 +128,7 @@ TEST_P(DownstreamProtocolIntegrationTest, RouterClusterNotFound503) { BufferingStreamDecoderPtr response = IntegrationUtil::makeSingleRequest( lookupPort("http"), "GET", "/unknown", "", downstream_protocol_, version_, "foo.com"); ASSERT_TRUE(response->complete()); - EXPECT_STREQ("503", response->headers().Status()->value().c_str()); + EXPECT_EQ("503", response->headers().Status()->value().getStringView()); } // Add a route which redirects HTTP to HTTPS, and verify Envoy sends a 301 @@ -141,9 +141,9 @@ TEST_P(ProtocolIntegrationTest, RouterRedirect) { BufferingStreamDecoderPtr response = IntegrationUtil::makeSingleRequest( lookupPort("http"), "GET", "/foo", "", downstream_protocol_, version_, "www.redirect.com"); ASSERT_TRUE(response->complete()); - EXPECT_STREQ("301", response->headers().Status()->value().c_str()); - EXPECT_STREQ("https://www.redirect.com/foo", - response->headers().get(Http::Headers::get().Location)->value().c_str()); + EXPECT_EQ("301", response->headers().Status()->value().getStringView()); + EXPECT_EQ("https://www.redirect.com/foo", + response->headers().get(Http::Headers::get().Location)->value().getStringView()); } // Add a health check filter and verify correct computation of health based on upstream status. @@ -163,7 +163,7 @@ name: envoy.health_check response->waitForEndStream(); EXPECT_TRUE(response->complete()); - EXPECT_STREQ("503", response->headers().Status()->value().c_str()); + EXPECT_EQ("503", response->headers().Status()->value().getStringView()); } // Add a health check filter and verify correct computation of health based on upstream status. 
@@ -183,7 +183,7 @@ name: envoy.health_check response->waitForEndStream(); EXPECT_TRUE(response->complete()); - EXPECT_STREQ("503", response->headers().Status()->value().c_str()); + EXPECT_EQ("503", response->headers().Status()->value().getStringView()); } TEST_P(ProtocolIntegrationTest, AddEncodedTrailers) { @@ -201,12 +201,12 @@ config: {} response->waitForEndStream(); if (upstreamProtocol() == FakeHttpConnection::Type::HTTP2) { - EXPECT_STREQ("decode", upstream_request_->trailers()->GrpcMessage()->value().c_str()); + EXPECT_EQ("decode", upstream_request_->trailers()->GrpcMessage()->value().getStringView()); } EXPECT_TRUE(response->complete()); - EXPECT_STREQ("503", response->headers().Status()->value().c_str()); + EXPECT_EQ("503", response->headers().Status()->value().getStringView()); if (downstream_protocol_ == Http::CodecClient::Type::HTTP2) { - EXPECT_STREQ("encode", response->trailers()->GrpcMessage()->value().c_str()); + EXPECT_EQ("encode", response->trailers()->GrpcMessage()->value().getStringView()); } } @@ -222,7 +222,7 @@ TEST_P(ProtocolIntegrationTest, DrainClose) { codec_client_->waitForDisconnect(); EXPECT_TRUE(response->complete()); - EXPECT_STREQ("200", response->headers().Status()->value().c_str()); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); if (downstream_protocol_ == Http::CodecClient::Type::HTTP2) { EXPECT_TRUE(codec_client_->sawGoAway()); } @@ -259,7 +259,7 @@ TEST_P(ProtocolIntegrationTest, Retry) { EXPECT_EQ(1024U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_STREQ("200", response->headers().Status()->value().c_str()); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); EXPECT_EQ(512U, response->body().size()); } @@ -282,7 +282,10 @@ TEST_P(DownstreamProtocolIntegrationTest, RetryAttemptCountHeader) { waitForNextUpstreamRequest(); upstream_request_->encodeHeaders(Http::TestHeaderMapImpl{{":status", "503"}}, false); - 
EXPECT_EQ(atoi(upstream_request_->headers().EnvoyAttemptCount()->value().c_str()), 1); + EXPECT_EQ( + atoi(std::string(upstream_request_->headers().EnvoyAttemptCount()->value().getStringView()) + .c_str()), + 1); if (fake_upstreams_[0]->httpType() == FakeHttpConnection::Type::HTTP1) { ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect()); @@ -291,7 +294,10 @@ TEST_P(DownstreamProtocolIntegrationTest, RetryAttemptCountHeader) { ASSERT_TRUE(upstream_request_->waitForReset()); } waitForNextUpstreamRequest(); - EXPECT_EQ(atoi(upstream_request_->headers().EnvoyAttemptCount()->value().c_str()), 2); + EXPECT_EQ( + atoi(std::string(upstream_request_->headers().EnvoyAttemptCount()->value().getStringView()) + .c_str()), + 2); upstream_request_->encodeHeaders(default_response_headers_, false); upstream_request_->encodeData(512, true); @@ -300,7 +306,7 @@ TEST_P(DownstreamProtocolIntegrationTest, RetryAttemptCountHeader) { EXPECT_EQ(1024U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_STREQ("200", response->headers().Status()->value().c_str()); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); EXPECT_EQ(512U, response->body().size()); } @@ -378,7 +384,7 @@ TEST_P(DownstreamProtocolIntegrationTest, RetryPriority) { EXPECT_EQ(1024U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_STREQ("200", response->headers().Status()->value().c_str()); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); EXPECT_EQ(512U, response->body().size()); } @@ -436,7 +442,7 @@ TEST_P(DownstreamProtocolIntegrationTest, RetryHostPredicateFilter) { EXPECT_EQ(1024U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_STREQ("200", response->headers().Status()->value().c_str()); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); EXPECT_EQ(512U, response->body().size()); } @@ -464,7 +470,7 @@ TEST_P(ProtocolIntegrationTest, 
RetryHittingBufferLimit) { EXPECT_EQ(66560U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_STREQ("503", response->headers().Status()->value().c_str()); + EXPECT_EQ("503", response->headers().Status()->value().getStringView()); } // Test hitting the dynamo filter with too many request bytes to buffer. Ensure the connection @@ -497,7 +503,7 @@ TEST_P(DownstreamProtocolIntegrationTest, HittingDecoderFilterLimit) { ASSERT_TRUE(response->complete()); } if (response->complete()) { - EXPECT_STREQ("413", response->headers().Status()->value().c_str()); + EXPECT_EQ("413", response->headers().Status()->value().getStringView()); } } @@ -528,7 +534,7 @@ TEST_P(DownstreamProtocolIntegrationTest, HittingEncoderFilterLimit) { response->waitForEndStream(); EXPECT_TRUE(response->complete()); - EXPECT_STREQ("500", response->headers().Status()->value().c_str()); + EXPECT_EQ("500", response->headers().Status()->value().getStringView()); } TEST_P(ProtocolIntegrationTest, EnvoyHandling100Continue) { testEnvoyHandling100Continue(); } @@ -562,7 +568,7 @@ TEST_P(DownstreamProtocolIntegrationTest, ValidZeroLengthContent) { auto response = sendRequestAndWaitForResponse(request_headers, 0, default_response_headers_, 0); ASSERT_TRUE(response->complete()); - EXPECT_STREQ("200", response->headers().Status()->value().c_str()); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); } TEST_P(DownstreamProtocolIntegrationTest, InvalidContentLength) { @@ -586,7 +592,7 @@ TEST_P(DownstreamProtocolIntegrationTest, InvalidContentLength) { if (downstream_protocol_ == Http::CodecClient::Type::HTTP1) { ASSERT_TRUE(response->complete()); - EXPECT_STREQ("400", response->headers().Status()->value().c_str()); + EXPECT_EQ("400", response->headers().Status()->value().getStringView()); } else { ASSERT_TRUE(response->reset()); EXPECT_EQ(Http::StreamResetReason::RemoteReset, response->reset_reason()); @@ -612,7 +618,7 @@ TEST_P(DownstreamProtocolIntegrationTest, 
MultipleContentLengths) { if (downstream_protocol_ == Http::CodecClient::Type::HTTP1) { ASSERT_TRUE(response->complete()); - EXPECT_STREQ("400", response->headers().Status()->value().c_str()); + EXPECT_EQ("400", response->headers().Status()->value().getStringView()); } else { ASSERT_TRUE(response->reset()); EXPECT_EQ(Http::StreamResetReason::RemoteReset, response->reset_reason()); @@ -645,7 +651,7 @@ name: encode-headers-only } EXPECT_TRUE(response->complete()); - EXPECT_STREQ("503", response->headers().Status()->value().c_str()); + EXPECT_EQ("503", response->headers().Status()->value().getStringView()); EXPECT_EQ(0, response->body().size()); } @@ -668,7 +674,7 @@ name: decode-headers-only response->waitForEndStream(); EXPECT_TRUE(response->complete()); - EXPECT_STREQ("503", response->headers().Status()->value().c_str()); + EXPECT_EQ("503", response->headers().Status()->value().getStringView()); EXPECT_EQ(128, response->body().size()); } @@ -703,7 +709,7 @@ name: passthrough-filter } EXPECT_TRUE(response->complete()); - EXPECT_STREQ("503", response->headers().Status()->value().c_str()); + EXPECT_EQ("503", response->headers().Status()->value().getStringView()); EXPECT_EQ(0, response->body().size()); } @@ -732,7 +738,7 @@ name: passthrough-filter response->waitForEndStream(); EXPECT_TRUE(response->complete()); - EXPECT_STREQ("503", response->headers().Status()->value().c_str()); + EXPECT_EQ("503", response->headers().Status()->value().getStringView()); EXPECT_EQ(128, response->body().size()); } @@ -769,7 +775,7 @@ name: decode-headers-only response->waitForEndStream(); EXPECT_TRUE(response->complete()); - EXPECT_STREQ("503", response->headers().Status()->value().c_str()); + EXPECT_EQ("503", response->headers().Status()->value().getStringView()); EXPECT_EQ(0, upstream_request_->body().length()); } diff --git a/test/integration/ratelimit_integration_test.cc b/test/integration/ratelimit_integration_test.cc index 2f12f8f606d20..0315dc8779705 100644 --- 
a/test/integration/ratelimit_integration_test.cc +++ b/test/integration/ratelimit_integration_test.cc @@ -82,10 +82,11 @@ class RatelimitIntegrationTest : public Grpc::GrpcClientIntegrationParamTest, RELEASE_ASSERT(result, result.message()); result = ratelimit_request_->waitForEndStream(*dispatcher_); RELEASE_ASSERT(result, result.message()); - EXPECT_STREQ("POST", ratelimit_request_->headers().Method()->value().c_str()); - EXPECT_STREQ("/envoy.service.ratelimit.v2.RateLimitService/ShouldRateLimit", - ratelimit_request_->headers().Path()->value().c_str()); - EXPECT_STREQ("application/grpc", ratelimit_request_->headers().ContentType()->value().c_str()); + EXPECT_EQ("POST", ratelimit_request_->headers().Method()->value().getStringView()); + EXPECT_EQ("/envoy.service.ratelimit.v2.RateLimitService/ShouldRateLimit", + ratelimit_request_->headers().Path()->value().getStringView()); + EXPECT_EQ("application/grpc", + ratelimit_request_->headers().ContentType()->value().getStringView()); envoy::service::ratelimit::v2::RateLimitRequest expected_request_msg; expected_request_msg.set_domain("some_domain"); @@ -112,15 +113,15 @@ class RatelimitIntegrationTest : public Grpc::GrpcClientIntegrationParamTest, EXPECT_EQ(request_size_, upstream_request_->bodyLength()); EXPECT_TRUE(response_->complete()); - EXPECT_STREQ("200", response_->headers().Status()->value().c_str()); + EXPECT_EQ("200", response_->headers().Status()->value().getStringView()); EXPECT_EQ(response_size_, response_->body().size()); } void waitForFailedUpstreamResponse(uint32_t response_code) { response_->waitForEndStream(); EXPECT_TRUE(response_->complete()); - EXPECT_STREQ(std::to_string(response_code).c_str(), - response_->headers().Status()->value().c_str()); + EXPECT_EQ(std::to_string(response_code), + response_->headers().Status()->value().getStringView()); } void sendRateLimitResponse(envoy::service::ratelimit::v2::RateLimitResponse_Code code, @@ -134,8 +135,8 @@ class RatelimitIntegrationTest : public 
Grpc::GrpcClientIntegrationParamTest, auto header = static_cast(context) ->mutable_headers() ->Add(); - header->set_key(h.key().c_str()); - header->set_value(h.value().c_str()); + header->set_key(std::string(h.key().getStringView())); + header->set_value(std::string(h.value().getStringView())); return Http::HeaderMap::Iterate::Continue; }, &response_msg); @@ -209,8 +210,8 @@ TEST_P(RatelimitIntegrationTest, OkWithHeaders) { ratelimit_headers.iterate( [](const Http::HeaderEntry& entry, void* context) -> Http::HeaderMap::Iterate { IntegrationStreamDecoder* response = static_cast(context); - Http::LowerCaseString lower_key{entry.key().c_str()}; - EXPECT_STREQ(entry.value().c_str(), response->headers().get(lower_key)->value().c_str()); + Http::LowerCaseString lower_key{std::string(entry.key().getStringView())}; + EXPECT_EQ(entry.value(), response->headers().get(lower_key)->value().getStringView()); return Http::HeaderMap::Iterate::Continue; }, response_.get()); @@ -247,8 +248,8 @@ TEST_P(RatelimitIntegrationTest, OverLimitWithHeaders) { ratelimit_headers.iterate( [](const Http::HeaderEntry& entry, void* context) -> Http::HeaderMap::Iterate { IntegrationStreamDecoder* response = static_cast(context); - Http::LowerCaseString lower_key{entry.key().c_str()}; - EXPECT_STREQ(entry.value().c_str(), response->headers().get(lower_key)->value().c_str()); + Http::LowerCaseString lower_key{std::string(entry.key().getStringView())}; + EXPECT_EQ(entry.value(), response->headers().get(lower_key)->value().getStringView()); return Http::HeaderMap::Iterate::Continue; }, response_.get()); diff --git a/test/integration/redirect_integration_test.cc b/test/integration/redirect_integration_test.cc index e1a614557fc1c..dce879ecdb151 100644 --- a/test/integration/redirect_integration_test.cc +++ b/test/integration/redirect_integration_test.cc @@ -31,7 +31,7 @@ TEST_P(RedirectIntegrationTest, RedirectNotConfigured) { codec_client_ = makeHttpConnection(lookupPort("http")); auto response = 
sendRequestAndWaitForResponse(default_request_headers_, 0, redirect_response_, 0); EXPECT_TRUE(response->complete()); - EXPECT_STREQ("302", response->headers().Status()->value().c_str()); + EXPECT_EQ("302", response->headers().Status()->value().getStringView()); } // Now test a route with redirects configured on in pass-through mode. @@ -41,7 +41,7 @@ TEST_P(RedirectIntegrationTest, InternalRedirectPassedThrough) { codec_client_ = makeHttpConnection(lookupPort("http")); default_request_headers_.insertHost().value("pass.through.internal.redirect", 30); auto response = sendRequestAndWaitForResponse(default_request_headers_, 0, redirect_response_, 0); - EXPECT_STREQ("302", response->headers().Status()->value().c_str()); + EXPECT_EQ("302", response->headers().Status()->value().getStringView()); EXPECT_EQ( 0, test_server_->counter("cluster.cluster_0.upstream_internal_redirect_failed_total")->value()); @@ -67,17 +67,17 @@ TEST_P(RedirectIntegrationTest, BasicInternalRedirect) { waitForNextUpstreamRequest(); ASSERT(upstream_request_->headers().EnvoyOriginalUrl() != nullptr); - EXPECT_STREQ("http://handle.internal.redirect/test/long/url", - upstream_request_->headers().EnvoyOriginalUrl()->value().c_str()); - EXPECT_STREQ("/new/url", upstream_request_->headers().Path()->value().c_str()); - EXPECT_STREQ("authority2", upstream_request_->headers().Host()->value().c_str()); - EXPECT_STREQ("via_value", upstream_request_->headers().Via()->value().c_str()); + EXPECT_EQ("http://handle.internal.redirect/test/long/url", + upstream_request_->headers().EnvoyOriginalUrl()->value().getStringView()); + EXPECT_EQ("/new/url", upstream_request_->headers().Path()->value().getStringView()); + EXPECT_EQ("authority2", upstream_request_->headers().Host()->value().getStringView()); + EXPECT_EQ("via_value", upstream_request_->headers().Via()->value().getStringView()); upstream_request_->encodeHeaders(default_response_headers_, true); response->waitForEndStream(); ASSERT_TRUE(response->complete()); 
- EXPECT_STREQ("200", response->headers().Status()->value().c_str()); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); EXPECT_EQ(1, test_server_->counter("cluster.cluster_0.upstream_internal_redirect_succeeded_total") ->value()); } @@ -92,7 +92,7 @@ TEST_P(RedirectIntegrationTest, InvalidRedirect) { codec_client_ = makeHttpConnection(lookupPort("http")); default_request_headers_.insertHost().value("handle.internal.redirect", 24); auto response = sendRequestAndWaitForResponse(default_request_headers_, 0, redirect_response_, 0); - EXPECT_STREQ("302", response->headers().Status()->value().c_str()); + EXPECT_EQ("302", response->headers().Status()->value().getStringView()); EXPECT_EQ( 1, test_server_->counter("cluster.cluster_0.upstream_internal_redirect_failed_total")->value()); diff --git a/test/integration/sds_dynamic_integration_test.cc b/test/integration/sds_dynamic_integration_test.cc index a8cc23cafaa08..0ce8217ec06f8 100644 --- a/test/integration/sds_dynamic_integration_test.cc +++ b/test/integration/sds_dynamic_integration_test.cc @@ -410,7 +410,7 @@ TEST_P(SdsDynamicUpstreamIntegrationTest, WrongSecretFirst) { BufferingStreamDecoderPtr response = IntegrationUtil::makeSingleRequest( lookupPort("http"), "GET", "/test/long/url", "", downstream_protocol_, version_); ASSERT_TRUE(response->complete()); - EXPECT_STREQ("503", response->headers().Status()->value().c_str()); + EXPECT_EQ("503", response->headers().Status()->value().getStringView()); // To flush out the reset connection from the first request in upstream. 
FakeRawConnectionPtr fake_upstream_connection; diff --git a/test/integration/server.cc b/test/integration/server.cc index de0fef7425d1b..463837fe0bfcb 100644 --- a/test/integration/server.cc +++ b/test/integration/server.cc @@ -200,7 +200,7 @@ IntegrationTestServerImpl::~IntegrationTestServerImpl() { BufferingStreamDecoderPtr response = IntegrationUtil::makeSingleRequest( admin_address, "POST", "/quitquitquit", "", Http::CodecClient::Type::HTTP1); EXPECT_TRUE(response->complete()); - EXPECT_STREQ("200", response->headers().Status()->value().c_str()); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); } } diff --git a/test/integration/websocket_integration_test.cc b/test/integration/websocket_integration_test.cc index d4b55d31e826d..ffe8f683ea3c6 100644 --- a/test/integration/websocket_integration_test.cc +++ b/test/integration/websocket_integration_test.cc @@ -45,7 +45,7 @@ void WebsocketIntegrationTest::validateUpgradeRequestHeaders( const Http::HeaderMap& original_request_headers) { Http::TestHeaderMapImpl proxied_request_headers(original_proxied_request_headers); if (proxied_request_headers.ForwardedProto()) { - ASSERT_STREQ(proxied_request_headers.ForwardedProto()->value().c_str(), "http"); + ASSERT_EQ(proxied_request_headers.ForwardedProto()->value().getStringView(), "http"); proxied_request_headers.removeForwardedProto(); } @@ -55,7 +55,7 @@ void WebsocketIntegrationTest::validateUpgradeRequestHeaders( proxied_request_headers.removeEnvoyExpectedRequestTimeoutMs(); if (proxied_request_headers.Scheme()) { - ASSERT_STREQ(proxied_request_headers.Scheme()->value().c_str(), "http"); + ASSERT_EQ(proxied_request_headers.Scheme()->value().getStringView(), "http"); } else { proxied_request_headers.insertScheme().value().append("http", 4); } @@ -74,7 +74,7 @@ void WebsocketIntegrationTest::validateUpgradeResponseHeaders( // Check for and remove headers added by default for HTTP responses. 
ASSERT_TRUE(proxied_response_headers.Date() != nullptr); ASSERT_TRUE(proxied_response_headers.Server() != nullptr); - ASSERT_STREQ(proxied_response_headers.Server()->value().c_str(), "envoy"); + ASSERT_EQ(proxied_response_headers.Server()->value().getStringView(), "envoy"); proxied_response_headers.removeDate(); proxied_response_headers.removeServer(); @@ -93,7 +93,7 @@ void WebsocketIntegrationTest::commonValidate(Http::HeaderMap& proxied_headers, // If no content length is specified, the HTTP1 codec will add a chunked encoding header. if (original_headers.ContentLength() == nullptr && proxied_headers.TransferEncoding() != nullptr) { - ASSERT_STREQ(proxied_headers.TransferEncoding()->value().c_str(), "chunked"); + ASSERT_EQ(proxied_headers.TransferEncoding()->value().getStringView(), "chunked"); proxied_headers.removeTransferEncoding(); } if (proxied_headers.Connection() != nullptr && @@ -369,7 +369,7 @@ TEST_P(WebsocketIntegrationTest, WebsocketCustomFilterChain) { response_ = std::move(encoder_decoder.second); codec_client_->sendData(encoder_decoder.first, large_req_str, false); response_->waitForEndStream(); - EXPECT_STREQ("413", response_->headers().Status()->value().c_str()); + EXPECT_EQ("413", response_->headers().Status()->value().getStringView()); waitForClientDisconnectOrReset(); codec_client_->close(); } @@ -386,7 +386,7 @@ TEST_P(WebsocketIntegrationTest, WebsocketCustomFilterChain) { response_ = std::move(encoder_decoder.second); codec_client_->sendData(encoder_decoder.first, large_req_str, false); response_->waitForEndStream(); - EXPECT_STREQ("413", response_->headers().Status()->value().c_str()); + EXPECT_EQ("413", response_->headers().Status()->value().getStringView()); waitForClientDisconnectOrReset(); codec_client_->close(); } diff --git a/test/integration/xfcc_integration_test.cc b/test/integration/xfcc_integration_test.cc index 410f74cd7756d..d352a3a3836eb 100644 --- a/test/integration/xfcc_integration_test.cc +++ 
b/test/integration/xfcc_integration_test.cc @@ -159,8 +159,8 @@ void XfccIntegrationTest::testRequestAndResponseWithXfccHeader(std::string previ if (expected_xfcc.empty()) { EXPECT_EQ(nullptr, upstream_request_->headers().ForwardedClientCert()); } else { - EXPECT_STREQ(expected_xfcc.c_str(), - upstream_request_->headers().ForwardedClientCert()->value().c_str()); + EXPECT_EQ(expected_xfcc, + upstream_request_->headers().ForwardedClientCert()->value().getStringView()); } upstream_request_->encodeHeaders(Http::TestHeaderMapImpl{{":status", "200"}}, true); response->waitForEndStream(); diff --git a/test/test_common/printers.cc b/test/test_common/printers.cc index 23aa6cb82d3e9..c6573e80a5853 100644 --- a/test/test_common/printers.cc +++ b/test/test_common/printers.cc @@ -12,7 +12,8 @@ void PrintTo(const HeaderMapImpl& headers, std::ostream* os) { headers.iterate( [](const HeaderEntry& header, void* context) -> HeaderMap::Iterate { std::ostream* os = static_cast(context); - *os << "{'" << header.key().c_str() << "','" << header.value().c_str() << "'}"; + *os << "{'" << header.key().getStringView() << "','" << header.value().getStringView() + << "'}"; return HeaderMap::Iterate::Continue; }, os); diff --git a/test/test_common/utility.cc b/test/test_common/utility.cc index 3589718497169..f77209822b07e 100644 --- a/test/test_common/utility.cc +++ b/test/test_common/utility.cc @@ -82,8 +82,8 @@ bool TestUtility::headerMapEqualIgnoreOrder(const Http::HeaderMap& lhs, [](const Http::HeaderEntry& header, void* context) -> Http::HeaderMap::Iterate { State* state = static_cast(context); const Http::HeaderEntry* entry = - state->lhs.get(Http::LowerCaseString(std::string(header.key().c_str()))); - if (entry == nullptr || (entry->value() != header.value().c_str())) { + state->lhs.get(Http::LowerCaseString(std::string(header.key().getStringView()))); + if (entry == nullptr || (entry->value() != header.value().getStringView())) { state->equal = false; return 
Http::HeaderMap::Iterate::Break; } @@ -371,7 +371,7 @@ std::string TestHeaderMapImpl::get_(const LowerCaseString& key) { if (!header) { return EMPTY_STRING; } else { - return header->value().c_str(); + return std::string(header->value().getStringView()); } } diff --git a/test/test_common/utility.h b/test/test_common/utility.h index 7318664e40f33..650160733bda7 100644 --- a/test/test_common/utility.h +++ b/test/test_common/utility.h @@ -466,7 +466,8 @@ class TestHeaderMapImpl : public HeaderMapImpl { p.iterate( [](const HeaderEntry& header, void* context) -> HeaderMap::Iterate { std::ostream* local_os = static_cast(context); - *local_os << header.key().c_str() << " " << header.value().c_str() << std::endl; + *local_os << header.key().getStringView() << " " << header.value().getStringView() + << std::endl; return HeaderMap::Iterate::Continue; }, &os); From 7dad4a15ab5b96adf77db78f3abd27d353ece6c4 Mon Sep 17 00:00:00 2001 From: Snow Pettersen Date: Tue, 16 Apr 2019 12:25:15 -0700 Subject: [PATCH 131/165] upstream: partition hosts in a single pass (#6440) This fixes a performance regression that was introduced when support for degraded hosts was added: the list of hosts would be iterated over four times instead of the previous two (one for the hosts list, one for the hosts per locality list). This PR changes both partition operations to only iterate over the list of hosts once. 
Signed-off-by: Snow Pettersen --- include/envoy/upstream/BUILD | 1 + include/envoy/upstream/types.h | 5 + include/envoy/upstream/upstream.h | 25 +++-- .../common/upstream/cluster_manager_impl.cc | 6 +- source/common/upstream/subset_lb.cc | 18 ++-- source/common/upstream/upstream_impl.cc | 96 +++++++++++------- source/common/upstream/upstream_impl.h | 32 +++--- .../upstream/cluster_manager_impl_test.cc | 98 +++++++++++++------ .../upstream/load_balancer_benchmark.cc | 7 +- .../upstream/load_balancer_impl_test.cc | 60 ++++++++---- .../upstream/load_balancer_simulation_test.cc | 15 +-- .../upstream/original_dst_cluster_test.cc | 4 +- test/common/upstream/subset_lb_test.cc | 12 ++- test/common/upstream/upstream_impl_test.cc | 72 ++++++++------ 14 files changed, 293 insertions(+), 158 deletions(-) diff --git a/include/envoy/upstream/BUILD b/include/envoy/upstream/BUILD index 31edcd0dff059..e20c85e1d24ec 100644 --- a/include/envoy/upstream/BUILD +++ b/include/envoy/upstream/BUILD @@ -145,6 +145,7 @@ envoy_cc_library( "//include/envoy/runtime:runtime_interface", "//include/envoy/ssl:context_interface", "//include/envoy/ssl:context_manager_interface", + "//include/envoy/upstream:types_interface", ], ) diff --git a/include/envoy/upstream/types.h b/include/envoy/upstream/types.h index 59bd3de65ffa8..b73e66b220ea5 100644 --- a/include/envoy/upstream/types.h +++ b/include/envoy/upstream/types.h @@ -51,5 +51,10 @@ struct HealthyAvailability : PriorityAvailability { using PriorityAvailability::PriorityAvailability; }; +// Phantom type indicating that the type is related to healthy hosts. +struct Healthy {}; +// Phantom type indicating that the type is related to degraded hosts. 
+struct Degraded {}; + } // namespace Upstream } // namespace Envoy diff --git a/include/envoy/upstream/upstream.h b/include/envoy/upstream/upstream.h index 214096d79390e..f6599c0b7254a 100644 --- a/include/envoy/upstream/upstream.h +++ b/include/envoy/upstream/upstream.h @@ -22,6 +22,7 @@ #include "envoy/upstream/locality.h" #include "envoy/upstream/outlier_detection.h" #include "envoy/upstream/resource_manager.h" +#include "envoy/upstream/types.h" #include "absl/types/optional.h" @@ -191,10 +192,15 @@ class Host : virtual public HostDescription { typedef std::shared_ptr HostConstSharedPtr; typedef std::vector HostVector; +typedef Phantom HealthyHostVector; +typedef Phantom DegradedHostVector; typedef std::unordered_map HostMap; typedef std::shared_ptr HostVectorSharedPtr; typedef std::shared_ptr HostVectorConstSharedPtr; +typedef std::shared_ptr HealthyHostVectorConstSharedPtr; +typedef std::shared_ptr DegradedHostVectorConstSharedPtr; + typedef std::unique_ptr HostListPtr; typedef std::unordered_map LocalityWeightsMap; @@ -221,20 +227,21 @@ class HostsPerLocality { virtual const std::vector& get() const PURE; /** - * Clone object with a filter predicate. - * @param predicate on Host entries. - * @return HostsPerLocalityConstSharedPtr clone of the HostsPerLocality with only - * hosts according to predicate. + * Clone object with multiple filter predicates. Returns a vector of clones, each with host that + * match the provided predicates. + * @param predicates vector of predicates on Host entries. + * @return vector of HostsPerLocalityConstSharedPtr clones of the HostsPerLocality that match + * hosts according to predicates. */ - virtual std::shared_ptr - filter(std::function predicate) const PURE; + virtual std::vector> + filter(const std::vector>& predicates) const PURE; /** * Clone object. * @return HostsPerLocalityConstSharedPtr clone of the HostsPerLocality. 
*/ std::shared_ptr clone() const { - return filter([](const Host&) { return true; }); + return filter({[](const Host&) { return true; }})[0]; } }; @@ -366,8 +373,8 @@ class PrioritySet { */ struct UpdateHostsParams { HostVectorConstSharedPtr hosts; - HostVectorConstSharedPtr healthy_hosts; - HostVectorConstSharedPtr degraded_hosts; + HealthyHostVectorConstSharedPtr healthy_hosts; + DegradedHostVectorConstSharedPtr degraded_hosts; HostsPerLocalityConstSharedPtr hosts_per_locality; HostsPerLocalityConstSharedPtr healthy_hosts_per_locality; HostsPerLocalityConstSharedPtr degraded_hosts_per_locality; diff --git a/source/common/upstream/cluster_manager_impl.cc b/source/common/upstream/cluster_manager_impl.cc index 8880e01ff547f..c73bf5b639c92 100644 --- a/source/common/upstream/cluster_manager_impl.cc +++ b/source/common/upstream/cluster_manager_impl.cc @@ -678,8 +678,10 @@ void ClusterManagerImpl::postThreadLocalClusterUpdate(const Cluster& cluster, ui // TODO(htuch): Can we skip these copies by exporting out const shared_ptr from HostSet? 
HostVectorConstSharedPtr hosts_copy(new HostVector(host_set->hosts())); - HostVectorConstSharedPtr healthy_hosts_copy(new HostVector(host_set->healthyHosts())); - HostVectorConstSharedPtr degraded_hosts_copy(new HostVector(host_set->degradedHosts())); + HealthyHostVectorConstSharedPtr healthy_hosts_copy( + new HealthyHostVector(host_set->healthyHosts())); + DegradedHostVectorConstSharedPtr degraded_hosts_copy( + new DegradedHostVector(host_set->degradedHosts())); HostsPerLocalityConstSharedPtr hosts_per_locality_copy = host_set->hostsPerLocality().clone(); HostsPerLocalityConstSharedPtr healthy_hosts_per_locality_copy = host_set->healthyHostsPerLocality().clone(); diff --git a/source/common/upstream/subset_lb.cc b/source/common/upstream/subset_lb.cc index 4c2fdc44ab9b4..0e47d3955410f 100644 --- a/source/common/upstream/subset_lb.cc +++ b/source/common/upstream/subset_lb.cc @@ -535,19 +535,19 @@ void SubsetLoadBalancer::HostSubsetImpl::update(const HostVector& hosts_added, } } - auto healthy_hosts = std::make_shared(); - healthy_hosts->reserve(original_host_set_.healthyHosts().size()); + auto healthy_hosts = std::make_shared(); + healthy_hosts->get().reserve(original_host_set_.healthyHosts().size()); for (const auto& host : original_host_set_.healthyHosts()) { if (cached_predicate(*host)) { - healthy_hosts->emplace_back(host); + healthy_hosts->get().emplace_back(host); } } - auto degraded_hosts = std::make_shared(); - degraded_hosts->reserve(original_host_set_.degradedHosts().size()); + auto degraded_hosts = std::make_shared(); + degraded_hosts->get().reserve(original_host_set_.degradedHosts().size()); for (const auto& host : original_host_set_.degradedHosts()) { if (cached_predicate(*host)) { - degraded_hosts->emplace_back(host); + degraded_hosts->get().emplace_back(host); } } @@ -561,13 +561,13 @@ void SubsetLoadBalancer::HostSubsetImpl::update(const HostVector& hosts_added, hosts_per_locality = std::make_shared( *hosts, 
original_host_set_.hostsPerLocality().hasLocalLocality()); } else { - hosts_per_locality = original_host_set_.hostsPerLocality().filter(cached_predicate); + hosts_per_locality = original_host_set_.hostsPerLocality().filter({cached_predicate})[0]; } HostsPerLocalityConstSharedPtr healthy_hosts_per_locality = - original_host_set_.healthyHostsPerLocality().filter(cached_predicate); + original_host_set_.healthyHostsPerLocality().filter({cached_predicate})[0]; HostsPerLocalityConstSharedPtr degraded_hosts_per_locality = - original_host_set_.degradedHostsPerLocality().filter(cached_predicate); + original_host_set_.degradedHostsPerLocality().filter({cached_predicate})[0]; // We can use the cached predicate here, since we trust that the hosts in hosts_added were also // present in the list of all hosts. diff --git a/source/common/upstream/upstream_impl.cc b/source/common/upstream/upstream_impl.cc index 0a688e3d59345..fbc27861a3f4f 100644 --- a/source/common/upstream/upstream_impl.cc +++ b/source/common/upstream/upstream_impl.cc @@ -274,23 +274,39 @@ HostImpl::createConnection(Event::Dispatcher& dispatcher, const ClusterInfo& clu void HostImpl::weight(uint32_t new_weight) { weight_ = std::max(1U, std::min(128U, new_weight)); } -HostsPerLocalityConstSharedPtr -HostsPerLocalityImpl::filter(std::function predicate) const { - auto* filtered_clone = new HostsPerLocalityImpl(); - HostsPerLocalityConstSharedPtr shared_filtered_clone{filtered_clone}; +std::vector HostsPerLocalityImpl::filter( + const std::vector>& predicates) const { + // We keep two lists: one for being able to mutate the clone and one for returning to the caller. + // Creating them both at the start avoids iterating over the mutable values at the end to convert + // them to a const pointer. 
+ std::vector> mutable_clones; + std::vector filtered_clones; + + for (size_t i = 0; i < predicates.size(); ++i) { + mutable_clones.emplace_back(std::make_shared()); + filtered_clones.emplace_back(mutable_clones.back()); + mutable_clones.back()->local_ = local_; + } - filtered_clone->local_ = local_; for (const auto& hosts_locality : hosts_per_locality_) { - HostVector current_locality_hosts; + std::vector current_locality_hosts; + current_locality_hosts.resize(predicates.size()); + + // Since # of hosts >> # of predicates, we iterate over the hosts in the outer loop. for (const auto& host : hosts_locality) { - if (predicate(*host)) { - current_locality_hosts.emplace_back(host); + for (size_t i = 0; i < predicates.size(); ++i) { + if (predicates[i](*host)) { + current_locality_hosts[i].emplace_back(host); + } } } - filtered_clone->hosts_per_locality_.push_back(std::move(current_locality_hosts)); + + for (size_t i = 0; i < predicates.size(); ++i) { + mutable_clones[i]->hosts_per_locality_.push_back(std::move(current_locality_hosts[0])); + } } - return shared_filtered_clone; + return filtered_clones; } void HostSetImpl::updateHosts(PrioritySet::UpdateHostsParams&& update_hosts_params, @@ -310,11 +326,11 @@ void HostSetImpl::updateHosts(PrioritySet::UpdateHostsParams&& update_hosts_para locality_weights_ = std::move(locality_weights); rebuildLocalityScheduler(healthy_locality_scheduler_, healthy_locality_entries_, - *healthy_hosts_per_locality_, *healthy_hosts_, hosts_per_locality_, + *healthy_hosts_per_locality_, healthy_hosts_->get(), hosts_per_locality_, locality_weights_, overprovisioning_factor_); rebuildLocalityScheduler(degraded_locality_scheduler_, degraded_locality_entries_, - *degraded_hosts_per_locality_, *degraded_hosts_, hosts_per_locality_, - locality_weights_, overprovisioning_factor_); + *degraded_hosts_per_locality_, degraded_hosts_->get(), + hosts_per_locality_, locality_weights_, overprovisioning_factor_); runUpdateCallbacks(hosts_added, 
hosts_removed); } @@ -387,25 +403,27 @@ PrioritySet::UpdateHostsParams HostSetImpl::updateHostsParams(HostVectorConstSharedPtr hosts, HostsPerLocalityConstSharedPtr hosts_per_locality) { return updateHostsParams(std::move(hosts), std::move(hosts_per_locality), - std::make_shared(), HostsPerLocalityImpl::empty()); + std::make_shared(), + HostsPerLocalityImpl::empty()); } PrioritySet::UpdateHostsParams HostSetImpl::updateHostsParams(HostVectorConstSharedPtr hosts, HostsPerLocalityConstSharedPtr hosts_per_locality, - HostVectorConstSharedPtr healthy_hosts, + HealthyHostVectorConstSharedPtr healthy_hosts, HostsPerLocalityConstSharedPtr healthy_hosts_per_locality) { return updateHostsParams(std::move(hosts), std::move(hosts_per_locality), std::move(healthy_hosts), std::move(healthy_hosts_per_locality), - std::make_shared(), HostsPerLocalityImpl::empty()); + std::make_shared(), + HostsPerLocalityImpl::empty()); } PrioritySet::UpdateHostsParams HostSetImpl::updateHostsParams(HostVectorConstSharedPtr hosts, HostsPerLocalityConstSharedPtr hosts_per_locality, - HostVectorConstSharedPtr healthy_hosts, + HealthyHostVectorConstSharedPtr healthy_hosts, HostsPerLocalityConstSharedPtr healthy_hosts_per_locality, - HostVectorConstSharedPtr degraded_hosts, + DegradedHostVectorConstSharedPtr degraded_hosts, HostsPerLocalityConstSharedPtr degraded_hosts_per_locality) { return PrioritySet::UpdateHostsParams{std::move(hosts), std::move(healthy_hosts), @@ -418,16 +436,15 @@ HostSetImpl::updateHostsParams(HostVectorConstSharedPtr hosts, PrioritySet::UpdateHostsParams HostSetImpl::partitionHosts(HostVectorConstSharedPtr hosts, HostsPerLocalityConstSharedPtr hosts_per_locality) { - auto healthy_hosts = ClusterImplBase::createHostList(*hosts, Host::Health::Healthy); - auto degraded_hosts = ClusterImplBase::createHostList(*hosts, Host::Health::Degraded); - auto healthy_hosts_per_locality = - ClusterImplBase::createHostLists(*hosts_per_locality, Host::Health::Healthy); - auto 
degraded_hosts_per_locality = - ClusterImplBase::createHostLists(*hosts_per_locality, Host::Health::Degraded); + auto healthy_and_degraded_hosts = ClusterImplBase::partitionHostList(*hosts); + auto healthy_and_degraded_hosts_per_locality = + ClusterImplBase::partitionHostsPerLocality(*hosts_per_locality); return updateHostsParams(std::move(hosts), std::move(hosts_per_locality), - std::move(healthy_hosts), std::move(healthy_hosts_per_locality), - std::move(degraded_hosts), std::move(degraded_hosts_per_locality)); + std::move(healthy_and_degraded_hosts.first), + std::move(healthy_and_degraded_hosts_per_locality.first), + std::move(healthy_and_degraded_hosts.second), + std::move(healthy_and_degraded_hosts_per_locality.second)); } double HostSetImpl::effectiveLocalityWeight(uint32_t index, @@ -678,21 +695,30 @@ ClusterImplBase::ClusterImplBase( }); } -HostVectorConstSharedPtr ClusterImplBase::createHostList(const HostVector& hosts, - Host::Health health) { - HostVectorSharedPtr healthy_list(new HostVector()); +std::pair +ClusterImplBase::partitionHostList(const HostVector& hosts) { + auto healthy_list = std::make_shared(); + auto degraded_list = std::make_shared(); + for (const auto& host : hosts) { - if (host->health() == health) { - healthy_list->emplace_back(host); + if (host->health() == Host::Health::Healthy) { + healthy_list->get().emplace_back(host); + } + if (host->health() == Host::Health::Degraded) { + degraded_list->get().emplace_back(host); } } - return healthy_list; + return {healthy_list, degraded_list}; } -HostsPerLocalityConstSharedPtr ClusterImplBase::createHostLists(const HostsPerLocality& hosts, - Host::Health health) { - return hosts.filter([&health](const Host& host) { return host.health() == health; }); +std::pair +ClusterImplBase::partitionHostsPerLocality(const HostsPerLocality& hosts) { + auto filtered_clones = + hosts.filter({[](const Host& host) { return host.health() == Host::Health::Healthy; }, + [](const Host& host) { return host.health() 
== Host::Health::Degraded; }}); + + return {std::move(filtered_clones[0]), std::move(filtered_clones[1])}; } bool ClusterInfoImpl::maintenanceMode() const { diff --git a/source/common/upstream/upstream_impl.h b/source/common/upstream/upstream_impl.h index f2ce1a7e4c3f9..cb10291ea233f 100644 --- a/source/common/upstream/upstream_impl.h +++ b/source/common/upstream/upstream_impl.h @@ -255,7 +255,8 @@ class HostsPerLocalityImpl : public HostsPerLocality { bool hasLocalLocality() const override { return local_; } const std::vector& get() const override { return hosts_per_locality_; } - HostsPerLocalityConstSharedPtr filter(std::function predicate) const override; + std::vector + filter(const std::vector>& predicate) const override; // The const shared pointer for the empty HostsPerLocalityImpl. static HostsPerLocalityConstSharedPtr empty() { @@ -279,8 +280,8 @@ class HostSetImpl : public HostSet { : priority_(priority), overprovisioning_factor_(overprovisioning_factor.has_value() ? overprovisioning_factor.value() : kDefaultOverProvisioningFactor), - hosts_(new HostVector()), healthy_hosts_(new HostVector()), - degraded_hosts_(new HostVector()) {} + hosts_(new HostVector()), healthy_hosts_(new HealthyHostVector()), + degraded_hosts_(new DegradedHostVector()) {} /** * Install a callback that will be invoked when the host set membership changes. 
@@ -293,8 +294,8 @@ class HostSetImpl : public HostSet { // Upstream::HostSet const HostVector& hosts() const override { return *hosts_; } - const HostVector& healthyHosts() const override { return *healthy_hosts_; } - const HostVector& degradedHosts() const override { return *degraded_hosts_; } + const HostVector& healthyHosts() const override { return healthy_hosts_->get(); } + const HostVector& degradedHosts() const override { return degraded_hosts_->get(); } const HostsPerLocality& hostsPerLocality() const override { return *hosts_per_locality_; } const HostsPerLocality& healthyHostsPerLocality() const override { return *healthy_hosts_per_locality_; @@ -315,14 +316,14 @@ class HostSetImpl : public HostSet { static PrioritySet::UpdateHostsParams updateHostsParams(HostVectorConstSharedPtr hosts, HostsPerLocalityConstSharedPtr hosts_per_locality, - HostVectorConstSharedPtr healthy_hosts, + HealthyHostVectorConstSharedPtr healthy_hosts, HostsPerLocalityConstSharedPtr healthy_hosts_per_locality); static PrioritySet::UpdateHostsParams updateHostsParams(HostVectorConstSharedPtr hosts, HostsPerLocalityConstSharedPtr hosts_per_locality, - HostVectorConstSharedPtr healthy_hosts, + HealthyHostVectorConstSharedPtr healthy_hosts, HostsPerLocalityConstSharedPtr healthy_hosts_per_locality, - HostVectorConstSharedPtr degraded_hosts, + DegradedHostVectorConstSharedPtr degraded_hosts, HostsPerLocalityConstSharedPtr degraded_hosts_per_locality); static PrioritySet::UpdateHostsParams partitionHosts(HostVectorConstSharedPtr hosts, HostsPerLocalityConstSharedPtr hosts_per_locality); @@ -349,8 +350,8 @@ class HostSetImpl : public HostSet { uint32_t priority_; uint32_t overprovisioning_factor_; HostVectorConstSharedPtr hosts_; - HostVectorConstSharedPtr healthy_hosts_; - HostVectorConstSharedPtr degraded_hosts_; + HealthyHostVectorConstSharedPtr healthy_hosts_; + DegradedHostVectorConstSharedPtr degraded_hosts_; HostsPerLocalityConstSharedPtr 
hosts_per_locality_{HostsPerLocalityImpl::empty()}; HostsPerLocalityConstSharedPtr healthy_hosts_per_locality_{HostsPerLocalityImpl::empty()}; HostsPerLocalityConstSharedPtr degraded_hosts_per_locality_{HostsPerLocalityImpl::empty()}; @@ -636,9 +637,14 @@ class ClusterImplBase : public Cluster, protected Logger::Loggable + partitionHostList(const HostVector& hosts); + // Partitions the provided list of hosts per locality into two new lists containing the healthy + // and degraded hosts respectively. + static std::pair + partitionHostsPerLocality(const HostsPerLocality& hosts); // Upstream::Cluster HealthChecker* healthChecker() override { return health_checker_.get(); } diff --git a/test/common/upstream/cluster_manager_impl_test.cc b/test/common/upstream/cluster_manager_impl_test.cc index c826b50cea322..e60a51b00ed02 100644 --- a/test/common/upstream/cluster_manager_impl_test.cc +++ b/test/common/upstream/cluster_manager_impl_test.cc @@ -2175,8 +2175,11 @@ TEST_F(ClusterManagerImplTest, MergedUpdates) { // The first update should be applied immediately, since it's not mergeable. hosts_removed.push_back((*hosts)[0]); cluster.prioritySet().updateHosts( - 0, HostSetImpl::updateHostsParams(hosts, hosts_per_locality, hosts, hosts_per_locality), {}, - hosts_added, hosts_removed, absl::nullopt); + 0, + HostSetImpl::updateHostsParams(hosts, hosts_per_locality, + std::make_shared(*hosts), + hosts_per_locality), + {}, hosts_added, hosts_removed, absl::nullopt); EXPECT_EQ(1, factory_.stats_.counter("cluster_manager.cluster_updated").value()); EXPECT_EQ(0, factory_.stats_.counter("cluster_manager.cluster_updated_via_merge").value()); EXPECT_EQ(0, factory_.stats_.counter("cluster_manager.update_merge_cancelled").value()); @@ -2184,11 +2187,17 @@ TEST_F(ClusterManagerImplTest, MergedUpdates) { // These calls should be merged, since there are no added/removed hosts. 
hosts_removed.clear(); cluster.prioritySet().updateHosts( - 0, HostSetImpl::updateHostsParams(hosts, hosts_per_locality, hosts, hosts_per_locality), {}, - hosts_added, hosts_removed, absl::nullopt); + 0, + HostSetImpl::updateHostsParams(hosts, hosts_per_locality, + std::make_shared(*hosts), + hosts_per_locality), + {}, hosts_added, hosts_removed, absl::nullopt); cluster.prioritySet().updateHosts( - 0, HostSetImpl::updateHostsParams(hosts, hosts_per_locality, hosts, hosts_per_locality), {}, - hosts_added, hosts_removed, absl::nullopt); + 0, + HostSetImpl::updateHostsParams(hosts, hosts_per_locality, + std::make_shared(*hosts), + hosts_per_locality), + {}, hosts_added, hosts_removed, absl::nullopt); EXPECT_EQ(1, factory_.stats_.counter("cluster_manager.cluster_updated").value()); EXPECT_EQ(0, factory_.stats_.counter("cluster_manager.cluster_updated_via_merge").value()); EXPECT_EQ(0, factory_.stats_.counter("cluster_manager.update_merge_cancelled").value()); @@ -2203,8 +2212,11 @@ TEST_F(ClusterManagerImplTest, MergedUpdates) { hosts_removed.clear(); hosts_added.push_back((*hosts)[0]); cluster.prioritySet().updateHosts( - 0, HostSetImpl::updateHostsParams(hosts, hosts_per_locality, hosts, hosts_per_locality), {}, - hosts_added, hosts_removed, absl::nullopt); + 0, + HostSetImpl::updateHostsParams(hosts, hosts_per_locality, + std::make_shared(*hosts), + hosts_per_locality), + {}, hosts_added, hosts_removed, absl::nullopt); EXPECT_EQ(2, factory_.stats_.counter("cluster_manager.cluster_updated").value()); EXPECT_EQ(1, factory_.stats_.counter("cluster_manager.cluster_updated_via_merge").value()); EXPECT_EQ(0, factory_.stats_.counter("cluster_manager.update_merge_cancelled").value()); @@ -2214,18 +2226,27 @@ TEST_F(ClusterManagerImplTest, MergedUpdates) { (*hosts)[0]->metadata(buildMetadata("v1")); cluster.prioritySet().updateHosts( - 0, HostSetImpl::updateHostsParams(hosts, hosts_per_locality, hosts, hosts_per_locality), {}, - hosts_added, hosts_removed, absl::nullopt); + 
0, + HostSetImpl::updateHostsParams(hosts, hosts_per_locality, + std::make_shared(*hosts), + hosts_per_locality), + {}, hosts_added, hosts_removed, absl::nullopt); (*hosts)[0]->healthFlagSet(Host::HealthFlag::FAILED_EDS_HEALTH); cluster.prioritySet().updateHosts( - 0, HostSetImpl::updateHostsParams(hosts, hosts_per_locality, hosts, hosts_per_locality), {}, - hosts_added, hosts_removed, absl::nullopt); + 0, + HostSetImpl::updateHostsParams(hosts, hosts_per_locality, + std::make_shared(*hosts), + hosts_per_locality), + {}, hosts_added, hosts_removed, absl::nullopt); (*hosts)[0]->weight(100); cluster.prioritySet().updateHosts( - 0, HostSetImpl::updateHostsParams(hosts, hosts_per_locality, hosts, hosts_per_locality), {}, - hosts_added, hosts_removed, absl::nullopt); + 0, + HostSetImpl::updateHostsParams(hosts, hosts_per_locality, + std::make_shared(*hosts), + hosts_per_locality), + {}, hosts_added, hosts_removed, absl::nullopt); // Updates not delivered yet. EXPECT_EQ(2, factory_.stats_.counter("cluster_manager.cluster_updated").value()); @@ -2235,8 +2256,11 @@ TEST_F(ClusterManagerImplTest, MergedUpdates) { // Remove the host again, should cancel the scheduled update and be delivered immediately. hosts_removed.push_back((*hosts)[0]); cluster.prioritySet().updateHosts( - 0, HostSetImpl::updateHostsParams(hosts, hosts_per_locality, hosts, hosts_per_locality), {}, - hosts_added, hosts_removed, absl::nullopt); + 0, + HostSetImpl::updateHostsParams(hosts, hosts_per_locality, + std::make_shared(*hosts), + hosts_per_locality), + {}, hosts_added, hosts_removed, absl::nullopt); EXPECT_EQ(3, factory_.stats_.counter("cluster_manager.cluster_updated").value()); EXPECT_EQ(1, factory_.stats_.counter("cluster_manager.cluster_updated_via_merge").value()); @@ -2269,8 +2293,11 @@ TEST_F(ClusterManagerImplTest, MergedUpdatesOutOfWindow) { // cluster.info()->lbConfig().update_merge_window() in ClusterManagerImpl::scheduleUpdate. 
time_system_.sleep(std::chrono::seconds(60)); cluster.prioritySet().updateHosts( - 0, HostSetImpl::updateHostsParams(hosts, hosts_per_locality, hosts, hosts_per_locality), {}, - hosts_added, hosts_removed, absl::nullopt); + 0, + HostSetImpl::updateHostsParams(hosts, hosts_per_locality, + std::make_shared(*hosts), + hosts_per_locality), + {}, hosts_added, hosts_removed, absl::nullopt); EXPECT_EQ(1, factory_.stats_.counter("cluster_manager.cluster_updated").value()); EXPECT_EQ(0, factory_.stats_.counter("cluster_manager.cluster_updated_via_merge").value()); EXPECT_EQ(1, factory_.stats_.counter("cluster_manager.update_out_of_merge_window").value()); @@ -2294,8 +2321,11 @@ TEST_F(ClusterManagerImplTest, MergedUpdatesInsideWindow) { // default-initialized to a monotonic time of 0, as is SimulatedTimeSystem::monotonic_time_. time_system_.sleep(std::chrono::seconds(2)); cluster.prioritySet().updateHosts( - 0, HostSetImpl::updateHostsParams(hosts, hosts_per_locality, hosts, hosts_per_locality), {}, - hosts_added, hosts_removed, absl::nullopt); + 0, + HostSetImpl::updateHostsParams(hosts, hosts_per_locality, + std::make_shared(*hosts), + hosts_per_locality), + {}, hosts_added, hosts_removed, absl::nullopt); EXPECT_EQ(0, factory_.stats_.counter("cluster_manager.cluster_updated").value()); EXPECT_EQ(0, factory_.stats_.counter("cluster_manager.cluster_updated_via_merge").value()); EXPECT_EQ(0, factory_.stats_.counter("cluster_manager.update_out_of_merge_window").value()); @@ -2327,8 +2357,11 @@ TEST_F(ClusterManagerImplTest, MergedUpdatesOutOfWindowDisabled) { // The first update should be applied immediately, because even though it's mergeable // and outside a merge window, merging is disabled. 
cluster.prioritySet().updateHosts( - 0, HostSetImpl::updateHostsParams(hosts, hosts_per_locality, hosts, hosts_per_locality), {}, - hosts_added, hosts_removed, absl::nullopt); + 0, + HostSetImpl::updateHostsParams(hosts, hosts_per_locality, + std::make_shared(*hosts), + hosts_per_locality), + {}, hosts_added, hosts_removed, absl::nullopt); EXPECT_EQ(1, factory_.stats_.counter("cluster_manager.cluster_updated").value()); EXPECT_EQ(0, factory_.stats_.counter("cluster_manager.cluster_updated_via_merge").value()); EXPECT_EQ(0, factory_.stats_.counter("cluster_manager.update_out_of_merge_window").value()); @@ -2391,8 +2424,11 @@ TEST_F(ClusterManagerImplTest, MergedUpdatesDestroyedOnUpdate) { // The first update should be applied immediately, since it's not mergeable. hosts_removed.push_back((*hosts)[0]); cluster.prioritySet().updateHosts( - 0, HostSetImpl::updateHostsParams(hosts, hosts_per_locality, hosts, hosts_per_locality), {}, - hosts_added, hosts_removed, absl::nullopt); + 0, + HostSetImpl::updateHostsParams(hosts, hosts_per_locality, + std::make_shared(*hosts), + hosts_per_locality), + {}, hosts_added, hosts_removed, absl::nullopt); EXPECT_EQ(1, factory_.stats_.counter("cluster_manager.cluster_updated").value()); EXPECT_EQ(0, factory_.stats_.counter("cluster_manager.cluster_updated_via_merge").value()); EXPECT_EQ(0, factory_.stats_.counter("cluster_manager.update_merge_cancelled").value()); @@ -2400,11 +2436,17 @@ TEST_F(ClusterManagerImplTest, MergedUpdatesDestroyedOnUpdate) { // These calls should be merged, since there are no added/removed hosts. 
hosts_removed.clear(); cluster.prioritySet().updateHosts( - 0, HostSetImpl::updateHostsParams(hosts, hosts_per_locality, hosts, hosts_per_locality), {}, - hosts_added, hosts_removed, absl::nullopt); + 0, + HostSetImpl::updateHostsParams(hosts, hosts_per_locality, + std::make_shared(*hosts), + hosts_per_locality), + {}, hosts_added, hosts_removed, absl::nullopt); cluster.prioritySet().updateHosts( - 0, HostSetImpl::updateHostsParams(hosts, hosts_per_locality, hosts, hosts_per_locality), {}, - hosts_added, hosts_removed, absl::nullopt); + 0, + HostSetImpl::updateHostsParams(hosts, hosts_per_locality, + std::make_shared(*hosts), + hosts_per_locality), + {}, hosts_added, hosts_removed, absl::nullopt); EXPECT_EQ(1, factory_.stats_.counter("cluster_manager.cluster_updated").value()); EXPECT_EQ(0, factory_.stats_.counter("cluster_manager.cluster_updated_via_merge").value()); EXPECT_EQ(0, factory_.stats_.counter("cluster_manager.update_merge_cancelled").value()); diff --git a/test/common/upstream/load_balancer_benchmark.cc b/test/common/upstream/load_balancer_benchmark.cc index 7cdade43e5fb9..329e4eef53c4b 100644 --- a/test/common/upstream/load_balancer_benchmark.cc +++ b/test/common/upstream/load_balancer_benchmark.cc @@ -29,8 +29,11 @@ class BaseTester { } HostVectorConstSharedPtr updated_hosts{new HostVector(hosts)}; priority_set_.updateHosts( - 0, HostSetImpl::updateHostsParams(updated_hosts, nullptr, updated_hosts, nullptr), {}, - hosts, {}, absl::nullopt); + 0, + HostSetImpl::updateHostsParams(updated_hosts, nullptr, + std::make_shared(*updated_hosts), + nullptr), + {}, hosts, {}, absl::nullopt); } PrioritySetImpl priority_set_; diff --git a/test/common/upstream/load_balancer_impl_test.cc b/test/common/upstream/load_balancer_impl_test.cc index 2d167d09d746b..801064893fa05 100644 --- a/test/common/upstream/load_balancer_impl_test.cc +++ b/test/common/upstream/load_balancer_impl_test.cc @@ -555,7 +555,8 @@ TEST_P(FailoverTest, ExtendPrioritiesWithLocalPrioritySet) { 
HostVectorSharedPtr hosts(new HostVector({makeTestHost(info_, "tcp://127.0.0.1:82")})); local_priority_set_->updateHosts( 0, - HostSetImpl::updateHostsParams(hosts, HostsPerLocalityImpl::empty(), hosts, + HostSetImpl::updateHostsParams(hosts, HostsPerLocalityImpl::empty(), + std::make_shared(*hosts), HostsPerLocalityImpl::empty()), {}, empty_host_vector_, empty_host_vector_, absl::nullopt); EXPECT_EQ(tertiary_host_set_.hosts_[0], lb_->chooseHost(nullptr)); @@ -825,8 +826,11 @@ TEST_P(RoundRobinLoadBalancerTest, ZoneAwareSmallCluster) { common_config_.mutable_zone_aware_lb_config()->mutable_min_cluster_size()->set_value(7); init(true); local_priority_set_->updateHosts( - 0, HostSetImpl::updateHostsParams(hosts, hosts_per_locality, hosts, hosts_per_locality), {}, - empty_host_vector_, empty_host_vector_, absl::nullopt); + 0, + HostSetImpl::updateHostsParams(hosts, hosts_per_locality, + std::make_shared(*hosts), + hosts_per_locality), + {}, empty_host_vector_, empty_host_vector_, absl::nullopt); EXPECT_CALL(runtime_.snapshot_, getInteger("upstream.healthy_panic_threshold", 0)) .WillRepeatedly(Return(50)); @@ -850,8 +854,11 @@ TEST_P(RoundRobinLoadBalancerTest, ZoneAwareSmallCluster) { .WillRepeatedly(Return(1)); // Trigger reload. 
local_priority_set_->updateHosts( - 0, HostSetImpl::updateHostsParams(hosts, hosts_per_locality, hosts, hosts_per_locality), {}, - empty_host_vector_, empty_host_vector_, absl::nullopt); + 0, + HostSetImpl::updateHostsParams(hosts, hosts_per_locality, + std::make_shared(*hosts), + hosts_per_locality), + {}, empty_host_vector_, empty_host_vector_, absl::nullopt); EXPECT_EQ(hostSet().healthy_hosts_per_locality_->get()[0][0], lb_->chooseHost(nullptr)); } @@ -876,10 +883,12 @@ TEST_P(RoundRobinLoadBalancerTest, NoZoneAwareDifferentZoneSize) { common_config_.mutable_zone_aware_lb_config()->mutable_routing_enabled()->set_value(98); common_config_.mutable_zone_aware_lb_config()->mutable_min_cluster_size()->set_value(7); init(true); - local_priority_set_->updateHosts(0, - HostSetImpl::updateHostsParams(hosts, local_hosts_per_locality, - hosts, local_hosts_per_locality), - {}, empty_host_vector_, empty_host_vector_, absl::nullopt); + local_priority_set_->updateHosts( + 0, + HostSetImpl::updateHostsParams(hosts, local_hosts_per_locality, + std::make_shared(*hosts), + local_hosts_per_locality), + {}, empty_host_vector_, empty_host_vector_, absl::nullopt); EXPECT_CALL(runtime_.snapshot_, getInteger("upstream.healthy_panic_threshold", 100)) .WillRepeatedly(Return(50)); @@ -916,8 +925,11 @@ TEST_P(RoundRobinLoadBalancerTest, ZoneAwareRoutingLargeZoneSwitchOnOff) { hostSet().healthy_hosts_per_locality_ = hosts_per_locality; init(true); local_priority_set_->updateHosts( - 0, HostSetImpl::updateHostsParams(hosts, hosts_per_locality, hosts, hosts_per_locality), {}, - empty_host_vector_, empty_host_vector_, absl::nullopt); + 0, + HostSetImpl::updateHostsParams(hosts, hosts_per_locality, + std::make_shared(*hosts), + hosts_per_locality), + {}, empty_host_vector_, empty_host_vector_, absl::nullopt); // There is only one host in the given zone for zone aware routing. 
EXPECT_EQ(hostSet().healthy_hosts_per_locality_->get()[0][0], lb_->chooseHost(nullptr)); @@ -966,7 +978,8 @@ TEST_P(RoundRobinLoadBalancerTest, ZoneAwareRoutingSmallZone) { init(true); local_priority_set_->updateHosts( 0, - HostSetImpl::updateHostsParams(local_hosts, local_hosts_per_locality, local_hosts, + HostSetImpl::updateHostsParams(local_hosts, local_hosts_per_locality, + std::make_shared(*local_hosts), local_hosts_per_locality), {}, empty_host_vector_, empty_host_vector_, absl::nullopt); @@ -1039,7 +1052,8 @@ TEST_P(RoundRobinLoadBalancerTest, LowPrecisionForDistribution) { auto local_hosts_per_locality_shared = makeHostsPerLocality(std::move(local_hosts_per_locality)); local_priority_set_->updateHosts( 0, - HostSetImpl::updateHostsParams(local_hosts, local_hosts_per_locality_shared, local_hosts, + HostSetImpl::updateHostsParams(local_hosts, local_hosts_per_locality_shared, + std::make_shared(*local_hosts), local_hosts_per_locality_shared), {}, empty_host_vector_, empty_host_vector_, absl::nullopt); @@ -1062,8 +1076,11 @@ TEST_P(RoundRobinLoadBalancerTest, NoZoneAwareRoutingOneZone) { hostSet().healthy_hosts_per_locality_ = hosts_per_locality; init(true); local_priority_set_->updateHosts( - 0, HostSetImpl::updateHostsParams(hosts, hosts_per_locality, hosts, hosts_per_locality), {}, - empty_host_vector_, empty_host_vector_, absl::nullopt); + 0, + HostSetImpl::updateHostsParams(hosts, hosts_per_locality, + std::make_shared(*hosts), + hosts_per_locality), + {}, empty_host_vector_, empty_host_vector_, absl::nullopt); EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr)); } @@ -1078,8 +1095,11 @@ TEST_P(RoundRobinLoadBalancerTest, NoZoneAwareRoutingNotHealthy) { hostSet().healthy_hosts_per_locality_ = hosts_per_locality; init(true); local_priority_set_->updateHosts( - 0, HostSetImpl::updateHostsParams(hosts, hosts_per_locality, hosts, hosts_per_locality), {}, - empty_host_vector_, empty_host_vector_, absl::nullopt); + 0, + 
HostSetImpl::updateHostsParams(hosts, hosts_per_locality, + std::make_shared(*hosts), + hosts_per_locality), + {}, empty_host_vector_, empty_host_vector_, absl::nullopt); // local zone has no healthy hosts, take from the all healthy hosts. EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr)); @@ -1111,7 +1131,8 @@ TEST_P(RoundRobinLoadBalancerTest, NoZoneAwareRoutingLocalEmpty) { init(true); local_priority_set_->updateHosts( 0, - HostSetImpl::updateHostsParams(local_hosts, local_hosts_per_locality, local_hosts, + HostSetImpl::updateHostsParams(local_hosts, local_hosts_per_locality, + std::make_shared(*local_hosts), local_hosts_per_locality), {}, empty_host_vector_, empty_host_vector_, absl::nullopt); @@ -1142,7 +1163,8 @@ TEST_P(RoundRobinLoadBalancerTest, NoZoneAwareRoutingNoLocalLocality) { init(true); local_priority_set_->updateHosts( 0, - HostSetImpl::updateHostsParams(local_hosts, local_hosts_per_locality, local_hosts, + HostSetImpl::updateHostsParams(local_hosts, local_hosts_per_locality, + std::make_shared(*local_hosts), local_hosts_per_locality), {}, empty_host_vector_, empty_host_vector_, absl::nullopt); diff --git a/test/common/upstream/load_balancer_simulation_test.cc b/test/common/upstream/load_balancer_simulation_test.cc index 7167c9fd404b0..76f820a161bdd 100644 --- a/test/common/upstream/load_balancer_simulation_test.cc +++ b/test/common/upstream/load_balancer_simulation_test.cc @@ -54,10 +54,12 @@ TEST(DISABLED_LeastRequestLoadBalancerWeightTest, Weight) { } HostVectorConstSharedPtr updated_hosts{new HostVector(hosts)}; HostsPerLocalitySharedPtr updated_locality_hosts{new HostsPerLocalityImpl(hosts)}; - priority_set.updateHosts(0, - HostSetImpl::updateHostsParams(updated_hosts, updated_locality_hosts, - updated_hosts, updated_locality_hosts), - {}, hosts, {}, absl::nullopt); + priority_set.updateHosts( + 0, + HostSetImpl::updateHostsParams(updated_hosts, updated_locality_hosts, + std::make_shared(*updated_hosts), + 
updated_locality_hosts), + {}, hosts, {}, absl::nullopt); Stats::IsolatedStoreImpl stats_store; ClusterStats stats{ClusterInfoImpl::generateStats(stats_store)}; @@ -160,8 +162,9 @@ class DISABLED_SimulationTest : public testing::Test { auto per_zone_local_shared = makeHostsPerLocality(std::move(per_zone_local)); local_priority_set_->updateHosts( 0, - HostSetImpl::updateHostsParams(originating_hosts, per_zone_local_shared, - originating_hosts, per_zone_local_shared), + HostSetImpl::updateHostsParams( + originating_hosts, per_zone_local_shared, + std::make_shared(*originating_hosts), per_zone_local_shared), {}, empty_vector_, empty_vector_, absl::nullopt); HostConstSharedPtr selected = lb.chooseHost(nullptr); diff --git a/test/common/upstream/original_dst_cluster_test.cc b/test/common/upstream/original_dst_cluster_test.cc index 79360a0cc48d0..23314f2f25fb3 100644 --- a/test/common/upstream/original_dst_cluster_test.cc +++ b/test/common/upstream/original_dst_cluster_test.cc @@ -461,8 +461,8 @@ TEST_F(OriginalDstClusterTest, MultipleClusters) { // Update second hostset accordingly; HostVectorSharedPtr new_hosts( new HostVector(cluster_->prioritySet().hostSetsPerPriority()[0]->hosts())); - HostVectorSharedPtr healthy_hosts( - new HostVector(cluster_->prioritySet().hostSetsPerPriority()[0]->hosts())); + auto healthy_hosts = std::make_shared( + cluster_->prioritySet().hostSetsPerPriority()[0]->hosts()); const HostsPerLocalityConstSharedPtr empty_hosts_per_locality{new HostsPerLocalityImpl()}; second.updateHosts(0, diff --git a/test/common/upstream/subset_lb_test.cc b/test/common/upstream/subset_lb_test.cc index 6dbd9b62c047e..f2b97e03278d6 100644 --- a/test/common/upstream/subset_lb_test.cc +++ b/test/common/upstream/subset_lb_test.cc @@ -216,7 +216,8 @@ class SubsetLoadBalancerTest : public testing::TestWithParam { local_priority_set_.updateHosts( 0, - HostSetImpl::updateHostsParams(local_hosts_, local_hosts_per_locality_, local_hosts_, + 
HostSetImpl::updateHostsParams(local_hosts_, local_hosts_per_locality_, + std::make_shared(*local_hosts_), local_hosts_per_locality_), {}, {}, {}, absl::nullopt); @@ -314,7 +315,8 @@ class SubsetLoadBalancerTest : public testing::TestWithParam { if (GetParam() == REMOVES_FIRST && !remove.empty()) { local_priority_set_.updateHosts( 0, - HostSetImpl::updateHostsParams(local_hosts_, local_hosts_per_locality_, local_hosts_, + HostSetImpl::updateHostsParams(local_hosts_, local_hosts_per_locality_, + std::make_shared(*local_hosts_), local_hosts_per_locality_), {}, {}, remove, absl::nullopt); } @@ -330,14 +332,16 @@ class SubsetLoadBalancerTest : public testing::TestWithParam { if (!add.empty()) { local_priority_set_.updateHosts( 0, - HostSetImpl::updateHostsParams(local_hosts_, local_hosts_per_locality_, local_hosts_, + HostSetImpl::updateHostsParams(local_hosts_, local_hosts_per_locality_, + std::make_shared(*local_hosts_), local_hosts_per_locality_), {}, add, {}, absl::nullopt); } } else if (!add.empty() || !remove.empty()) { local_priority_set_.updateHosts( 0, - HostSetImpl::updateHostsParams(local_hosts_, local_hosts_per_locality_, local_hosts_, + HostSetImpl::updateHostsParams(local_hosts_, local_hosts_per_locality_, + std::make_shared(*local_hosts_), local_hosts_per_locality_), {}, add, remove, absl::nullopt); } diff --git a/test/common/upstream/upstream_impl_test.cc b/test/common/upstream/upstream_impl_test.cc index 8042842729993..64864f91f29fc 100644 --- a/test/common/upstream/upstream_impl_test.cc +++ b/test/common/upstream/upstream_impl_test.cc @@ -69,7 +69,8 @@ std::list hostListToAddresses(const HostVector& hosts) { return addresses; } -std::shared_ptr +template +std::shared_ptr makeHostsFromHostsPerLocality(HostsPerLocalityConstSharedPtr hosts_per_locality) { HostVector hosts; @@ -79,7 +80,7 @@ makeHostsFromHostsPerLocality(HostsPerLocalityConstSharedPtr hosts_per_locality) } } - return std::make_shared(hosts); + return std::make_shared(hosts); } struct 
ResolverData { @@ -1589,7 +1590,9 @@ class TestBatchUpdateCb : public PrioritySet::BatchUpdateCb { HostVector hosts_removed{}; host_update_cb.updateHosts( 0, - HostSetImpl::updateHostsParams(hosts_, hosts_per_locality_, hosts_, hosts_per_locality_), + HostSetImpl::updateHostsParams(hosts_, hosts_per_locality_, + std::make_shared(*hosts_), + hosts_per_locality_), {}, hosts_added, hosts_removed, absl::nullopt); } @@ -1600,7 +1603,8 @@ class TestBatchUpdateCb : public PrioritySet::BatchUpdateCb { HostVector hosts_removed{hosts_->front()}; host_update_cb.updateHosts( 1, - HostSetImpl::updateHostsParams(empty_hosts, HostsPerLocalityImpl::empty(), empty_hosts, + HostSetImpl::updateHostsParams(empty_hosts, HostsPerLocalityImpl::empty(), + std::make_shared(*empty_hosts), HostsPerLocalityImpl::empty()), {}, hosts_added, hosts_removed, absl::nullopt); } @@ -1653,8 +1657,11 @@ TEST(PrioritySet, Extend) { HostVector hosts_removed{}; priority_set.updateHosts( - 1, HostSetImpl::updateHostsParams(hosts, hosts_per_locality, hosts, hosts_per_locality), {}, - hosts_added, hosts_removed, absl::nullopt); + 1, + HostSetImpl::updateHostsParams(hosts, hosts_per_locality, + std::make_shared(*hosts), + hosts_per_locality), + {}, hosts_added, hosts_removed, absl::nullopt); } EXPECT_EQ(1, priority_changes); EXPECT_EQ(1, membership_changes); @@ -2182,9 +2189,9 @@ TEST(HostsPerLocalityImpl, Filter) { { std::vector locality_hosts = {{host_0}, {host_1}}; const auto filtered = - HostsPerLocalityImpl(std::move(locality_hosts), false).filter([&host_0](const Host& host) { + HostsPerLocalityImpl(std::move(locality_hosts), false).filter({[&host_0](const Host& host) { return &host == host_0.get(); - }); + }})[0]; EXPECT_FALSE(filtered->hasLocalLocality()); const std::vector expected_locality_hosts = {{host_0}, {}}; EXPECT_EQ(expected_locality_hosts, filtered->get()); @@ -2193,9 +2200,9 @@ TEST(HostsPerLocalityImpl, Filter) { { std::vector locality_hosts = {{host_0}, {host_1}}; auto filtered = - 
HostsPerLocalityImpl(std::move(locality_hosts), true).filter([&host_1](const Host& host) { + HostsPerLocalityImpl(std::move(locality_hosts), true).filter({[&host_1](const Host& host) { return &host == host_1.get(); - }); + }})[0]; EXPECT_TRUE(filtered->hasLocalLocality()); const std::vector expected_locality_hosts = {{}, {host_1}}; EXPECT_EQ(expected_locality_hosts, filtered->get()); @@ -2236,9 +2243,10 @@ TEST_F(HostSetImplLocalityTest, EmptyLocality) { makeHostsPerLocality({{hosts_[0], hosts_[1], hosts_[2]}, {}}); LocalityWeightsConstSharedPtr locality_weights{new LocalityWeights{1, 1}}; auto hosts = makeHostsFromHostsPerLocality(hosts_per_locality); - host_set_.updateHosts( - HostSetImpl::updateHostsParams(hosts, hosts_per_locality, hosts, hosts_per_locality), - locality_weights, {}, {}, absl::nullopt); + host_set_.updateHosts(HostSetImpl::updateHostsParams( + hosts, hosts_per_locality, + std::make_shared(*hosts), hosts_per_locality), + locality_weights, {}, {}, absl::nullopt); // Verify that we are not RRing between localities. 
EXPECT_EQ(0, host_set_.chooseHealthyLocality().value()); EXPECT_EQ(0, host_set_.chooseHealthyLocality().value()); @@ -2249,9 +2257,10 @@ TEST_F(HostSetImplLocalityTest, AllZeroWeights) { HostsPerLocalitySharedPtr hosts_per_locality = makeHostsPerLocality({{hosts_[0]}, {hosts_[1]}}); LocalityWeightsConstSharedPtr locality_weights{new LocalityWeights{0, 0}}; auto hosts = makeHostsFromHostsPerLocality(hosts_per_locality); - host_set_.updateHosts( - HostSetImpl::updateHostsParams(hosts, hosts_per_locality, hosts, hosts_per_locality), - locality_weights, {}, {}); + host_set_.updateHosts(HostSetImpl::updateHostsParams( + hosts, hosts_per_locality, + std::make_shared(*hosts), hosts_per_locality), + locality_weights, {}, {}); EXPECT_FALSE(host_set_.chooseHealthyLocality().has_value()); } @@ -2261,9 +2270,10 @@ TEST_F(HostSetImplLocalityTest, Unweighted) { makeHostsPerLocality({{hosts_[0]}, {hosts_[1]}, {hosts_[2]}}); LocalityWeightsConstSharedPtr locality_weights{new LocalityWeights{1, 1, 1}}; auto hosts = makeHostsFromHostsPerLocality(hosts_per_locality); - host_set_.updateHosts( - HostSetImpl::updateHostsParams(hosts, hosts_per_locality, hosts, hosts_per_locality), - locality_weights, {}, {}, absl::nullopt); + host_set_.updateHosts(HostSetImpl::updateHostsParams( + hosts, hosts_per_locality, + std::make_shared(*hosts), hosts_per_locality), + locality_weights, {}, {}, absl::nullopt); EXPECT_EQ(0, host_set_.chooseHealthyLocality().value()); EXPECT_EQ(1, host_set_.chooseHealthyLocality().value()); EXPECT_EQ(2, host_set_.chooseHealthyLocality().value()); @@ -2277,9 +2287,10 @@ TEST_F(HostSetImplLocalityTest, Weighted) { HostsPerLocalitySharedPtr hosts_per_locality = makeHostsPerLocality({{hosts_[0]}, {hosts_[1]}}); LocalityWeightsConstSharedPtr locality_weights{new LocalityWeights{1, 2}}; auto hosts = makeHostsFromHostsPerLocality(hosts_per_locality); - host_set_.updateHosts( - HostSetImpl::updateHostsParams(hosts, hosts_per_locality, hosts, hosts_per_locality), - 
locality_weights, {}, {}, absl::nullopt); + host_set_.updateHosts(HostSetImpl::updateHostsParams( + hosts, hosts_per_locality, + std::make_shared(*hosts), hosts_per_locality), + locality_weights, {}, {}, absl::nullopt); EXPECT_EQ(1, host_set_.chooseHealthyLocality().value()); EXPECT_EQ(0, host_set_.chooseHealthyLocality().value()); EXPECT_EQ(1, host_set_.chooseHealthyLocality().value()); @@ -2294,9 +2305,10 @@ TEST_F(HostSetImplLocalityTest, MissingWeight) { makeHostsPerLocality({{hosts_[0]}, {hosts_[1]}, {hosts_[2]}}); LocalityWeightsConstSharedPtr locality_weights{new LocalityWeights{1, 0, 1}}; auto hosts = makeHostsFromHostsPerLocality(hosts_per_locality); - host_set_.updateHosts( - HostSetImpl::updateHostsParams(hosts, hosts_per_locality, hosts, hosts_per_locality), - locality_weights, {}, {}, absl::nullopt); + host_set_.updateHosts(HostSetImpl::updateHostsParams( + hosts, hosts_per_locality, + std::make_shared(*hosts), hosts_per_locality), + locality_weights, {}, {}, absl::nullopt); EXPECT_EQ(0, host_set_.chooseHealthyLocality().value()); EXPECT_EQ(2, host_set_.chooseHealthyLocality().value()); EXPECT_EQ(0, host_set_.chooseHealthyLocality().value()); @@ -2320,9 +2332,10 @@ TEST_F(HostSetImplLocalityTest, UnhealthyFailover) { auto hosts = makeHostsFromHostsPerLocality(hosts_per_locality); host_set_.updateHosts( - HostSetImpl::updateHostsParams(hosts, hosts_per_locality, - makeHostsFromHostsPerLocality(healthy_hosts_per_locality), - healthy_hosts_per_locality), + HostSetImpl::updateHostsParams( + hosts, hosts_per_locality, + makeHostsFromHostsPerLocality(healthy_hosts_per_locality), + healthy_hosts_per_locality), locality_weights, {}, {}, absl::nullopt); }; @@ -2366,7 +2379,8 @@ TEST(OverProvisioningFactorTest, LocalityPickChanges) { // Healthy ratio: (1/2, 1). 
HostsPerLocalitySharedPtr healthy_hosts_per_locality = makeHostsPerLocality({{hosts[0]}, {hosts[2]}}); - auto healthy_hosts = makeHostsFromHostsPerLocality(healthy_hosts_per_locality); + auto healthy_hosts = + makeHostsFromHostsPerLocality(healthy_hosts_per_locality); host_set.updateHosts(HostSetImpl::updateHostsParams(std::make_shared(hosts), hosts_per_locality, healthy_hosts, healthy_hosts_per_locality), From f235f560b8b0d4d1ce8c3c4a17134aafb171e0a8 Mon Sep 17 00:00:00 2001 From: Lizan Zhou Date: Tue, 16 Apr 2019 12:47:36 -0700 Subject: [PATCH 132/165] lua: align the allocated memory (#6599) Description: This patch aligns the memory that is allocated by Lua. Previously, without alignment, Envoy experienced segfault when constructing `StreamWrapperHandler` using the placement new operator on the pre-allocated (via `new_luauserdata` API) memory by Lua. Risk Level: Medium for Lua users. Testing: Enable test in UBSAN on Linux as well. Docs Changes: N/A Release Notes: N/A Fixes #5551 Signed-off-by: Lizan Zhou --- .bazelrc | 1 + ci/build_setup.sh | 1 - source/extensions/filters/common/lua/lua.h | 40 ++++++++++++++----- .../extensions/filters/common/lua/lua_test.cc | 4 +- tools/spelling_dictionary.txt | 1 + 5 files changed, 36 insertions(+), 11 deletions(-) diff --git a/.bazelrc b/.bazelrc index 78d41c35d4a26..88a480bc0b0ff 100644 --- a/.bazelrc +++ b/.bazelrc @@ -22,6 +22,7 @@ build:asan --define signal_trace=disabled build:asan --copt -DADDRESS_SANITIZER=1 build:asan --copt -D__SANITIZE_ADDRESS__ build:asan --test_env=ASAN_OPTIONS=handle_abort=1:allow_addr2line=true:check_initialization_order=true:strict_init_order=true +build:asan --test_env=UBSAN_OPTIONS=halt_on_error=true:print_stacktrace=1 build:asan --test_env=ASAN_SYMBOLIZER_PATH # Clang ASAN/UBSAN diff --git a/ci/build_setup.sh b/ci/build_setup.sh index 025fea240cc6f..9941d1b20b8cd 100755 --- a/ci/build_setup.sh +++ b/ci/build_setup.sh @@ -73,7 +73,6 @@ export BAZEL_BUILD_OPTIONS="--strategy=Genrule=standalone 
--spawn_strategy=stand --verbose_failures ${BAZEL_OPTIONS} --action_env=HOME --action_env=PYTHONUSERBASE \ --jobs=${NUM_CPUS} --show_task_finish --experimental_generate_json_trace_profile ${BAZEL_BUILD_EXTRA_OPTIONS}" export BAZEL_TEST_OPTIONS="${BAZEL_BUILD_OPTIONS} --test_env=HOME --test_env=PYTHONUSERBASE \ - --test_env=UBSAN_OPTIONS=print_stacktrace=1 \ --cache_test_results=no --test_output=all ${BAZEL_EXTRA_TEST_OPTIONS}" [[ "${BAZEL_EXPUNGE}" == "1" ]] && "${BAZEL}" clean --expunge diff --git a/source/extensions/filters/common/lua/lua.h b/source/extensions/filters/common/lua/lua.h index a824a0c4634a8..872e0dba2e14e 100644 --- a/source/extensions/filters/common/lua/lua.h +++ b/source/extensions/filters/common/lua/lua.h @@ -44,7 +44,8 @@ namespace Lua { */ #define DECLARE_LUA_FUNCTION_EX(Class, Name, Index) \ static int static_##Name(lua_State* state) { \ - Class* object = static_cast(luaL_checkudata(state, Index, typeid(Class).name())); \ + Class* object = ::Envoy::Extensions::Filters::Common::Lua::alignAndCast( \ + luaL_checkudata(state, Index, typeid(Class).name())); \ object->checkDead(state); \ return object->Name(state); \ } \ @@ -60,6 +61,32 @@ namespace Lua { */ #define DECLARE_LUA_CLOSURE(Class, Name) DECLARE_LUA_FUNCTION_EX(Class, Name, lua_upvalueindex(1)) +/** + * Calculate the maximum space needed to be aligned. + */ +template constexpr size_t maximumSpaceNeededToAlign() { + // The allocated memory can be misaligned up to `alignof(T) - 1` bytes. Adding it to the size to + // allocate. + return sizeof(T) + alignof(T) - 1; +} + +template inline T* alignAndCast(void* mem) { + size_t size = maximumSpaceNeededToAlign(); + return static_cast(std::align(alignof(T), sizeof(T), mem, size)); +} + +/** + * Create a new user data and assign its metatable. 
+ */ +template inline T* allocateLuaUserData(lua_State* state) { + void* mem = lua_newuserdata(state, maximumSpaceNeededToAlign()); + luaL_getmetatable(state, typeid(T).name()); + ASSERT(lua_istable(state, -1)); + lua_setmetatable(state, -2); + + return alignAndCast(mem); +} + /** * This is the base class for all C++ objects that we expose out to Lua. The goal is to hide as * much ugliness as possible. In general, to use this, do the following: @@ -90,14 +117,9 @@ template class BaseLuaObject : protected Logger::Loggable static std::pair create(lua_State* state, ConstructorArgs&&... args) { - // Create a new user data and assign its metatable. - void* mem = lua_newuserdata(state, sizeof(T)); - luaL_getmetatable(state, typeid(T).name()); - ASSERT(lua_istable(state, -1)); - lua_setmetatable(state, -2); - // Memory is allocated via Lua and it is raw. We use placement new to run the constructor. - ENVOY_LOG(trace, "creating {} at {}", typeid(T).name(), mem); + T* mem = allocateLuaUserData(state); + ENVOY_LOG(trace, "creating {} at {}", typeid(T).name(), static_cast(mem)); return {new (mem) T(std::forward(args)...), state}; } @@ -119,7 +141,7 @@ template class BaseLuaObject : protected Logger::Loggable(luaL_checkudata(state, 1, typeid(T).name())); + T* object = alignAndCast(luaL_checkudata(state, 1, typeid(T).name())); ENVOY_LOG(trace, "destroying {} at {}", typeid(T).name(), static_cast(object)); object->~T(); return 0; diff --git a/test/extensions/filters/common/lua/lua_test.cc b/test/extensions/filters/common/lua/lua_test.cc index 24e17e39e9eaa..b2996f1b92aa0 100644 --- a/test/extensions/filters/common/lua/lua_test.cc +++ b/test/extensions/filters/common/lua/lua_test.cc @@ -19,7 +19,9 @@ namespace Common { namespace Lua { namespace { -class TestObject : public BaseLuaObject { +// Setting large alignment requirement here so it fails the UBSAN tests if Lua allocated memory is +// not aligned by Envoy. See https://github.com/envoyproxy/envoy/issues/5551 for details. 
+class alignas(32) TestObject : public BaseLuaObject { public: ~TestObject() { onDestroy(); } diff --git a/tools/spelling_dictionary.txt b/tools/spelling_dictionary.txt index efe5cab515853..bd4a33889dd65 100644 --- a/tools/spelling_dictionary.txt +++ b/tools/spelling_dictionary.txt @@ -285,6 +285,7 @@ accessors acls addr agg +alignof alloc alloca allocator From 53781eb42fdfce56ec3b187dc551344ab4f4a86f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Dan=20No=C3=A9?= Date: Tue, 16 Apr 2019 17:31:39 -0400 Subject: [PATCH 133/165] Remove HeaderString::find and migrate callers to getStringView().find() (#6603) Description: Removed the HeaderString::find function, migrating all callers to getStringView().find() which is equivalent. Risk Level: Low Testing: bazel test //test/... Part of #6580 --- include/envoy/http/header_map.h | 10 +--------- source/common/http/user_agent.cc | 4 ++-- test/common/http/header_map_impl_fuzz_test.cc | 2 +- 3 files changed, 4 insertions(+), 12 deletions(-) diff --git a/include/envoy/http/header_map.h b/include/envoy/http/header_map.h index eef9498593ec3..3e3ee05422c53 100644 --- a/include/envoy/http/header_map.h +++ b/include/envoy/http/header_map.h @@ -136,15 +136,7 @@ class HeaderString { */ bool empty() const { return string_length_ == 0; } - /** - * @return whether a substring exists in the string. - * - * TODO(dnoe): Eliminate this by migrating callers to use string_view find - * directly (#6580) - */ - bool find(const char* str) const { - return getStringView().find(absl::string_view(str)) != absl::string_view::npos; - } + // Looking for find? Use getStringView().find() /** * Set the value of the string by copying data into it. This overwrites any existing string. 
diff --git a/source/common/http/user_agent.cc b/source/common/http/user_agent.cc index 57cb82e571034..fb6725c4e13bd 100644 --- a/source/common/http/user_agent.cc +++ b/source/common/http/user_agent.cc @@ -36,10 +36,10 @@ void UserAgent::initializeFromHeaders(const HeaderMap& headers, const std::strin const HeaderEntry* user_agent = headers.UserAgent(); if (user_agent) { prefix_ = prefix; - if (user_agent->value().find("iOS")) { + if (user_agent->value().getStringView().find("iOS") != absl::string_view::npos) { type_ = Type::iOS; prefix_ += "user_agent.ios."; - } else if (user_agent->value().find("android")) { + } else if (user_agent->value().getStringView().find("android") != absl::string_view::npos) { type_ = Type::Android; prefix_ += "user_agent.android."; } diff --git a/test/common/http/header_map_impl_fuzz_test.cc b/test/common/http/header_map_impl_fuzz_test.cc index fbf7c621059a1..37c80a4284bcb 100644 --- a/test/common/http/header_map_impl_fuzz_test.cc +++ b/test/common/http/header_map_impl_fuzz_test.cc @@ -106,7 +106,7 @@ DEFINE_PROTO_FUZZER(const test::common::http::HeaderMapImplFuzzTestCase& input) header_entry->value().clear(); break; case test::common::http::GetAndMutate::kFind: - header_entry->value().find(get_and_mutate.find().c_str()); + header_entry->value().getStringView().find(get_and_mutate.find()); break; case test::common::http::GetAndMutate::kSetCopy: header_entry->value().setCopy(get_and_mutate.set_copy().c_str(), From 4304b18819546f585e1e51c52fa5df0f01831633 Mon Sep 17 00:00:00 2001 From: Matt Klein Date: Tue, 16 Apr 2019 20:24:08 -0700 Subject: [PATCH 134/165] support hooks: ignore submodules for single file format check (#6607) Signed-off-by: Matt Klein --- support/hooks/pre-push | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/support/hooks/pre-push b/support/hooks/pre-push index d31e5ff20d714..b9485fd58ecf5 100755 --- a/support/hooks/pre-push +++ b/support/hooks/pre-push @@ -57,7 +57,7 @@ do # `$CLANG_FORMAT` and 
`$BUILDIFY` are defined, or that the default values it # assumes for these variables correspond to real binaries on the system. If # either of these things aren't true, the check fails. - for i in $(git diff --name-only $RANGE --diff-filter=ACMR 2>&1); do + for i in $(git diff --name-only $RANGE --diff-filter=ACMR --ignore-submodules=all 2>&1); do echo -ne " Checking format for $i - " "$SCRIPT_DIR"/check_format.py check $i if [[ $? -ne 0 ]]; then From a87a09d1b469942c206d07f4654983db26f1ecc2 Mon Sep 17 00:00:00 2001 From: Lizan Zhou Date: Tue, 16 Apr 2019 20:29:12 -0700 Subject: [PATCH 135/165] build: initialize const vector explicitly (#6604) Fixes #6584 Risk Level: Low Testing: CI Signed-off-by: Lizan Zhou --- source/common/http/async_client_impl.h | 2 +- source/common/tracing/http_tracer_impl.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/source/common/http/async_client_impl.h b/source/common/http/async_client_impl.h index 7f48d5234e14a..188c7a297355f 100644 --- a/source/common/http/async_client_impl.h +++ b/source/common/http/async_client_impl.h @@ -149,7 +149,7 @@ class AsyncStreamImpl : public AsyncClient::Stream, return retriable_status_codes_; } - const std::vector retriable_status_codes_; + const std::vector retriable_status_codes_{}; }; struct NullShadowPolicy : public Router::ShadowPolicy { diff --git a/source/common/tracing/http_tracer_impl.h b/source/common/tracing/http_tracer_impl.h index 67db1c818145d..d92dc3c84c519 100644 --- a/source/common/tracing/http_tracer_impl.h +++ b/source/common/tracing/http_tracer_impl.h @@ -117,7 +117,7 @@ class EgressConfigImpl : public Config { bool verbose() const override { return false; } private: - const std::vector request_headers_for_tags_; + const std::vector request_headers_for_tags_{}; }; typedef ConstSingleton EgressConfig; From 5ea1a0c1cb506ed3e80d52b572b0f767f55f9f39 Mon Sep 17 00:00:00 2001 From: Elisha Ziskind Date: Wed, 17 Apr 2019 13:29:52 -0400 Subject: [PATCH 136/165] Add tag 
extractor for RDS route config name (#6618) Description: add a tag extractor regex for the route config name for RDS stats (https://www.envoyproxy.io/docs/envoy/latest/configuration/http_conn_man/rds.html) Risk Level: low Testing: unit tests Signed-off-by: Elisha Ziskind --- source/common/config/well_known_names.cc | 3 +++ source/common/config/well_known_names.h | 2 ++ test/common/stats/tag_extractor_impl_test.cc | 11 +++++++++++ 3 files changed, 16 insertions(+) diff --git a/source/common/config/well_known_names.cc b/source/common/config/well_known_names.cc index 40d3ed42e4c31..16268660b18c3 100644 --- a/source/common/config/well_known_names.cc +++ b/source/common/config/well_known_names.cc @@ -106,6 +106,9 @@ TagNameValues::TagNameValues() { // mongo.(.)* addRegex(MONGO_PREFIX, "^mongo\\.((.*?)\\.)"); + + // http.[.]rds.(.) + addRegex(RDS_ROUTE_CONFIG, "^http(?=\\.).*?\\.rds\\.((.*?)\\.)\\w+?$", ".rds."); } void TagNameValues::addRegex(const std::string& name, const std::string& regex, diff --git a/source/common/config/well_known_names.h b/source/common/config/well_known_names.h index c4e2abe61c945..435aaa47bcd4e 100644 --- a/source/common/config/well_known_names.h +++ b/source/common/config/well_known_names.h @@ -145,6 +145,8 @@ class TagNameValues { const std::string RESPONSE_CODE = "envoy.response_code"; // Request response code class const std::string RESPONSE_CODE_CLASS = "envoy.response_code_class"; + // Route config name for RDS updates + const std::string RDS_ROUTE_CONFIG = "envoy.rds_route_config"; // Mapping from the names above to their respective regex strings. 
const std::vector> name_regex_pairs_; diff --git a/test/common/stats/tag_extractor_impl_test.cc b/test/common/stats/tag_extractor_impl_test.cc index 4fe93839194b6..b19ebe64b0252 100644 --- a/test/common/stats/tag_extractor_impl_test.cc +++ b/test/common/stats/tag_extractor_impl_test.cc @@ -327,6 +327,17 @@ TEST(TagExtractorTest, DefaultTagExtractors) { regex_tester.testRegex("http.fault_connection_manager.fault.fault_cluster.aborts_injected", "http.fault.aborts_injected", {fault_connection_manager, fault_downstream_cluster}); + + Tag rds_hcm; + rds_hcm.name_ = tag_names.HTTP_CONN_MANAGER_PREFIX; + rds_hcm.value_ = "rds_connection_manager"; + + Tag rds_route_config; + rds_route_config.name_ = tag_names.RDS_ROUTE_CONFIG; + rds_route_config.value_ = "route_config.123"; + + regex_tester.testRegex("http.rds_connection_manager.rds.route_config.123.update_success", + "http.rds.update_success", {rds_hcm, rds_route_config}); } TEST(TagExtractorTest, ExtractRegexPrefix) { From bad70bf1625ad5809f2b32be62b4aa4c33a6741d Mon Sep 17 00:00:00 2001 From: Dan Rosen Date: Wed, 17 Apr 2019 14:43:31 -0400 Subject: [PATCH 137/165] event: fix DispatcherImplTest::InitializeStats flake (#6619) Fix DispatcherImplTest::InitializeStats flake due to dispatcher lifetime exceeding mock stats scope lifetime. 
Risk Level: low Testing: bazel test //test/common/event:dispatcher_impl_test --runs_per_test=1000 (with ASAN enabled) Docs Changes: n/a Release Notes: n/a Fixes #6611 Signed-off-by: Dan Rosen --- test/common/event/dispatcher_impl_test.cc | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/test/common/event/dispatcher_impl_test.cc b/test/common/event/dispatcher_impl_test.cc index 25185a30364e2..951617feec356 100644 --- a/test/common/event/dispatcher_impl_test.cc +++ b/test/common/event/dispatcher_impl_test.cc @@ -85,6 +85,7 @@ class DispatcherImplTest : public testing::Test { dispatcher_thread_->join(); } + NiceMock scope_; // Used in InitializeStats, must outlive dispatcher_->exit(). Api::ApiPtr api_; Thread::ThreadPtr dispatcher_thread_; DispatcherPtr dispatcher_; @@ -96,11 +97,9 @@ class DispatcherImplTest : public testing::Test { }; TEST_F(DispatcherImplTest, InitializeStats) { - // NiceMock because deliverHistogramToSinks may or may not be called, depending on timing. - NiceMock scope; - EXPECT_CALL(scope, histogram("test.dispatcher.loop_duration_us")); - EXPECT_CALL(scope, histogram("test.dispatcher.poll_delay_us")); - dispatcher_->initializeStats(scope, "test."); + EXPECT_CALL(scope_, histogram("test.dispatcher.loop_duration_us")); + EXPECT_CALL(scope_, histogram("test.dispatcher.poll_delay_us")); + dispatcher_->initializeStats(scope_, "test."); } TEST_F(DispatcherImplTest, Post) { From 504e15f1017466eaf4822875a3eb35b17e78005f Mon Sep 17 00:00:00 2001 From: Michael Puncel Date: Wed, 17 Apr 2019 15:14:13 -0400 Subject: [PATCH 138/165] add HTTP integration tests exercising timeouts (#6621) Add integration tests around HTTP timeouts in the router filter including per try and global timeout. 
Risk Level: Low Testing: integration tests Signed-off-by: Michael Puncel --- test/integration/BUILD | 11 +++ .../http_timeout_integration_test.cc | 98 +++++++++++++++++++ .../http_timeout_integration_test.h | 22 +++++ 3 files changed, 131 insertions(+) create mode 100644 test/integration/http_timeout_integration_test.cc create mode 100644 test/integration/http_timeout_integration_test.h diff --git a/test/integration/BUILD b/test/integration/BUILD index 8b11b96571885..b2761fc00ac8a 100644 --- a/test/integration/BUILD +++ b/test/integration/BUILD @@ -171,6 +171,17 @@ envoy_cc_test( ], ) +envoy_cc_test( + name = "http_timeout_integration_test", + srcs = [ + "http_timeout_integration_test.cc", + "http_timeout_integration_test.h", + ], + deps = [ + ":http_integration_lib", + ], +) + envoy_cc_test( name = "protocol_integration_test", srcs = [ diff --git a/test/integration/http_timeout_integration_test.cc b/test/integration/http_timeout_integration_test.cc new file mode 100644 index 0000000000000..00b0f4c65ea8a --- /dev/null +++ b/test/integration/http_timeout_integration_test.cc @@ -0,0 +1,98 @@ +#include "test/integration/http_timeout_integration_test.h" + +#include "gtest/gtest.h" + +namespace Envoy { + +INSTANTIATE_TEST_SUITE_P(IpVersions, HttpTimeoutIntegrationTest, + testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), + TestUtility::ipTestParamsToString); + +// Sends a request with a global timeout specified, sleeps for longer than the +// timeout, and ensures that a timeout is received. 
+TEST_P(HttpTimeoutIntegrationTest, GlobalTimeout) { + initialize(); + + codec_client_ = makeHttpConnection(makeClientConnection(lookupPort("http"))); + auto encoder_decoder = codec_client_->startRequest( + Http::TestHeaderMapImpl{{":method", "POST"}, + {":path", "/test/long/url"}, + {":scheme", "http"}, + {":authority", "host"}, + {"x-forwarded-for", "10.0.0.1"}, + {"x-envoy-upstream-rq-timeout-ms", "500"}}); + auto response = std::move(encoder_decoder.second); + request_encoder_ = &encoder_decoder.first; + + ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); + ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); + ASSERT_TRUE(upstream_request_->waitForHeadersComplete()); + codec_client_->sendData(*request_encoder_, 0, true); + + ASSERT_TRUE(upstream_request_->waitForEndStream(*dispatcher_)); + + // Trigger global timeout. + timeSystem().sleep(std::chrono::milliseconds(501)); + + // Ensure we got a timeout downstream and canceled the upstream request. + response->waitForHeaders(); + ASSERT_TRUE(upstream_request_->waitForReset(std::chrono::milliseconds(0))); + + codec_client_->close(); + + EXPECT_TRUE(upstream_request_->complete()); + EXPECT_EQ(0U, upstream_request_->bodyLength()); + + EXPECT_TRUE(response->complete()); + EXPECT_EQ("504", response->headers().Status()->value().getStringView()); +} + +// Sends a request with a global timeout and per try timeout specified, sleeps +// for longer than the per try but slightly less than the global timeout. +// Ensures that two requests are attempted and a timeout is returned +// downstream. 
+TEST_P(HttpTimeoutIntegrationTest, PerTryTimeout) { + initialize(); + + codec_client_ = makeHttpConnection(makeClientConnection(lookupPort("http"))); + auto encoder_decoder = codec_client_->startRequest( + Http::TestHeaderMapImpl{{":method", "POST"}, + {":path", "/test/long/url"}, + {":scheme", "http"}, + {":authority", "host"}, + {"x-forwarded-for", "10.0.0.1"}, + {"x-envoy-retry-on", "5xx"}, + {"x-envoy-upstream-rq-timeout-ms", "500"}, + {"x-envoy-upstream-rq-per-try-timeout-ms", "400"}}); + auto response = std::move(encoder_decoder.second); + request_encoder_ = &encoder_decoder.first; + + ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); + ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); + ASSERT_TRUE(upstream_request_->waitForHeadersComplete()); + codec_client_->sendData(*request_encoder_, 0, true); + + ASSERT_TRUE(upstream_request_->waitForEndStream(*dispatcher_)); + + // Trigger per try timeout (but not global timeout). + timeSystem().sleep(std::chrono::milliseconds(400)); + + // Wait for a second request to be sent upstream + ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); + ASSERT_TRUE(upstream_request_->waitForHeadersComplete()); + ASSERT_TRUE(upstream_request_->waitForEndStream(*dispatcher_)); + + // Trigger global timeout. 
+ timeSystem().sleep(std::chrono::milliseconds(100)); + response->waitForHeaders(); + + codec_client_->close(); + + EXPECT_TRUE(upstream_request_->complete()); + EXPECT_EQ(0U, upstream_request_->bodyLength()); + + EXPECT_TRUE(response->complete()); + EXPECT_EQ("504", response->headers().Status()->value().getStringView()); +} + +} // namespace Envoy diff --git a/test/integration/http_timeout_integration_test.h b/test/integration/http_timeout_integration_test.h new file mode 100644 index 0000000000000..fd378f4ce7f57 --- /dev/null +++ b/test/integration/http_timeout_integration_test.h @@ -0,0 +1,22 @@ +#pragma once + +#include "test/integration/http_integration.h" + +#include "gtest/gtest.h" + +namespace Envoy { +class HttpTimeoutIntegrationTest : public testing::TestWithParam, + public Event::TestUsingSimulatedTime, + public HttpIntegrationTest { +public: + // Arbitrarily choose HTTP2 here, the tests for this class are around + // timeouts which don't have version specific behavior. + HttpTimeoutIntegrationTest() : HttpIntegrationTest(Http::CodecClient::Type::HTTP2, GetParam()) {} + + void SetUp() override { + setDownstreamProtocol(Http::CodecClient::Type::HTTP2); + setUpstreamProtocol(FakeHttpConnection::Type::HTTP2); + } +}; + +} // namespace Envoy From e6a3ce16ff818cc4d5588345fca084efeaa1985d Mon Sep 17 00:00:00 2001 From: vishalpowar Date: Wed, 17 Apr 2019 12:53:01 -0700 Subject: [PATCH 139/165] Implementing Endpoint lease for ClusterLoadAssigment (#6477) Add support for specifying _stale_after timeout as part of ClusterLoadAssignment Risk Level: Low Optional Feature that is triggered by the Management Server. Defaults to noop. 
Testing: Unit test Docs Changes: None Release Notes: None Fixes #6420 Signed-off-by: Vishal Powar --- api/envoy/api/v2/eds.proto | 7 ++ .../cluster_manager/cluster_stats.rst | 2 + docs/root/intro/version_history.rst | 1 + include/envoy/upstream/upstream.h | 2 + source/common/upstream/eds.cc | 32 +++++- source/common/upstream/eds.h | 2 + test/common/upstream/eds_test.cc | 102 ++++++++++++++++++ test/integration/stats_integration_test.cc | 4 +- 8 files changed, 148 insertions(+), 4 deletions(-) diff --git a/api/envoy/api/v2/eds.proto b/api/envoy/api/v2/eds.proto index 54f9d08c6f843..2f8fd7a4186dd 100644 --- a/api/envoy/api/v2/eds.proto +++ b/api/envoy/api/v2/eds.proto @@ -17,6 +17,7 @@ import "google/api/annotations.proto"; import "validate/validate.proto"; import "gogoproto/gogo.proto"; import "google/protobuf/wrappers.proto"; +import "google/protobuf/duration.proto"; option (gogoproto.equal_all) = true; option (gogoproto.stable_marshaler_all) = true; @@ -107,6 +108,12 @@ message ClusterLoadAssignment { // Read more at :ref:`priority levels ` and // :ref:`localities `. google.protobuf.UInt32Value overprovisioning_factor = 3 [(validate.rules).uint32.gt = 0]; + + // The max time until which the endpoints from this assignment can be used. + // If no new assignments are received before this time expires the endpoints + // are considered stale and should be marked unhealthy. + // Defaults to 0 which means endpoints never go stale. + google.protobuf.Duration endpoint_stale_after = 4 [(validate.rules).duration.gt.seconds = 0]; } // Load balancing policy settings. 
diff --git a/docs/root/configuration/cluster_manager/cluster_stats.rst b/docs/root/configuration/cluster_manager/cluster_stats.rst index f881e8963ccdd..b5b6554be7b63 100644 --- a/docs/root/configuration/cluster_manager/cluster_stats.rst +++ b/docs/root/configuration/cluster_manager/cluster_stats.rst @@ -95,6 +95,8 @@ Every cluster has a statistics tree rooted at *cluster..* with the followi version, Gauge, Hash of the contents from the last successful API fetch max_host_weight, Gauge, Maximum weight of any host in the cluster bind_errors, Counter, Total errors binding the socket to the configured source address + assignment_timeout_received, Counter, Total assignments received with endpoint lease information. + assignment_stale, Counter, Number of times the received assignments went stale before new assignments arrived. Health check statistics ----------------------- diff --git a/docs/root/intro/version_history.rst b/docs/root/intro/version_history.rst index 93dcb931cc0e7..5ed2c7233f2ed 100644 --- a/docs/root/intro/version_history.rst +++ b/docs/root/intro/version_history.rst @@ -5,6 +5,7 @@ Version history ================ * dubbo_proxy: support the :ref:`Dubbo proxy filter `. * event: added :ref:`loop duration and poll delay statistics `. +* eds: added support to specify max time for which endpoints can be used :ref:`gRPC filter `. * http: mitigated a race condition with the :ref:`delayed_close_timeout` where it could trigger while actively flushing a pending write buffer for a downstream connection. * redis: added :ref:`prefix routing ` to enable routing commands based on their key's prefix to different upstream. * redis: add support for zpopmax and zpopmin commands. 
diff --git a/include/envoy/upstream/upstream.h b/include/envoy/upstream/upstream.h index f6599c0b7254a..25f431860fbc9 100644 --- a/include/envoy/upstream/upstream.h +++ b/include/envoy/upstream/upstream.h @@ -522,6 +522,8 @@ class PrioritySet { COUNTER (update_failure) \ COUNTER (update_empty) \ COUNTER (update_no_rebuild) \ + COUNTER (assignment_timeout_received) \ + COUNTER (assignment_stale) \ GAUGE (version) // clang-format on diff --git a/source/common/upstream/eds.cc b/source/common/upstream/eds.cc index 7735114e50665..98a9654e838a5 100644 --- a/source/common/upstream/eds.cc +++ b/source/common/upstream/eds.cc @@ -2,6 +2,7 @@ #include "envoy/api/v2/eds.pb.validate.h" +#include "common/common/utility.h" #include "common/config/subscription_factory.h" namespace Envoy { @@ -18,11 +19,11 @@ EdsClusterImpl::EdsClusterImpl( ? cluster.name() : cluster.eds_cluster_config().service_name()) { Config::Utility::checkLocalInfo("eds", local_info_); - - const auto& eds_config = cluster.eds_cluster_config().eds_config(); Event::Dispatcher& dispatcher = factory_context.dispatcher(); Runtime::RandomGenerator& random = factory_context.random(); Upstream::ClusterManager& cm = factory_context.clusterManager(); + assignment_timeout_ = dispatcher.createTimer([this]() -> void { onAssignmentTimeout(); }); + const auto& eds_config = cluster.eds_cluster_config().eds_config(); subscription_ = Config::SubscriptionFactory::subscriptionFromConfigSource( eds_config, local_info_, dispatcher, cm, random, info_->statsScope(), "envoy.api.v2.EndpointDiscoveryService.FetchEndpoints", @@ -118,10 +119,37 @@ void EdsClusterImpl::onConfigUpdate(const Protobuf::RepeatedPtrFieldenabled()) { + assignment_timeout_->disableTimer(); + } + // Check if endpoint_stale_after is set. + const uint64_t stale_after_ms = + PROTOBUF_GET_MS_OR_DEFAULT(cluster_load_assignment.policy(), endpoint_stale_after, 0); + if (stale_after_ms > 0) { + // Stat to track how often we receive valid assignment_timeout in response. 
+ info_->stats().assignment_timeout_received_.inc(); + assignment_timeout_->enableTimer(std::chrono::milliseconds(stale_after_ms)); + } + BatchUpdateHelper helper(*this, cluster_load_assignment); priority_set_.batchHostUpdate(helper); } +void EdsClusterImpl::onAssignmentTimeout() { + // We can no longer use the assignments, remove them. + // TODO(vishalpowar) This is not going to work for incremental updates, and we + // need to instead change the health status to indicate the assignments are + // stale. + Protobuf::RepeatedPtrField resources; + envoy::api::v2::ClusterLoadAssignment resource; + resource.set_cluster_name(cluster_name_); + resources.Add()->PackFrom(resource); + onConfigUpdate(resources, ""); + // Stat to track how often we end up with stale assignments. + info_->stats().assignment_stale_.inc(); +} + bool EdsClusterImpl::updateHostsPerLocality( const uint32_t priority, const uint32_t overprovisioning_factor, const HostVector& new_hosts, LocalityWeightsMap& locality_weights_map, LocalityWeightsMap& new_locality_weights_map, diff --git a/source/common/upstream/eds.h b/source/common/upstream/eds.h index 2194ef2d22344..b2b3139031dae 100644 --- a/source/common/upstream/eds.h +++ b/source/common/upstream/eds.h @@ -53,6 +53,7 @@ class EdsClusterImpl : public BaseDynamicClusterImpl, Config::SubscriptionCallba // ClusterImplBase void startPreInit() override; + void onAssignmentTimeout(); class BatchUpdateHelper : public PrioritySet::BatchUpdateCb { public: @@ -74,6 +75,7 @@ class EdsClusterImpl : public BaseDynamicClusterImpl, Config::SubscriptionCallba const std::string cluster_name_; std::vector locality_weights_map_; HostMap all_hosts_; + Event::TimerPtr assignment_timeout_; }; class EdsClusterFactory : public ClusterFactoryImplBase { diff --git a/test/common/upstream/eds_test.cc b/test/common/upstream/eds_test.cc index abd69b656e4ef..2ffdb3f991ca3 100644 --- a/test/common/upstream/eds_test.cc +++ b/test/common/upstream/eds_test.cc @@ -21,6 +21,7 @@ 
#include "gtest/gtest.h" using testing::_; +using testing::AtLeast; using testing::Return; using testing::ReturnRef; @@ -1430,6 +1431,107 @@ TEST_F(EdsTest, MalformedIP) { "setting cluster type to 'STRICT_DNS' or 'LOGICAL_DNS'"); } +class EdsAssignmentTimeoutTest : public EdsTest { +public: + EdsAssignmentTimeoutTest() : EdsTest(), interval_timer_(nullptr) { + EXPECT_CALL(dispatcher_, createTimer_(_)) + .WillOnce(Invoke([this](Event::TimerCb cb) { + timer_cb_ = cb; + EXPECT_EQ(nullptr, interval_timer_); + interval_timer_ = new Event::MockTimer(); + return interval_timer_; + })) + .WillRepeatedly(Invoke([](Event::TimerCb) { return new Event::MockTimer(); })); + + resetCluster(); + } + + Event::MockTimer* interval_timer_; + Event::TimerCb timer_cb_; +}; + +// Test that assignment timeout is enabled and disabled correctly. +TEST_F(EdsAssignmentTimeoutTest, AssignmentTimeoutEnableDisable) { + envoy::api::v2::ClusterLoadAssignment cluster_load_assignment; + cluster_load_assignment.set_cluster_name("fare"); + auto* endpoints = cluster_load_assignment.add_endpoints(); + + auto health_checker = std::make_shared(); + EXPECT_CALL(*health_checker, start()); + EXPECT_CALL(*health_checker, addHostCheckCompleteCb(_)).Times(2); + cluster_->setHealthChecker(health_checker); + + auto* socket_address = endpoints->add_lb_endpoints() + ->mutable_endpoint() + ->mutable_address() + ->mutable_socket_address(); + socket_address->set_address("1.2.3.4"); + socket_address->set_port_value(80); + + envoy::api::v2::ClusterLoadAssignment cluster_load_assignment_lease = cluster_load_assignment; + cluster_load_assignment_lease.mutable_policy()->mutable_endpoint_stale_after()->MergeFrom( + Protobuf::util::TimeUtil::SecondsToDuration(1)); + + EXPECT_CALL(*interval_timer_, enableTimer(_)).Times(2); // Timer enabled twice. + EXPECT_CALL(*interval_timer_, disableTimer()).Times(1); // Timer disabled once. + EXPECT_CALL(*interval_timer_, enabled()).Times(6); // Includes calls by test. 
+ doOnConfigUpdateVerifyNoThrow(cluster_load_assignment_lease); + // Check that the timer is enabled. + EXPECT_EQ(interval_timer_->enabled(), true); + doOnConfigUpdateVerifyNoThrow(cluster_load_assignment); + // Check that the timer is disabled. + EXPECT_EQ(interval_timer_->enabled(), false); + doOnConfigUpdateVerifyNoThrow(cluster_load_assignment_lease); + // Check that the timer is enabled. + EXPECT_EQ(interval_timer_->enabled(), true); +} + +// Test that assignment timeout is called and removes all the endpoints. +TEST_F(EdsAssignmentTimeoutTest, AssignmentLeaseExpired) { + envoy::api::v2::ClusterLoadAssignment cluster_load_assignment; + cluster_load_assignment.set_cluster_name("fare"); + cluster_load_assignment.mutable_policy()->mutable_endpoint_stale_after()->MergeFrom( + Protobuf::util::TimeUtil::SecondsToDuration(1)); + + auto health_checker = std::make_shared(); + EXPECT_CALL(*health_checker, start()); + EXPECT_CALL(*health_checker, addHostCheckCompleteCb(_)).Times(2); + cluster_->setHealthChecker(health_checker); + + auto add_endpoint = [&cluster_load_assignment](int port) { + auto* endpoints = cluster_load_assignment.add_endpoints(); + + auto* socket_address = endpoints->add_lb_endpoints() + ->mutable_endpoint() + ->mutable_address() + ->mutable_socket_address(); + socket_address->set_address("1.2.3.4"); + socket_address->set_port_value(port); + }; + + // Add two endpoints to the cluster assignment. + add_endpoint(80); + add_endpoint(81); + + // Expect the timer to be enabled once. + EXPECT_CALL(*interval_timer_, enableTimer(std::chrono::milliseconds(1000))); + // Expect the timer to be disabled when stale assignments are removed. + EXPECT_CALL(*interval_timer_, disableTimer()); + EXPECT_CALL(*interval_timer_, enabled()).Times(2); + doOnConfigUpdateVerifyNoThrow(cluster_load_assignment); + { + auto& hosts = cluster_->prioritySet().hostSetsPerPriority()[0]->hosts(); + EXPECT_EQ(hosts.size(), 2); + } + // Call the timer callback to indicate timeout. 
+ timer_cb_(); + // Test that stale endpoints are removed. + { + auto& hosts = cluster_->prioritySet().hostSetsPerPriority()[0]->hosts(); + EXPECT_EQ(hosts.size(), 0); + } +} + } // namespace } // namespace Upstream } // namespace Envoy diff --git a/test/integration/stats_integration_test.cc b/test/integration/stats_integration_test.cc index db0c1845b3afd..d1a3d302294db 100644 --- a/test/integration/stats_integration_test.cc +++ b/test/integration/stats_integration_test.cc @@ -195,8 +195,8 @@ TEST_P(ClusterMemoryTestRunner, MemoryLargeClusterSizeWithStats) { EXPECT_LT(start_mem, m1); EXPECT_LT(start_mem, m1001); - // As of 2019/03/20, m_per_cluster = 59015 (libstdc++) - EXPECT_LT(m_per_cluster, 59100); + // As of 2019/04/12, m_per_cluster = 59576 (libstdc++) + EXPECT_LT(m_per_cluster, 59600); } } // namespace From 3a8128a8189a65c2f156eaea5bcb14ac50166ed9 Mon Sep 17 00:00:00 2001 From: htuch Date: Wed, 17 Apr 2019 17:07:10 -0400 Subject: [PATCH 140/165] security: blameless postmortem template. (#6553) Modified from https://raw.githubusercontent.com/dastergon/postmortem-templates/master/templates/postmortem-template-srebook.md and https://landing.google.com/sre/book/chapters/postmortem.html. Signed-off-by: Harvey Tuch --- security/postmortem-template.md | 75 +++++++++++++++++++++++++++++++++ 1 file changed, 75 insertions(+) create mode 100644 security/postmortem-template.md diff --git a/security/postmortem-template.md b/security/postmortem-template.md new file mode 100644 index 0000000000000..b442897c87272 --- /dev/null +++ b/security/postmortem-template.md @@ -0,0 +1,75 @@ +> Slimmed down template from: Betsy Beyer, Chris Jones, Jennifer Petoff, and Niall Richard +> Murphy. [“Site Reliability +> Engineering.”](https://landing.google.com/sre/book/chapters/postmortem.html), +> modified from +> https://raw.githubusercontent.com/dastergon/postmortem-templates/master/templates/postmortem-template-srebook.md. + +> Follow the SRE link for examples of how to populate. 
+ +> A PR should be opened with postmortem placed in security/postmortems/cve-year-abcdef.md. If there +> are multiple CVEs in the postmortem, populate each alias with the string "See cve-year-abcdef.md". + +# Security postmortem for CVE-YEAR-ABCDEF, CVE-YEAR-ABCDEG + +## Incident date(s) + +> YYYY-MM-DD (as a date range if over a period of time) + +## Authors + +> @foo, @bar, ... + +## Status + +> Draft | Final + +## Summary + +> A few sentence summary. + +## CVE issue(s) + +> https://github.com/envoyproxy/envoy/issues/${CVE_ISSUED_ID} + +## Root Causes + +> What defect in Envoy led to the CVEs? How did this defect arise? + +## Resolution + +> How was the security release process followed? How were the fix patches +> structured and authored? + +## Detection + +> How was this discovered? Reported by XYZ, found by fuzzing? Private or public +> disclosure? + +## Action Items + +> Create action item issues and include in their body "Action item for +> CVE-YEAR-ABCDEF". Modify the search string below to include in the PR: + +https://github.com/envoyproxy/envoy/issues?utf8=%E2%9C%93&q=is%3Aissue+%22Action+item+for+CVE-YEAR-ABCDEF%22 + +## Lessons Learned + +### What went well + +### What went wrong + +### Where we got lucky + +## Timeline + +All times US/Pacific + +YYYY-MM-DD +* HH:MM Cake was made available +* HH:MM People ate the cake + +YYYY-MM-DD +* HH:MM More cake was available +* HH:MM People ate more cake + +## Supporting information From c2e8e3f0de2b105e8d4911ce03ab9a23631f87c1 Mon Sep 17 00:00:00 2001 From: Matt Klein Date: Wed, 17 Apr 2019 21:38:57 -0700 Subject: [PATCH 141/165] tools: check spelling in pre-push hook (#6631) Signed-off-by: Matt Klein --- support/hooks/pre-push | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/support/hooks/pre-push b/support/hooks/pre-push index b9485fd58ecf5..38ef688b9bd52 100755 --- a/support/hooks/pre-push +++ b/support/hooks/pre-push @@ -63,13 +63,18 @@ do if [[ $? 
-ne 0 ]]; then exit 1 fi + + echo " Checking spelling for $i" + "$SCRIPT_DIR"/check_spelling_pedantic.py check $i + if [[ $? -ne 0 ]]; then + exit 1 + fi done "$SCRIPT_DIR"/format_python_tools.sh check if [[ $? -ne 0 ]]; then exit 1 fi - # Check correctness of repositories definitions. echo " Checking repositories definitions" if ! "$SCRIPT_DIR"/check_repositories.sh; then From 788e66d5ea618d96ebe8502d275245e944d74824 Mon Sep 17 00:00:00 2001 From: htuch Date: Thu, 18 Apr 2019 00:40:04 -0400 Subject: [PATCH 142/165] build: update jinja to 2.10.1. (#6623) GitHub was complaining that 2.10 was problematic security wise; I don't think it's an issue in our environment, but this should make the warnings go away. Signed-off-by: Harvey Tuch --- configs/requirements.txt | 2 +- docs/requirements.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/configs/requirements.txt b/configs/requirements.txt index b60338e30ada0..f4c7b793c7b9c 100644 --- a/configs/requirements.txt +++ b/configs/requirements.txt @@ -1 +1 @@ -jinja2==2.10 +jinja2==2.10.1 diff --git a/docs/requirements.txt b/docs/requirements.txt index 44a91ddeecc9b..8b19a35ae6339 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -1,5 +1,5 @@ GitPython==2.0.8 -Jinja2==2.10 +Jinja2==2.10.1 MarkupSafe==1.1.0 Pygments==2.2.0 alabaster==0.7.10 From 0e109cb3ba3be0823bdb696eacb02a827989efa1 Mon Sep 17 00:00:00 2001 From: Gabriel Sagula Date: Wed, 17 Apr 2019 21:40:23 -0700 Subject: [PATCH 143/165] ext_authz: option for clearing route cache of authorized requests (#6503) Signed-off-by: Gabriel --- .../filter/http/ext_authz/v2/ext_authz.proto | 12 + docs/root/intro/version_history.rst | 1 + .../filters/http/ext_authz/ext_authz.cc | 6 + .../filters/http/ext_authz/ext_authz.h | 4 + .../filters/http/ext_authz/ext_authz_test.cc | 221 +++++++++++++++++- 5 files changed, 243 insertions(+), 1 deletion(-) diff --git a/api/envoy/config/filter/http/ext_authz/v2/ext_authz.proto 
b/api/envoy/config/filter/http/ext_authz/v2/ext_authz.proto index abe1638b858e6..b430fe93a519f 100644 --- a/api/envoy/config/filter/http/ext_authz/v2/ext_authz.proto +++ b/api/envoy/config/filter/http/ext_authz/v2/ext_authz.proto @@ -53,6 +53,18 @@ message ExtAuthz { // Enables filter to buffer the client request body and send it within the authorization request. BufferSettings with_request_body = 5; + + // Clears route cache in order to allow the external authorization service to correctly affect + // routing decisions. Filter clears all cached routes when: + // + // 1. The field is set to *true*. + // + // 2. The status returned from the authorization service is a HTTP 200 or gRPC 0. + // + // 3. At least one *authorization response header* is added to the client request, or is used for + // altering another client request header. + // + bool clear_route_cache = 6; } // Configuration for buffering the request data. diff --git a/docs/root/intro/version_history.rst b/docs/root/intro/version_history.rst index 5ed2c7233f2ed..6d2771b906b78 100644 --- a/docs/root/intro/version_history.rst +++ b/docs/root/intro/version_history.rst @@ -5,6 +5,7 @@ Version history ================ * dubbo_proxy: support the :ref:`Dubbo proxy filter `. * event: added :ref:`loop duration and poll delay statistics `. +* ext_authz: added option to `ext_authz` that allows the filter clearing route cache. * eds: added support to specify max time for which endpoints can be used :ref:`gRPC filter `. * http: mitigated a race condition with the :ref:`delayed_close_timeout` where it could trigger while actively flushing a pending write buffer for a downstream connection. * redis: added :ref:`prefix routing ` to enable routing commands based on their key's prefix to different upstream. 
diff --git a/source/extensions/filters/http/ext_authz/ext_authz.cc b/source/extensions/filters/http/ext_authz/ext_authz.cc index cb93de3af6386..f01c4526fdcab 100644 --- a/source/extensions/filters/http/ext_authz/ext_authz.cc +++ b/source/extensions/filters/http/ext_authz/ext_authz.cc @@ -187,6 +187,12 @@ void Filter::onComplete(Filters::Common::ExtAuthz::ResponsePtr&& response) { // Only send headers if the response is ok. if (response->status == CheckStatus::OK) { ENVOY_STREAM_LOG(trace, "ext_authz filter added header(s) to the request:", *callbacks_); + if (config_->clearRouteCache() && + (!response->headers_to_add.empty() || !response->headers_to_append.empty())) { + ENVOY_STREAM_LOG(debug, "ext_authz is clearing route cache", *callbacks_); + callbacks_->clearRouteCache(); + } + for (const auto& header : response->headers_to_add) { Http::HeaderEntry* header_to_modify = request_headers_->get(header.first); if (header_to_modify) { diff --git a/source/extensions/filters/http/ext_authz/ext_authz.h b/source/extensions/filters/http/ext_authz/ext_authz.h index 3cc8821509767..4133aea2d1813 100644 --- a/source/extensions/filters/http/ext_authz/ext_authz.h +++ b/source/extensions/filters/http/ext_authz/ext_authz.h @@ -41,6 +41,7 @@ class FilterConfig { Runtime::Loader& runtime, Http::Context& http_context) : allow_partial_message_(config.with_request_body().allow_partial_message()), failure_mode_allow_(config.failure_mode_allow()), + clear_route_cache_(config.clear_route_cache()), max_request_bytes_(config.with_request_body().max_request_bytes()), local_info_(local_info), scope_(scope), runtime_(runtime), http_context_(http_context) {} @@ -50,6 +51,8 @@ class FilterConfig { bool failureModeAllow() const { return failure_mode_allow_; } + bool clearRouteCache() const { return clear_route_cache_; } + uint32_t maxRequestBytes() const { return max_request_bytes_; } const LocalInfo::LocalInfo& localInfo() const { return local_info_; } @@ -63,6 +66,7 @@ class FilterConfig { 
private: const bool allow_partial_message_; const bool failure_mode_allow_; + const bool clear_route_cache_; const uint32_t max_request_bytes_; const LocalInfo::LocalInfo& local_info_; Stats::Scope& scope_; diff --git a/test/extensions/filters/http/ext_authz/ext_authz_test.cc b/test/extensions/filters/http/ext_authz/ext_authz_test.cc index 6221e95dda3c4..ea3874e1995e1 100644 --- a/test/extensions/filters/http/ext_authz/ext_authz_test.cc +++ b/test/extensions/filters/http/ext_authz/ext_authz_test.cc @@ -497,6 +497,225 @@ TEST_F(HttpFilterTest, HeaderOnlyRequestWithStream) { EXPECT_EQ(Http::FilterTrailersStatus::StopIteration, filter_->decodeTrailers(request_headers_)); } +// Verifies that the filter clears the route cache when an authorization response: +// 1. is an OK response. +// 2. has headers to append. +// 3. has headers to add. +TEST_F(HttpFilterTest, ClearCache) { + InSequence s; + + initialize(R"EOF( + grpc_service: + envoy_grpc: + cluster_name: "ext_authz_server" + clear_route_cache: true + )EOF"); + + prepareCheck(); + + EXPECT_CALL(*client_, check(_, _, testing::A())) + .WillOnce( + WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { + request_callbacks_ = &callbacks; + }))); + EXPECT_CALL(filter_callbacks_, clearRouteCache()).Times(1); + EXPECT_EQ(Http::FilterHeadersStatus::StopAllIterationAndWatermark, + filter_->decodeHeaders(request_headers_, false)); + EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false)); + EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_headers_)); + EXPECT_CALL(filter_callbacks_, continueDecoding()); + EXPECT_CALL(filter_callbacks_.stream_info_, + setResponseFlag(Envoy::StreamInfo::ResponseFlag::UnauthorizedExternalService)) + .Times(0); + + Filters::Common::ExtAuthz::Response response{}; + response.status = Filters::Common::ExtAuthz::CheckStatus::OK; + response.headers_to_append = Http::HeaderVector{{Http::LowerCaseString{"foo"}, 
"bar"}}; + response.headers_to_add = Http::HeaderVector{{Http::LowerCaseString{"bar"}, "foo"}}; + request_callbacks_->onComplete(std::make_unique(response)); + EXPECT_EQ(1U, filter_callbacks_.clusterInfo()->statsScope().counter("ext_authz.ok").value()); +} + +// Verifies that the filter clears the route cache when an authorization response: +// 1. is an OK response. +// 2. has headers to append. +// 3. has NO headers to add. +TEST_F(HttpFilterTest, ClearCacheRouteHeadersToAppendOnly) { + InSequence s; + + initialize(R"EOF( + grpc_service: + envoy_grpc: + cluster_name: "ext_authz_server" + clear_route_cache: true + )EOF"); + + prepareCheck(); + + EXPECT_CALL(*client_, check(_, _, testing::A())) + .WillOnce( + WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { + request_callbacks_ = &callbacks; + }))); + EXPECT_CALL(filter_callbacks_, clearRouteCache()).Times(1); + EXPECT_EQ(Http::FilterHeadersStatus::StopAllIterationAndWatermark, + filter_->decodeHeaders(request_headers_, false)); + EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false)); + EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_headers_)); + EXPECT_CALL(filter_callbacks_, continueDecoding()); + EXPECT_CALL(filter_callbacks_.stream_info_, + setResponseFlag(Envoy::StreamInfo::ResponseFlag::UnauthorizedExternalService)) + .Times(0); + + Filters::Common::ExtAuthz::Response response{}; + response.status = Filters::Common::ExtAuthz::CheckStatus::OK; + response.headers_to_append = Http::HeaderVector{{Http::LowerCaseString{"foo"}, "bar"}}; + request_callbacks_->onComplete(std::make_unique(response)); + EXPECT_EQ(1U, filter_callbacks_.clusterInfo()->statsScope().counter("ext_authz.ok").value()); +} + +// Verifies that the filter clears the route cache when an authorization response: +// 1. is an OK response. +// 2. has headers to add. +// 3. has NO headers to append. 
+TEST_F(HttpFilterTest, ClearCacheRouteHeadersToAddOnly) { + InSequence s; + + initialize(R"EOF( + grpc_service: + envoy_grpc: + cluster_name: "ext_authz_server" + clear_route_cache: true + )EOF"); + + prepareCheck(); + + EXPECT_CALL(*client_, check(_, _, testing::A())) + .WillOnce( + WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { + request_callbacks_ = &callbacks; + }))); + EXPECT_CALL(filter_callbacks_, clearRouteCache()).Times(1); + EXPECT_EQ(Http::FilterHeadersStatus::StopAllIterationAndWatermark, + filter_->decodeHeaders(request_headers_, false)); + EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false)); + EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_headers_)); + EXPECT_CALL(filter_callbacks_, continueDecoding()); + EXPECT_CALL(filter_callbacks_.stream_info_, + setResponseFlag(Envoy::StreamInfo::ResponseFlag::UnauthorizedExternalService)) + .Times(0); + + Filters::Common::ExtAuthz::Response response{}; + response.status = Filters::Common::ExtAuthz::CheckStatus::OK; + response.headers_to_add = Http::HeaderVector{{Http::LowerCaseString{"foo"}, "bar"}}; + request_callbacks_->onComplete(std::make_unique(response)); + EXPECT_EQ(1U, filter_callbacks_.clusterInfo()->statsScope().counter("ext_authz.ok").value()); +} + +// Verifies that the filter DOES NOT clear the route cache when an authorization response: +// 1. is an OK response. +// 2. has NO headers to add or to append. 
+TEST_F(HttpFilterTest, NoClearCacheRoute) { + InSequence s; + + initialize(R"EOF( + grpc_service: + envoy_grpc: + cluster_name: "ext_authz_server" + clear_route_cache: true + )EOF"); + + prepareCheck(); + + EXPECT_CALL(*client_, check(_, _, testing::A())) + .WillOnce( + WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { + request_callbacks_ = &callbacks; + }))); + EXPECT_CALL(filter_callbacks_, clearRouteCache()).Times(0); + EXPECT_EQ(Http::FilterHeadersStatus::StopAllIterationAndWatermark, + filter_->decodeHeaders(request_headers_, false)); + EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false)); + EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_headers_)); + EXPECT_CALL(filter_callbacks_, continueDecoding()); + EXPECT_CALL(filter_callbacks_.stream_info_, + setResponseFlag(Envoy::StreamInfo::ResponseFlag::UnauthorizedExternalService)) + .Times(0); + + Filters::Common::ExtAuthz::Response response{}; + response.status = Filters::Common::ExtAuthz::CheckStatus::OK; + request_callbacks_->onComplete(std::make_unique(response)); + EXPECT_EQ(1U, filter_callbacks_.clusterInfo()->statsScope().counter("ext_authz.ok").value()); +} + +// Verifies that the filter DOES NOT clear the route cache when clear_route_cache is set to false. 
+TEST_F(HttpFilterTest, NoClearCacheRouteConfig) { + InSequence s; + + initialize(R"EOF( + grpc_service: + envoy_grpc: + cluster_name: "ext_authz_server" + )EOF"); + + prepareCheck(); + + EXPECT_CALL(*client_, check(_, _, testing::A())) + .WillOnce( + WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { + request_callbacks_ = &callbacks; + }))); + EXPECT_CALL(filter_callbacks_, clearRouteCache()).Times(0); + EXPECT_EQ(Http::FilterHeadersStatus::StopAllIterationAndWatermark, + filter_->decodeHeaders(request_headers_, false)); + EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false)); + EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_headers_)); + EXPECT_CALL(filter_callbacks_, continueDecoding()); + EXPECT_CALL(filter_callbacks_.stream_info_, + setResponseFlag(Envoy::StreamInfo::ResponseFlag::UnauthorizedExternalService)) + .Times(0); + + Filters::Common::ExtAuthz::Response response{}; + response.status = Filters::Common::ExtAuthz::CheckStatus::OK; + response.headers_to_append = Http::HeaderVector{{Http::LowerCaseString{"foo"}, "bar"}}; + response.headers_to_add = Http::HeaderVector{{Http::LowerCaseString{"bar"}, "foo"}}; + request_callbacks_->onComplete(std::make_unique(response)); + EXPECT_EQ(1U, filter_callbacks_.clusterInfo()->statsScope().counter("ext_authz.ok").value()); +} + +// Verifies that the filter DOES NOT clear the route cache when authorization response is NOT OK. 
+TEST_F(HttpFilterTest, NoClearCacheRouteDeniedResponse) { + InSequence s; + + initialize(R"EOF( + grpc_service: + envoy_grpc: + cluster_name: "ext_authz_server" + clear_route_cache: true + )EOF"); + + prepareCheck(); + + Filters::Common::ExtAuthz::Response response{}; + response.status = Filters::Common::ExtAuthz::CheckStatus::Denied; + response.status_code = Http::Code::Unauthorized; + response.headers_to_add = Http::HeaderVector{{Http::LowerCaseString{"foo"}, "bar"}}; + auto response_ptr = std::make_unique(response); + + EXPECT_CALL(*client_, check(_, _, testing::A())) + .WillOnce( + WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { + callbacks.onComplete(std::move(response_ptr)); + }))); + EXPECT_CALL(filter_callbacks_, clearRouteCache()).Times(0); + EXPECT_CALL(filter_callbacks_, continueDecoding()).Times(0); + EXPECT_EQ(Http::FilterHeadersStatus::StopAllIterationAndWatermark, + filter_->decodeHeaders(request_headers_, false)); + EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false)); + EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_headers_)); + EXPECT_EQ(1U, filter_callbacks_.clusterInfo()->statsScope().counter("ext_authz.denied").value()); +} + // ------------------- // Parameterized Tests // ------------------- @@ -563,7 +782,7 @@ TEST_F(HttpFilterTestParam, DisabledOnRoute) { // baseline: make sure that when not disabled, check is called test_disable(false); - EXPECT_CALL(*client_, check(_, _, _)).Times(1); + EXPECT_CALL(*client_, check(_, _, testing::A())).Times(1); // Engage the filter. 
EXPECT_EQ(Http::FilterHeadersStatus::StopAllIterationAndWatermark, filter_->decodeHeaders(request_headers_, false)); From 5cb222964fb691d8cb6cb0f6dd8fdd7b5dbdbb61 Mon Sep 17 00:00:00 2001 From: Chengyuan Zhang Date: Thu, 18 Apr 2019 05:25:36 -0700 Subject: [PATCH 144/165] api: create OpenRCA service proto file (#6497) Created OpenRCA service proto file based on ORCA design Risk Level: Low Signed-off-by: Chengyuan Zhang --- api/udpa/data/orca/v1/BUILD | 16 +++++++++ api/udpa/data/orca/v1/orca_load_report.proto | 31 ++++++++++++++++ api/udpa/service/orca/v1/BUILD | 20 +++++++++++ api/udpa/service/orca/v1/orca.proto | 38 ++++++++++++++++++++ tools/spelling_dictionary.txt | 3 ++ 5 files changed, 108 insertions(+) create mode 100644 api/udpa/data/orca/v1/BUILD create mode 100644 api/udpa/data/orca/v1/orca_load_report.proto create mode 100644 api/udpa/service/orca/v1/BUILD create mode 100644 api/udpa/service/orca/v1/orca.proto diff --git a/api/udpa/data/orca/v1/BUILD b/api/udpa/data/orca/v1/BUILD new file mode 100644 index 0000000000000..096ca28bac3b3 --- /dev/null +++ b/api/udpa/data/orca/v1/BUILD @@ -0,0 +1,16 @@ +load("@envoy_api//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library") + +licenses(["notice"]) # Apache 2 + +api_proto_library( + name = "orca_load_report", + srcs = ["orca_load_report.proto"], + visibility = [ + "//visibility:public", + ], +) + +api_go_proto_library( + name = "orca_load_report", + proto = ":orca_load_report", +) diff --git a/api/udpa/data/orca/v1/orca_load_report.proto b/api/udpa/data/orca/v1/orca_load_report.proto new file mode 100644 index 0000000000000..bed48ed2a88ed --- /dev/null +++ b/api/udpa/data/orca/v1/orca_load_report.proto @@ -0,0 +1,31 @@ +syntax = "proto3"; + +package udpa.data.orca.v1; + +option java_outer_classname = "OrcaLoadReportProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.udpa.data.orca.v1"; +option go_package = "v1"; + +import "validate/validate.proto"; + +// See 
section `ORCA load report format` of the design document in +// :ref:`https://github.com/envoyproxy/envoy/issues/6614`. + +message OrcaLoadReport { + // CPU utilization expressed as a fraction of available CPU resources. This + // should be derived from a sample or measurement taken during the request. + double cpu_utilization = 1 [(validate.rules).double.gte = 0, (validate.rules).double.lte = 1]; + + // Memory utilization expressed as a fraction of available memory + // resources. This should be derived from a sample or measurement taken + // during the request. + double mem_utilization = 2 [(validate.rules).double.gte = 0, (validate.rules).double.lte = 1]; + + // Application specific requests costs. Each value may be an absolute cost (e.g. + // 3487 bytes of storage) or utilization associated with the request, + // expressed as a fraction of total resources available. Utilization + // metrics should be derived from a sample or measurement taken + // during the request. + map request_cost_or_utilization = 3; +} \ No newline at end of file diff --git a/api/udpa/service/orca/v1/BUILD b/api/udpa/service/orca/v1/BUILD new file mode 100644 index 0000000000000..72543e8092216 --- /dev/null +++ b/api/udpa/service/orca/v1/BUILD @@ -0,0 +1,20 @@ +load("@envoy_api//bazel:api_build_system.bzl", "api_go_grpc_library", "api_proto_library_internal") + +licenses(["notice"]) # Apache 2 + +api_proto_library_internal( + name = "orca", + srcs = ["orca.proto"], + has_services = 1, + deps = [ + "//udpa/data/orca/v1:orca_load_report", + ], +) + +api_go_grpc_library( + name = "orca", + proto = ":orca", + deps = [ + "//udpa/data/orca/v1:orca_load_report_go_proto", + ], +) diff --git a/api/udpa/service/orca/v1/orca.proto b/api/udpa/service/orca/v1/orca.proto new file mode 100644 index 0000000000000..87871d209a4cf --- /dev/null +++ b/api/udpa/service/orca/v1/orca.proto @@ -0,0 +1,38 @@ +syntax = "proto3"; + +package udpa.service.orca.v1; + +option java_outer_classname = "OrcaProto"; +option 
java_multiple_files = true; +option java_package = "io.envoyproxy.udpa.service.orca.v1"; +option go_package = "v1"; + +import "udpa/data/orca/v1/orca_load_report.proto"; + +import "google/protobuf/duration.proto"; + +import "validate/validate.proto"; + +// See section `Out-of-band (OOB) reporting` of the design document in +// :ref:`https://github.com/envoyproxy/envoy/issues/6614`. + +// Out-of-band (OOB) load reporting service for the additional load reporting +// agent that does not sit in the request path. Reports are periodically sampled +// with sufficient frequency to provide temporal association with requests. +// OOB reporting compensates the limitation of in-band reporting in revealing +// costs for backends that do not provide a steady stream of telemetry such as +// long running stream operations and zero QPS services. This is a server +// streaming service, client needs to terminate current RPC and initiate +// a new call to change backend reporting frequency. +service OpenRcaService { + rpc StreamCoreMetrics(OrcaLoadReportRequest) returns (stream udpa.data.orca.v1.OrcaLoadReport); +} + +message OrcaLoadReportRequest { + // Interval for generating Open RCA core metric responses. + google.protobuf.Duration report_interval = 1; + // Request costs to collect. If this is empty, all known requests costs tracked by + // the load reporting agent will be returned. This provides an opportunity for + // the client to selectively obtain a subset of tracked costs. 
+ repeated string request_cost_names = 2; +} diff --git a/tools/spelling_dictionary.txt b/tools/spelling_dictionary.txt index bd4a33889dd65..25c768b3558d7 100644 --- a/tools/spelling_dictionary.txt +++ b/tools/spelling_dictionary.txt @@ -160,8 +160,10 @@ NUL Nilsson OCSP OK +OOB OOM OOMs +ORCA OS OSI OSS @@ -181,6 +183,7 @@ POSTs PREBIND PRNG PROT +QPS QUIC RAII RANLUX From 5ea103ee0e88cca52f543fdd230ca9f84c518c88 Mon Sep 17 00:00:00 2001 From: Stephan Zuercher Date: Thu, 18 Apr 2019 07:27:40 -0700 Subject: [PATCH 145/165] router: support customizable retry back-off intervals (#6568) Default behavior remains unchanged: retries will use the runtime parameter defaulted to 25ms as the base interval and 250ms as the maximum. Allows routes to customize the base and maximum intervals. Risk Level: low (no change to default behavior) Testing: unit tests Doc Changes: included, plus updated description of back-off algorithm Release Notes: added Signed-off-by: Stephan Zuercher --- api/envoy/api/v2/route/route.proto | 29 +++++ .../http_filters/router_filter.rst | 23 ++-- docs/root/intro/version_history.rst | 1 + include/envoy/router/router.h | 10 ++ source/common/http/async_client_impl.h | 4 + source/common/router/config_impl.cc | 21 ++++ source/common/router/config_impl.h | 4 + source/common/router/retry_state_impl.cc | 19 ++- test/common/router/config_impl_test.cc | 109 ++++++++++++++++++ test/common/router/retry_state_impl_test.cc | 71 ++++++++++++ test/mocks/router/mocks.h | 4 + 11 files changed, 284 insertions(+), 11 deletions(-) diff --git a/api/envoy/api/v2/route/route.proto b/api/envoy/api/v2/route/route.proto index f0bda5fd8ace6..87232a78eadce 100644 --- a/api/envoy/api/v2/route/route.proto +++ b/api/envoy/api/v2/route/route.proto @@ -796,6 +796,7 @@ message RouteAction { } // HTTP retry :ref:`architecture overview `. +// [#comment:next free field: 9] message RetryPolicy { // Specifies the conditions under which retry takes place. 
These are the same // conditions documented for :ref:`config_http_filters_router_x-envoy-retry-on` and @@ -856,6 +857,34 @@ message RetryPolicy { // HTTP status codes that should trigger a retry in addition to those specified by retry_on. repeated uint32 retriable_status_codes = 7; + + message RetryBackOff { + // Specifies the base interval between retries. This parameter is required and must be greater + // than zero. Values less than 1 ms are rounded up to 1 ms. + // See :ref:`config_http_filters_router_x-envoy-max-retries` for a discussion of Envoy's + // back-off algorithm. + google.protobuf.Duration base_interval = 1 [ + (validate.rules).duration = { + required: true, + gt: {seconds: 0} + }, + (gogoproto.stdduration) = true + ]; + + // Specifies the maximum interval between retries. This parameter is optional, but must be + // greater than or equal to the `base_interval` if set. The default is 10 times the + // `base_interval`. See :ref:`config_http_filters_router_x-envoy-max-retries` for a discussion + // of Envoy's back-off algorithm. + google.protobuf.Duration max_interval = 2 + [(validate.rules).duration.gt = {seconds: 0}, (gogoproto.stdduration) = true]; + } + + // Specifies parameters that control retry back off. This parameter is optional, in which case the + // default base interval is 25 milliseconds or, if set, the current value of the + // `upstream.base_retry_backoff_ms` runtime parameter. The default maximum interval is 10 times + // the base interval. The documentation for :ref:`config_http_filters_router_x-envoy-max-retries` + // describes Envoy's back-off algorithm. 
+ RetryBackOff retry_back_off = 8; } // HTTP request hedging TODO(mpuncel) docs diff --git a/docs/root/configuration/http_filters/router_filter.rst b/docs/root/configuration/http_filters/router_filter.rst index cb5ab6a5941f6..d33a974eaad51 100644 --- a/docs/root/configuration/http_filters/router_filter.rst +++ b/docs/root/configuration/http_filters/router_filter.rst @@ -38,11 +38,17 @@ A few notes on how Envoy does retries: * The route timeout (set via :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms` or the :ref:`route configuration `) **includes** all retries. Thus if the request timeout is set to 3s, and the first request attempt takes 2.7s, the - retry (including backoff) has .3s to complete. This is by design to avoid an exponential + retry (including back-off) has .3s to complete. This is by design to avoid an exponential retry/timeout explosion. -* Envoy uses a fully jittered exponential backoff algorithm for retries with a base time of 25ms. - The first retry will be delayed randomly between 0-24ms, the 2nd between 0-74ms, the 3rd between - 0-174ms and so on. +* Envoy uses a fully jittered exponential back-off algorithm for retries with a default base + interval of 25ms. Given a base interval B and retry number N, the back-off for the retry is in + the range :math:`\big[0, (2^N-1)B\big)`. For example, given the default interval, the first retry + will be delayed randomly by 0-24ms, the 2nd by 0-74ms, the 3rd by 0-174ms, and so on. The + interval is capped at a maximum interval, which defaults to 10 times the base interval (250ms). + The default base interval (and therefore the maximum interval) can be manipulated by setting the + upstream.base_retry_backoff_ms runtime parameter. The back-off intervals can also be modified + by configuring the retry policy's + :ref:`retry back-off `. * If max retries is set both by header as well as in the route configuration, the maximum value is taken when determining the max retries to use for the request. 
@@ -156,7 +162,7 @@ x-envoy-retriable-status-codes Setting this header informs Envoy about what status codes should be considered retriable when used in conjunction with the :ref:`retriable-status-code ` retry policy. When the corresponding retry policy is set, the list of retriable status codes will be considered retriable -in addition to the status codes enabled for retry through other retry policies. +in addition to the status codes enabled for retry through other retry policies. The list is a comma delimited list of integers: "409" would cause 409 to be considered retriable, while "504,409" would consider both 504 and 409 retriable. @@ -239,7 +245,7 @@ x-envoy-ratelimited If this header is set by upstream, Envoy will not retry. Currently the value of the header is not looked at, only its presence. This header is set by :ref:`rate limit filter` -when the request is rate limited. +when the request is rate limited. .. _config_http_filters_router_x-envoy-decorator-operation: @@ -350,8 +356,9 @@ Runtime The router filter supports the following runtime settings: upstream.base_retry_backoff_ms - Base exponential retry back off time. See :ref:`here ` for more - information. Defaults to 25ms. + Base exponential retry back-off time. See :ref:`here ` and + :ref:`config_http_filters_router_x-envoy-max-retries` for more information. Defaults to 25ms. + The default maximum retry back-off time is 10 times this value. .. _config_http_filters_router_runtime_maintenance_mode: diff --git a/docs/root/intro/version_history.rst b/docs/root/intro/version_history.rst index 6d2771b906b78..3458bd0d5b55a 100644 --- a/docs/root/intro/version_history.rst +++ b/docs/root/intro/version_history.rst @@ -10,6 +10,7 @@ Version history * http: mitigated a race condition with the :ref:`delayed_close_timeout` where it could trigger while actively flushing a pending write buffer for a downstream connection. 
* redis: added :ref:`prefix routing ` to enable routing commands based on their key's prefix to different upstream. * redis: add support for zpopmax and zpopmin commands. +* router: added ability to control retry back-off intervals via :ref:`retry policy `. * upstream: added :ref:`upstream_cx_pool_overflow ` for the connection pool circuit breaker. 1.10.0 (Apr 5, 2019) diff --git a/include/envoy/router/router.h b/include/envoy/router/router.h index e090c9caadc63..acd9738ab08ee 100644 --- a/include/envoy/router/router.h +++ b/include/envoy/router/router.h @@ -201,6 +201,16 @@ class RetryPolicy { * policy is enabled. */ virtual const std::vector& retriableStatusCodes() const PURE; + + /** + * @return absl::optional base retry interval + */ + virtual absl::optional baseInterval() const PURE; + + /** + * @return absl::optional maximum retry interval + */ + virtual absl::optional maxInterval() const PURE; }; /** diff --git a/source/common/http/async_client_impl.h b/source/common/http/async_client_impl.h index 188c7a297355f..3e359099b22af 100644 --- a/source/common/http/async_client_impl.h +++ b/source/common/http/async_client_impl.h @@ -148,6 +148,10 @@ class AsyncStreamImpl : public AsyncClient::Stream, const std::vector& retriableStatusCodes() const override { return retriable_status_codes_; } + absl::optional baseInterval() const override { + return absl::nullopt; + } + absl::optional maxInterval() const override { return absl::nullopt; } const std::vector retriable_status_codes_{}; }; diff --git a/source/common/router/config_impl.cc b/source/common/router/config_impl.cc index 1b119c123328d..ca8b703c57291 100644 --- a/source/common/router/config_impl.cc +++ b/source/common/router/config_impl.cc @@ -96,6 +96,27 @@ RetryPolicyImpl::RetryPolicyImpl(const envoy::api::v2::route::RetryPolicy& retry for (auto code : retry_policy.retriable_status_codes()) { retriable_status_codes_.emplace_back(code); } + + if (retry_policy.has_retry_back_off()) { + base_interval_ = 
std::chrono::milliseconds( + PROTOBUF_GET_MS_REQUIRED(retry_policy.retry_back_off(), base_interval)); + if ((*base_interval_).count() < 1) { + base_interval_ = std::chrono::milliseconds(1); + } + + max_interval_ = PROTOBUF_GET_OPTIONAL_MS(retry_policy.retry_back_off(), max_interval); + if (max_interval_) { + // Apply the same rounding to max interval in case both are set to sub-millisecond values. + if ((*max_interval_).count() < 1) { + max_interval_ = std::chrono::milliseconds(1); + } + + if ((*max_interval_).count() < (*base_interval_).count()) { + throw EnvoyException( + "retry_policy.max_interval must greater than or equal to the base_interval"); + } + } + } } std::vector RetryPolicyImpl::retryHostPredicates() const { diff --git a/source/common/router/config_impl.h b/source/common/router/config_impl.h index c9fdeb40403e3..2ded20d38e4fe 100644 --- a/source/common/router/config_impl.h +++ b/source/common/router/config_impl.h @@ -227,6 +227,8 @@ class RetryPolicyImpl : public RetryPolicy { const std::vector& retriableStatusCodes() const override { return retriable_status_codes_; } + absl::optional baseInterval() const override { return base_interval_; } + absl::optional maxInterval() const override { return max_interval_; } private: std::chrono::milliseconds per_try_timeout_{0}; @@ -241,6 +243,8 @@ class RetryPolicyImpl : public RetryPolicy { std::pair retry_priority_config_; uint32_t host_selection_attempts_{1}; std::vector retriable_status_codes_; + absl::optional base_interval_; + absl::optional max_interval_; }; /** diff --git a/source/common/router/retry_state_impl.cc b/source/common/router/retry_state_impl.cc index 3d84a4208e260..ca6b60d077690 100644 --- a/source/common/router/retry_state_impl.cc +++ b/source/common/router/retry_state_impl.cc @@ -59,9 +59,22 @@ RetryStateImpl::RetryStateImpl(const RetryPolicy& route_policy, Http::HeaderMap& retry_on_ = route_policy.retryOn(); retries_remaining_ = std::max(retries_remaining_, route_policy.numRetries()); - 
const uint32_t base = runtime_.snapshot().getInteger("upstream.base_retry_backoff_ms", 25); - // Cap the max interval to 10 times the base interval to ensure reasonable backoff intervals. - backoff_strategy_ = std::make_unique(base, base * 10, random_); + + std::chrono::milliseconds base_interval( + runtime_.snapshot().getInteger("upstream.base_retry_backoff_ms", 25)); + if (route_policy.baseInterval()) { + base_interval = *route_policy.baseInterval(); + } + + // By default, cap the max interval to 10 times the base interval to ensure reasonable back-off + // intervals. + std::chrono::milliseconds max_interval = base_interval * 10; + if (route_policy.maxInterval()) { + max_interval = *route_policy.maxInterval(); + } + + backoff_strategy_ = std::make_unique(base_interval.count(), + max_interval.count(), random_); host_selection_max_attempts_ = route_policy.hostSelectionMaxAttempts(); // Merge in the headers. diff --git a/test/common/router/config_impl_test.cc b/test/common/router/config_impl_test.cc index 4215294deaf50..b2d2a1589262d 100644 --- a/test/common/router/config_impl_test.cc +++ b/test/common/router/config_impl_test.cc @@ -2399,6 +2399,115 @@ TEST_F(RouteMatcherTest, GrpcRetry) { .retryOn()); } +// Test route-specific retry back-off intervals. 
+TEST_F(RouteMatcherTest, RetryBackOffIntervals) { + const std::string yaml = R"EOF( +virtual_hosts: +- name: www2 + domains: + - www.lyft.com + routes: + - match: + prefix: "/foo" + route: + cluster: www2 + retry_policy: + retry_back_off: + base_interval: 0.050s + - match: + prefix: "/bar" + route: + cluster: www2 + retry_policy: + retry_back_off: + base_interval: 0.100s + max_interval: 0.500s + - match: + prefix: "/baz" + route: + cluster: www2 + retry_policy: + retry_back_off: + base_interval: 0.0001s # < 1 ms + max_interval: 0.0001s + - match: + prefix: "/" + route: + cluster: www2 + retry_policy: + retry_on: connect-failure + )EOF"; + + TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + + EXPECT_EQ(absl::optional(50), + config.route(genHeaders("www.lyft.com", "/foo", "GET"), 0) + ->routeEntry() + ->retryPolicy() + .baseInterval()); + + EXPECT_EQ(absl::nullopt, config.route(genHeaders("www.lyft.com", "/foo", "GET"), 0) + ->routeEntry() + ->retryPolicy() + .maxInterval()); + + EXPECT_EQ(absl::optional(100), + config.route(genHeaders("www.lyft.com", "/bar", "GET"), 0) + ->routeEntry() + ->retryPolicy() + .baseInterval()); + + EXPECT_EQ(absl::optional(500), + config.route(genHeaders("www.lyft.com", "/bar", "GET"), 0) + ->routeEntry() + ->retryPolicy() + .maxInterval()); + + // Sub-millisecond interval converted to 1 ms. 
+ EXPECT_EQ(absl::optional(1), + config.route(genHeaders("www.lyft.com", "/baz", "GET"), 0) + ->routeEntry() + ->retryPolicy() + .baseInterval()); + + EXPECT_EQ(absl::optional(1), + config.route(genHeaders("www.lyft.com", "/baz", "GET"), 0) + ->routeEntry() + ->retryPolicy() + .maxInterval()); + + EXPECT_EQ(absl::nullopt, config.route(genHeaders("www.lyft.com", "/", "GET"), 0) + ->routeEntry() + ->retryPolicy() + .baseInterval()); + + EXPECT_EQ(absl::nullopt, config.route(genHeaders("www.lyft.com", "/", "GET"), 0) + ->routeEntry() + ->retryPolicy() + .maxInterval()); +} + +// Test invalid route-specific retry back-off configs. +TEST_F(RouteMatcherTest, InvalidRetryBackOff) { + const std::string invalid_max = R"EOF( +virtual_hosts: + - name: backoff + domains: ["*"] + routes: + - match: { prefix: "/" } + route: + cluster: backoff + retry_policy: + retry_back_off: + base_interval: 10s + max_interval: 5s +)EOF"; + + EXPECT_THROW_WITH_MESSAGE( + TestConfigImpl(parseRouteConfigurationFromV2Yaml(invalid_max), factory_context_, true), + EnvoyException, "retry_policy.max_interval must greater than or equal to the base_interval"); +} + TEST_F(RouteMatcherTest, HedgeRouteLevel) { const std::string yaml = R"EOF( name: HedgeRouteLevel diff --git a/test/common/router/retry_state_impl_test.cc b/test/common/router/retry_state_impl_test.cc index af96861b8b9a7..415fa5b32ec28 100644 --- a/test/common/router/retry_state_impl_test.cc +++ b/test/common/router/retry_state_impl_test.cc @@ -505,6 +505,77 @@ TEST_F(RouterRetryStateImplTest, Backoff) { EXPECT_EQ(0UL, cluster_.circuit_breakers_stats_.rq_retry_open_.value()); } +// Test customized retry back-off intervals. 
+TEST_F(RouterRetryStateImplTest, CustomBackOffInterval) { + policy_.num_retries_ = 10; + policy_.retry_on_ = RetryPolicy::RETRY_ON_CONNECT_FAILURE; + policy_.base_interval_ = std::chrono::milliseconds(100); + policy_.max_interval_ = std::chrono::milliseconds(1200); + Http::TestHeaderMapImpl request_headers; + setup(request_headers); + EXPECT_TRUE(state_->enabled()); + + EXPECT_CALL(random_, random()).WillOnce(Return(149)); + retry_timer_ = new Event::MockTimer(&dispatcher_); + EXPECT_CALL(*retry_timer_, enableTimer(std::chrono::milliseconds(49))); + EXPECT_EQ(RetryStatus::Yes, state_->shouldRetryReset(connect_failure_, callback_)); + EXPECT_CALL(callback_ready_, ready()); + retry_timer_->callback_(); + + EXPECT_CALL(random_, random()).WillOnce(Return(350)); + EXPECT_CALL(*retry_timer_, enableTimer(std::chrono::milliseconds(50))); + EXPECT_EQ(RetryStatus::Yes, state_->shouldRetryReset(connect_failure_, callback_)); + EXPECT_CALL(callback_ready_, ready()); + retry_timer_->callback_(); + + EXPECT_CALL(random_, random()).WillOnce(Return(751)); + EXPECT_CALL(*retry_timer_, enableTimer(std::chrono::milliseconds(51))); + EXPECT_EQ(RetryStatus::Yes, state_->shouldRetryReset(connect_failure_, callback_)); + EXPECT_CALL(callback_ready_, ready()); + retry_timer_->callback_(); + + EXPECT_CALL(random_, random()).WillOnce(Return(1499)); + EXPECT_CALL(*retry_timer_, enableTimer(std::chrono::milliseconds(1200))); + EXPECT_EQ(RetryStatus::Yes, state_->shouldRetryReset(connect_failure_, callback_)); + EXPECT_CALL(callback_ready_, ready()); + retry_timer_->callback_(); +} + +// Test the default maximum retry back-off interval. 
+TEST_F(RouterRetryStateImplTest, CustomBackOffIntervalDefaultMax) { + policy_.num_retries_ = 10; + policy_.retry_on_ = RetryPolicy::RETRY_ON_CONNECT_FAILURE; + policy_.base_interval_ = std::chrono::milliseconds(100); + Http::TestHeaderMapImpl request_headers; + setup(request_headers); + EXPECT_TRUE(state_->enabled()); + + EXPECT_CALL(random_, random()).WillOnce(Return(149)); + retry_timer_ = new Event::MockTimer(&dispatcher_); + EXPECT_CALL(*retry_timer_, enableTimer(std::chrono::milliseconds(49))); + EXPECT_EQ(RetryStatus::Yes, state_->shouldRetryReset(connect_failure_, callback_)); + EXPECT_CALL(callback_ready_, ready()); + retry_timer_->callback_(); + + EXPECT_CALL(random_, random()).WillOnce(Return(350)); + EXPECT_CALL(*retry_timer_, enableTimer(std::chrono::milliseconds(50))); + EXPECT_EQ(RetryStatus::Yes, state_->shouldRetryReset(connect_failure_, callback_)); + EXPECT_CALL(callback_ready_, ready()); + retry_timer_->callback_(); + + EXPECT_CALL(random_, random()).WillOnce(Return(751)); + EXPECT_CALL(*retry_timer_, enableTimer(std::chrono::milliseconds(51))); + EXPECT_EQ(RetryStatus::Yes, state_->shouldRetryReset(connect_failure_, callback_)); + EXPECT_CALL(callback_ready_, ready()); + retry_timer_->callback_(); + + EXPECT_CALL(random_, random()).WillOnce(Return(1499)); + EXPECT_CALL(*retry_timer_, enableTimer(std::chrono::milliseconds(1000))); + EXPECT_EQ(RetryStatus::Yes, state_->shouldRetryReset(connect_failure_, callback_)); + EXPECT_CALL(callback_ready_, ready()); + retry_timer_->callback_(); +} + TEST_F(RouterRetryStateImplTest, HostSelectionAttempts) { policy_.host_selection_max_attempts_ = 2; policy_.retry_on_ = RetryPolicy::RETRY_ON_CONNECT_FAILURE; diff --git a/test/mocks/router/mocks.h b/test/mocks/router/mocks.h index ba40815aaab49..f49cd5f719363 100644 --- a/test/mocks/router/mocks.h +++ b/test/mocks/router/mocks.h @@ -91,12 +91,16 @@ class TestRetryPolicy : public RetryPolicy { const std::vector& retriableStatusCodes() const override { return 
retriable_status_codes_; } + absl::optional baseInterval() const override { return base_interval_; } + absl::optional maxInterval() const override { return max_interval_; } std::chrono::milliseconds per_try_timeout_{0}; uint32_t num_retries_{}; uint32_t retry_on_{}; uint32_t host_selection_max_attempts_; std::vector retriable_status_codes_; + absl::optional base_interval_{}; + absl::optional max_interval_{}; }; class MockRetryState : public RetryState { From a039b9d8be841f003cec24e78ad9e4e35a774b9f Mon Sep 17 00:00:00 2001 From: danzh Date: Thu, 18 Apr 2019 12:32:14 -0400 Subject: [PATCH 146/165] quiche: Implement SpdyUnsafeArena using SpdySimpleArena (#6612) Signed-off-by: Dan Zhang --- bazel/external/quiche.BUILD | 15 +++++++++++++++ bazel/repository_locations.bzl | 6 +++--- .../quic_listeners/quiche/platform/BUILD | 12 +++++++++++- .../quiche/platform/quic_logging_impl.h | 2 ++ .../quiche/platform/spdy_unsafe_arena_impl.h | 5 +++-- 5 files changed, 34 insertions(+), 6 deletions(-) diff --git a/bazel/external/quiche.BUILD b/bazel/external/quiche.BUILD index d3b82aad1b915..2b65a5dddd787 100644 --- a/bazel/external/quiche.BUILD +++ b/bazel/external/quiche.BUILD @@ -103,6 +103,21 @@ cc_library( deps = ["@envoy//source/extensions/quic_listeners/quiche/platform:spdy_platform_impl_lib"], ) +cc_library( + name = "spdy_simple_arena_lib", + srcs = ["quiche/spdy/core/spdy_simple_arena.cc"], + hdrs = ["quiche/spdy/core/spdy_simple_arena.h"], + visibility = ["//visibility:public"], + deps = [":spdy_platform"], +) + +cc_library( + name = "spdy_platform_unsafe_arena_lib", + hdrs = ["quiche/spdy/platform/api/spdy_unsafe_arena.h"], + visibility = ["//visibility:public"], + deps = ["@envoy//source/extensions/quic_listeners/quiche/platform:spdy_platform_unsafe_arena_impl_lib"], +) + cc_library( name = "quic_platform", srcs = ["quiche/quic/platform/api/quic_mutex.cc"] + envoy_select_quiche( diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index 
cbe18bc5dd46c..c6ed9801e5281 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -238,8 +238,8 @@ REPOSITORY_LOCATIONS = dict( urls = ["https://github.com/google/subpar/archive/1.3.0.tar.gz"], ), com_googlesource_quiche = dict( - # Static snapshot of https://quiche.googlesource.com/quiche/+archive/ba6354aa1b39f3d9788ead909ad3e678ac863938.tar.gz - sha256 = "4598537810c3d343c32333c5367fcb652638018118f7f4e844e080405d9e73bb", - urls = ["https://storage.googleapis.com/quiche-envoy-integration/ba6354aa1b39f3d9788ead909ad3e678ac863938.tar.gz"], + # Static snapshot of https://quiche.googlesource.com/quiche/+archive/840edb6d672931ff936004fc35a82ecac6060844.tar.gz + sha256 = "1aba26cec596e9f3b52d93fe40e1640c854e3a4c8949e362647f67eb8e2382e3", + urls = ["https://storage.googleapis.com/quiche-envoy-integration/840edb6d672931ff936004fc35a82ecac6060844.tar.gz"], ), ) diff --git a/source/extensions/quic_listeners/quiche/platform/BUILD b/source/extensions/quic_listeners/quiche/platform/BUILD index 0cfc9373b4e9c..f290bc635f3d0 100644 --- a/source/extensions/quic_listeners/quiche/platform/BUILD +++ b/source/extensions/quic_listeners/quiche/platform/BUILD @@ -189,7 +189,6 @@ envoy_cc_library( "spdy_string_piece_impl.h", "spdy_test_helpers_impl.h", "spdy_test_utils_prod_impl.h", - "spdy_unsafe_arena_impl.h", ] + envoy_select_quiche([ "spdy_bug_tracker_impl.h", "spdy_logging_impl.h", @@ -209,3 +208,14 @@ envoy_cc_library( "//source/common/common:assert_lib", ]), ) + +envoy_cc_library( + name = "spdy_platform_unsafe_arena_impl_lib", + hdrs = [ + "spdy_unsafe_arena_impl.h", + ], + visibility = ["//visibility:public"], + deps = envoy_select_quiche([ + "@com_googlesource_quiche//:spdy_simple_arena_lib", + ]), +) diff --git a/source/extensions/quic_listeners/quiche/platform/quic_logging_impl.h b/source/extensions/quic_listeners/quiche/platform/quic_logging_impl.h index bf4e426616fa1..131e6292068d0 100644 --- 
a/source/extensions/quic_listeners/quiche/platform/quic_logging_impl.h +++ b/source/extensions/quic_listeners/quiche/platform/quic_logging_impl.h @@ -82,6 +82,8 @@ #define QUIC_NOTREACHED_IMPL() NOT_REACHED_GCOVR_EXCL_LINE #endif +#define DCHECK_GE(a, b) DCHECK((a) >= (b)) + #define QUIC_PREDICT_FALSE_IMPL(x) ABSL_PREDICT_FALSE(x) namespace quic { diff --git a/source/extensions/quic_listeners/quiche/platform/spdy_unsafe_arena_impl.h b/source/extensions/quic_listeners/quiche/platform/spdy_unsafe_arena_impl.h index 8db8e4a20bd44..d731d001dcef0 100644 --- a/source/extensions/quic_listeners/quiche/platform/spdy_unsafe_arena_impl.h +++ b/source/extensions/quic_listeners/quiche/platform/spdy_unsafe_arena_impl.h @@ -6,9 +6,10 @@ // consumed or referenced directly by other Envoy code. It serves purely as a // porting layer for QUICHE. +#include "quiche/spdy/core/spdy_simple_arena.h" + namespace spdy { -// TODO: implement -class SpdyUnsafeArenaImpl {}; +using SpdyUnsafeArenaImpl = SpdySimpleArena; } // namespace spdy From 40503920d9764a211a0001ff7c517fc106907179 Mon Sep 17 00:00:00 2001 From: Christopher Date: Thu, 18 Apr 2019 12:33:13 -0400 Subject: [PATCH 147/165] examples: standardize docker-compose version and yaml extension (#6613) Signed-off-by: Chris Paika --- docs/root/start/sandboxes/front_proxy.rst | 2 +- docs/root/start/sandboxes/grpc_bridge.rst | 6 +++--- docs/root/start/sandboxes/jaeger_native_tracing.rst | 2 +- docs/root/start/sandboxes/jaeger_tracing.rst | 2 +- docs/root/start/sandboxes/zipkin_tracing.rst | 2 +- .../backend/{docker-compose.yml => docker-compose.yaml} | 2 +- .../frontend/{docker-compose.yml => docker-compose.yaml} | 2 +- .../{docker-compose.yml => docker-compose.yaml} | 2 +- .../front-proxy/{docker-compose.yml => docker-compose.yaml} | 2 +- examples/grpc-bridge/Dockerfile-grpc | 2 +- .../grpc-bridge/{docker-compose.yml => docker-compose.yaml} | 2 +- examples/grpc-bridge/script/{bootstrap => bootstrap.sh} | 0 
examples/grpc-bridge/script/{grpc_start => grpc_start.sh} | 0 .../{docker-compose.yml => docker-compose.yaml} | 2 +- .../{docker-compose.yml => docker-compose.yaml} | 2 +- examples/lua/docker-compose.yaml | 2 +- examples/mysql/docker-compose.yaml | 2 +- examples/redis/docker-compose.yaml | 2 +- .../{docker-compose.yml => docker-compose.yaml} | 2 +- 19 files changed, 19 insertions(+), 19 deletions(-) rename examples/cors/backend/{docker-compose.yml => docker-compose.yaml} (97%) rename examples/cors/frontend/{docker-compose.yml => docker-compose.yaml} (97%) rename examples/fault-injection/{docker-compose.yml => docker-compose.yaml} (97%) rename examples/front-proxy/{docker-compose.yml => docker-compose.yaml} (98%) rename examples/grpc-bridge/{docker-compose.yml => docker-compose.yaml} (96%) rename examples/grpc-bridge/script/{bootstrap => bootstrap.sh} (100%) rename examples/grpc-bridge/script/{grpc_start => grpc_start.sh} (100%) rename examples/jaeger-native-tracing/{docker-compose.yml => docker-compose.yaml} (99%) rename examples/jaeger-tracing/{docker-compose.yml => docker-compose.yaml} (98%) rename examples/zipkin-tracing/{docker-compose.yml => docker-compose.yaml} (98%) diff --git a/docs/root/start/sandboxes/front_proxy.rst b/docs/root/start/sandboxes/front_proxy.rst index bfcfae2d02fc8..73e12cf1588a6 100644 --- a/docs/root/start/sandboxes/front_proxy.rst +++ b/docs/root/start/sandboxes/front_proxy.rst @@ -16,7 +16,7 @@ Below you can see a graphic showing the docker compose deployment: All incoming requests are routed via the front Envoy, which is acting as a reverse proxy sitting on the edge of the ``envoymesh`` network. Port ``80`` is mapped to port ``8000`` by docker compose -(see :repo:`/examples/front-proxy/docker-compose.yml`). Moreover, notice +(see :repo:`/examples/front-proxy/docker-compose.yaml`). 
Moreover, notice that all traffic routed by the front Envoy to the service containers is actually routed to the service Envoys (routes setup in :repo:`/examples/front-proxy/front-envoy.yaml`). In turn the service envoys route the request to the flask app via the loopback address (routes setup in diff --git a/docs/root/start/sandboxes/grpc_bridge.rst b/docs/root/start/sandboxes/grpc_bridge.rst index c08b54f1ed218..aa61e60742699 100644 --- a/docs/root/start/sandboxes/grpc_bridge.rst +++ b/docs/root/start/sandboxes/grpc_bridge.rst @@ -23,10 +23,10 @@ To build the Go gRPC service run:: $ pwd envoy/examples/grpc-bridge - $ script/bootstrap - $ script/build + $ script/bootstrap.sh + $ script/build.sh -Note: ``build`` requires that your Envoy codebase (or a working copy thereof) is in ``$GOPATH/src/github.com/envoyproxy/envoy``. +Note: ``build.sh`` requires that your Envoy codebase (or a working copy thereof) is in ``$GOPATH/src/github.com/envoyproxy/envoy``. Docker compose ~~~~~~~~~~~~~~ diff --git a/docs/root/start/sandboxes/jaeger_native_tracing.rst b/docs/root/start/sandboxes/jaeger_native_tracing.rst index 52bdda5e269f4..07193e03f9740 100644 --- a/docs/root/start/sandboxes/jaeger_native_tracing.rst +++ b/docs/root/start/sandboxes/jaeger_native_tracing.rst @@ -22,7 +22,7 @@ only works on x86-64). All incoming requests are routed via the front Envoy, which is acting as a reverse proxy sitting on the edge of the ``envoymesh`` network. Port ``80`` is mapped to port ``8000`` -by docker compose (see :repo:`/examples/jaeger-native-tracing/docker-compose.yml`). Notice that +by docker compose (see :repo:`/examples/jaeger-native-tracing/docker-compose.yaml`). 
Notice that all Envoys are configured to collect request traces (e.g., http_connection_manager/config/tracing setup in :repo:`/examples/jaeger-native-tracing/front-envoy-jaeger.yaml`) and setup to propagate the spans generated by the Jaeger tracer to a Jaeger cluster (trace driver setup diff --git a/docs/root/start/sandboxes/jaeger_tracing.rst b/docs/root/start/sandboxes/jaeger_tracing.rst index 4277b60ff5d47..bad25e5bd26fc 100644 --- a/docs/root/start/sandboxes/jaeger_tracing.rst +++ b/docs/root/start/sandboxes/jaeger_tracing.rst @@ -11,7 +11,7 @@ The three containers will be deployed inside a virtual network called ``envoymes All incoming requests are routed via the front Envoy, which is acting as a reverse proxy sitting on the edge of the ``envoymesh`` network. Port ``80`` is mapped to port ``8000`` -by docker compose (see :repo:`/examples/jaeger-tracing/docker-compose.yml`). Notice that +by docker compose (see :repo:`/examples/jaeger-tracing/docker-compose.yaml`). Notice that all Envoys are configured to collect request traces (e.g., http_connection_manager/config/tracing setup in :repo:`/examples/jaeger-tracing/front-envoy-jaeger.yaml`) and setup to propagate the spans generated by the Jaeger tracer to a Jaeger cluster (trace driver setup diff --git a/docs/root/start/sandboxes/zipkin_tracing.rst b/docs/root/start/sandboxes/zipkin_tracing.rst index c26e492960dd4..c64ce82e9f39a 100644 --- a/docs/root/start/sandboxes/zipkin_tracing.rst +++ b/docs/root/start/sandboxes/zipkin_tracing.rst @@ -11,7 +11,7 @@ The three containers will be deployed inside a virtual network called ``envoymes All incoming requests are routed via the front Envoy, which is acting as a reverse proxy sitting on the edge of the ``envoymesh`` network. Port ``80`` is mapped to port ``8000`` -by docker compose (see :repo:`/examples/zipkin-tracing/docker-compose.yml`). Notice that +by docker compose (see :repo:`/examples/zipkin-tracing/docker-compose.yaml`). 
Notice that all Envoys are configured to collect request traces (e.g., http_connection_manager/config/tracing setup in :repo:`/examples/zipkin-tracing/front-envoy-zipkin.yaml`) and setup to propagate the spans generated by the Zipkin tracer to a Zipkin cluster (trace driver setup diff --git a/examples/cors/backend/docker-compose.yml b/examples/cors/backend/docker-compose.yaml similarity index 97% rename from examples/cors/backend/docker-compose.yml rename to examples/cors/backend/docker-compose.yaml index 62f6bd2bcbb3b..987b4ef157bab 100644 --- a/examples/cors/backend/docker-compose.yml +++ b/examples/cors/backend/docker-compose.yaml @@ -1,4 +1,4 @@ -version: '2' +version: "3.7" services: front-envoy: diff --git a/examples/cors/frontend/docker-compose.yml b/examples/cors/frontend/docker-compose.yaml similarity index 97% rename from examples/cors/frontend/docker-compose.yml rename to examples/cors/frontend/docker-compose.yaml index b29dc7f7d37cd..96b19d222e431 100644 --- a/examples/cors/frontend/docker-compose.yml +++ b/examples/cors/frontend/docker-compose.yaml @@ -1,4 +1,4 @@ -version: '2' +version: "3.7" services: front-envoy: diff --git a/examples/fault-injection/docker-compose.yml b/examples/fault-injection/docker-compose.yaml similarity index 97% rename from examples/fault-injection/docker-compose.yml rename to examples/fault-injection/docker-compose.yaml index b3b3f1da27c44..fe8ec0c9d68fb 100644 --- a/examples/fault-injection/docker-compose.yml +++ b/examples/fault-injection/docker-compose.yaml @@ -1,4 +1,4 @@ -version: '2' +version: "3.7" services: envoy: build: diff --git a/examples/front-proxy/docker-compose.yml b/examples/front-proxy/docker-compose.yaml similarity index 98% rename from examples/front-proxy/docker-compose.yml rename to examples/front-proxy/docker-compose.yaml index 3d371f889c694..34491c3636ce0 100644 --- a/examples/front-proxy/docker-compose.yml +++ b/examples/front-proxy/docker-compose.yaml @@ -1,4 +1,4 @@ -version: '3.7' +version: "3.7" 
services: front-envoy: diff --git a/examples/grpc-bridge/Dockerfile-grpc b/examples/grpc-bridge/Dockerfile-grpc index f8e3cb3ad27a4..679b0a728a01c 100644 --- a/examples/grpc-bridge/Dockerfile-grpc +++ b/examples/grpc-bridge/Dockerfile-grpc @@ -2,6 +2,6 @@ FROM envoyproxy/envoy-dev:latest RUN mkdir /var/log/envoy/ COPY ./bin/service /usr/local/bin/srv -COPY ./script/grpc_start /etc/grpc_start +COPY ./script/grpc_start.sh /etc/grpc_start CMD /etc/grpc_start diff --git a/examples/grpc-bridge/docker-compose.yml b/examples/grpc-bridge/docker-compose.yaml similarity index 96% rename from examples/grpc-bridge/docker-compose.yml rename to examples/grpc-bridge/docker-compose.yaml index 6cc1cf48843b2..25d5b1fb3cd36 100644 --- a/examples/grpc-bridge/docker-compose.yml +++ b/examples/grpc-bridge/docker-compose.yaml @@ -1,4 +1,4 @@ -version: '2' +version: "3.7" services: python: diff --git a/examples/grpc-bridge/script/bootstrap b/examples/grpc-bridge/script/bootstrap.sh similarity index 100% rename from examples/grpc-bridge/script/bootstrap rename to examples/grpc-bridge/script/bootstrap.sh diff --git a/examples/grpc-bridge/script/grpc_start b/examples/grpc-bridge/script/grpc_start.sh similarity index 100% rename from examples/grpc-bridge/script/grpc_start rename to examples/grpc-bridge/script/grpc_start.sh diff --git a/examples/jaeger-native-tracing/docker-compose.yml b/examples/jaeger-native-tracing/docker-compose.yaml similarity index 99% rename from examples/jaeger-native-tracing/docker-compose.yml rename to examples/jaeger-native-tracing/docker-compose.yaml index c829cf41732c6..3321e110cbb83 100644 --- a/examples/jaeger-native-tracing/docker-compose.yml +++ b/examples/jaeger-native-tracing/docker-compose.yaml @@ -1,4 +1,4 @@ -version: '2' +version: "3.7" services: front-envoy: diff --git a/examples/jaeger-tracing/docker-compose.yml b/examples/jaeger-tracing/docker-compose.yaml similarity index 98% rename from examples/jaeger-tracing/docker-compose.yml rename to 
examples/jaeger-tracing/docker-compose.yaml index 2c75265724eaa..6c353fada6f49 100644 --- a/examples/jaeger-tracing/docker-compose.yml +++ b/examples/jaeger-tracing/docker-compose.yaml @@ -1,4 +1,4 @@ -version: '2' +version: "3.7" services: front-envoy: diff --git a/examples/lua/docker-compose.yaml b/examples/lua/docker-compose.yaml index 31ce3501ea1a8..2ee4860cfc48d 100644 --- a/examples/lua/docker-compose.yaml +++ b/examples/lua/docker-compose.yaml @@ -1,4 +1,4 @@ -version: '2' +version: "3.7" services: proxy: diff --git a/examples/mysql/docker-compose.yaml b/examples/mysql/docker-compose.yaml index 0f08d279fa615..d4b8b13e13c1e 100644 --- a/examples/mysql/docker-compose.yaml +++ b/examples/mysql/docker-compose.yaml @@ -1,4 +1,4 @@ -version: '3.5' +version: "3.7" services: proxy: diff --git a/examples/redis/docker-compose.yaml b/examples/redis/docker-compose.yaml index 0e18c9019c6e8..5b2d82a6e1163 100644 --- a/examples/redis/docker-compose.yaml +++ b/examples/redis/docker-compose.yaml @@ -1,4 +1,4 @@ -version: '2' +version: "3.7" services: proxy: diff --git a/examples/zipkin-tracing/docker-compose.yml b/examples/zipkin-tracing/docker-compose.yaml similarity index 98% rename from examples/zipkin-tracing/docker-compose.yml rename to examples/zipkin-tracing/docker-compose.yaml index 02673d611ee73..132e752793058 100644 --- a/examples/zipkin-tracing/docker-compose.yml +++ b/examples/zipkin-tracing/docker-compose.yaml @@ -1,4 +1,4 @@ -version: '2' +version: "3.7" services: front-envoy: From 109d23a6648b1ac7723c4b2f125e913125bb9a84 Mon Sep 17 00:00:00 2001 From: Dan Rosen Date: Thu, 18 Apr 2019 14:37:30 -0400 Subject: [PATCH 148/165] event: update libevent dependency to fix race condition (#6637) @htuch discovered a race condition in my libevent watcher implementation in the process of enabling TSAN for dependencies (#6610). Update libevent to pull in the fix (libevent/libevent#793). 
Risk Level: low Testing: bazel test //test/server:worker_impl_test -c dbg --config=clang-tsan --runs_per_test=1000 (with @htuch's patch applied). Signed-off-by: Dan Rosen --- bazel/repository_locations.bzl | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index c6ed9801e5281..e1fdafa920036 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -129,11 +129,13 @@ REPOSITORY_LOCATIONS = dict( urls = ["https://github.com/google/benchmark/archive/505be96ab23056580a3a2315abba048f4428b04e.tar.gz"], ), com_github_libevent_libevent = dict( - sha256 = "217d7282d41faabac8c74d8ea0f215d8fa065691fb4b1f9205cbe16a2a65c1cc", - # This SHA is when "prepare" and "check" watchers were added to libevent (see - # https://github.com/libevent/libevent/pull/793). Update to v2.2 when it is released. - strip_prefix = "libevent-2f184f8bbf23377bddc8daa1a2c7b40735ee7e2a", - urls = ["https://github.com/libevent/libevent/archive/2f184f8bbf23377bddc8daa1a2c7b40735ee7e2a.tar.gz"], + sha256 = "ab3af422b7e4c6d9276b3637d87edb6cf628fd91c9206260b759778c3a28b330", + # This SHA includes the new "prepare" and "check" watchers, used for event loop performance + # stats (see https://github.com/libevent/libevent/pull/793) and the fix for a race condition + # in the watchers (see https://github.com/libevent/libevent/pull/802). + # TODO(mergeconflict): Update to v2.2 when it is released. 
+ strip_prefix = "libevent-1cd8830de27c30c5324c75bfb6012c969c09ca2c", + urls = ["https://github.com/libevent/libevent/archive/1cd8830de27c30c5324c75bfb6012c969c09ca2c.tar.gz"], ), com_github_madler_zlib = dict( sha256 = "629380c90a77b964d896ed37163f5c3a34f6e6d897311f1df2a7016355c45eff", From 3b8e1764a6119dbc367710a80acb39e579b08d81 Mon Sep 17 00:00:00 2001 From: Matt Klein Date: Thu, 18 Apr 2019 14:57:08 -0700 Subject: [PATCH 149/165] fault filter: reset token bucket on data start (#6627) This change alters the behavior of fault data limiting by resetting the token bucket to a single token when data initially starts streaming. This makes sure that data pacing is as expecting, while still allowing per-second bursting if the data provider is also bursty. Signed-off-by: Matt Klein --- include/envoy/common/token_bucket.h | 8 +++- source/common/common/token_bucket_impl.cc | 6 +++ source/common/common/token_bucket_impl.h | 1 + .../filters/http/fault/fault_filter.cc | 10 +++++ .../filters/http/fault/fault_filter.h | 1 + test/common/common/token_bucket_impl_test.cc | 8 ++++ .../fault/fault_filter_integration_test.cc | 42 +++++++++---------- 7 files changed, 52 insertions(+), 24 deletions(-) diff --git a/include/envoy/common/token_bucket.h b/include/envoy/common/token_bucket.h index 4a8750fc013c9..2f04c7c8c461f 100644 --- a/include/envoy/common/token_bucket.h +++ b/include/envoy/common/token_bucket.h @@ -15,7 +15,7 @@ namespace Envoy { */ class TokenBucket { public: - virtual ~TokenBucket() {} + virtual ~TokenBucket() = default; /** * @param tokens supplies the number of tokens to be consumed. @@ -32,6 +32,12 @@ class TokenBucket { * returns the upper bound on the amount of time until a next token is available. */ virtual std::chrono::milliseconds nextTokenAvailable() PURE; + + /** + * Reset the bucket with a specific number of tokens. Refill will begin again from the time that + * this routine is called. 
+ */ + virtual void reset(uint64_t num_tokens) PURE; }; typedef std::unique_ptr TokenBucketPtr; diff --git a/source/common/common/token_bucket_impl.cc b/source/common/common/token_bucket_impl.cc index bf93dc32f447c..5e7de9e6bb1a7 100644 --- a/source/common/common/token_bucket_impl.cc +++ b/source/common/common/token_bucket_impl.cc @@ -38,4 +38,10 @@ std::chrono::milliseconds TokenBucketImpl::nextTokenAvailable() { return std::chrono::milliseconds(static_cast(std::ceil((1 / fill_rate_) * 1000))); } +void TokenBucketImpl::reset(uint64_t num_tokens) { + ASSERT(num_tokens <= max_tokens_); + tokens_ = num_tokens; + last_fill_ = time_source_.monotonicTime(); +} + } // namespace Envoy diff --git a/source/common/common/token_bucket_impl.h b/source/common/common/token_bucket_impl.h index 7daa3fb8e79b0..644a4185dd5ab 100644 --- a/source/common/common/token_bucket_impl.h +++ b/source/common/common/token_bucket_impl.h @@ -23,6 +23,7 @@ class TokenBucketImpl : public TokenBucket { // TokenBucket uint64_t consume(uint64_t tokens, bool allow_partial) override; std::chrono::milliseconds nextTokenAvailable() override; + void reset(uint64_t num_tokens) override; private: const double max_tokens_; diff --git a/source/extensions/filters/http/fault/fault_filter.cc b/source/extensions/filters/http/fault/fault_filter.cc index c9502ff938c56..3d9acf69f94ae 100644 --- a/source/extensions/filters/http/fault/fault_filter.cc +++ b/source/extensions/filters/http/fault/fault_filter.cc @@ -416,6 +416,16 @@ void StreamRateLimiter::onTokenTimer() { ENVOY_LOG(trace, "limiter: timer wakeup: buffered={}", buffer_.length()); Buffer::OwnedImpl data_to_write; + if (!saw_data_) { + // The first time we see any data on this stream (via writeData()), reset the number of tokens + // to 1. 
This will ensure that we start pacing the data at the desired rate (and don't send a + // full 1s of data right away which might not introduce enough delay for a stream that doesn't + // have enough data to span more than 1s of rate allowance). Once we reset, we will subsequently + // allow for bursting within the second to account for our data provider being bursty. + token_bucket_.reset(1); + saw_data_ = true; + } + // Compute the number of tokens needed (rounded up), try to obtain that many tickets, and then // figure out how many bytes to write given the number of tokens we actually got. const uint64_t tokens_needed = diff --git a/source/extensions/filters/http/fault/fault_filter.h b/source/extensions/filters/http/fault/fault_filter.h index fcf78ca15d003..2b535edd666e0 100644 --- a/source/extensions/filters/http/fault/fault_filter.h +++ b/source/extensions/filters/http/fault/fault_filter.h @@ -151,6 +151,7 @@ class StreamRateLimiter : Logger::Loggable { const std::function continue_cb_; TokenBucketImpl token_bucket_; Event::TimerPtr token_timer_; + bool saw_data_{}; bool saw_end_stream_{}; bool saw_trailers_{}; Buffer::WatermarkBuffer buffer_; diff --git a/test/common/common/token_bucket_impl_test.cc b/test/common/common/token_bucket_impl_test.cc index 4a44acd847016..aec4744bc83eb 100644 --- a/test/common/common/token_bucket_impl_test.cc +++ b/test/common/common/token_bucket_impl_test.cc @@ -85,4 +85,12 @@ TEST_F(TokenBucketImplTest, PartialConsumption) { EXPECT_EQ(std::chrono::milliseconds(63), token_bucket.nextTokenAvailable()); } +// Test reset functionality. 
+TEST_F(TokenBucketImplTest, Reset) { + TokenBucketImpl token_bucket{16, time_system_, 16}; + token_bucket.reset(1); + EXPECT_EQ(1, token_bucket.consume(2, true)); + EXPECT_EQ(std::chrono::milliseconds(63), token_bucket.nextTokenAvailable()); +} + } // namespace Envoy diff --git a/test/extensions/filters/http/fault/fault_filter_integration_test.cc b/test/extensions/filters/http/fault/fault_filter_integration_test.cc index 3aabd78412062..e4527d30a41a9 100644 --- a/test/extensions/filters/http/fault/fault_filter_integration_test.cc +++ b/test/extensions/filters/http/fault/fault_filter_integration_test.cc @@ -74,17 +74,15 @@ TEST_P(FaultIntegrationTestAllProtocols, ResponseRateLimitNoTrailers) { codec_client_->makeHeaderOnlyRequest(default_request_headers_); waitForNextUpstreamRequest(); upstream_request_->encodeHeaders(default_response_headers_, false); - Buffer::OwnedImpl data(std::string(1152, 'a')); + Buffer::OwnedImpl data(std::string(127, 'a')); upstream_request_->encodeData(data, true); - decoder->waitForBodyData(1024); - // Advance time and wait for a tick worth of data. - simTime().sleep(std::chrono::milliseconds(63)); - decoder->waitForBodyData(1088); + // Wait for a tick worth of data. + decoder->waitForBodyData(64); - // Advance time and wait for a tick worth of data and end stream. + // Wait for a tick worth of data and end stream. simTime().sleep(std::chrono::milliseconds(63)); - decoder->waitForBodyData(1152); + decoder->waitForBodyData(127); decoder->waitForEndStream(); EXPECT_EQ(0UL, test_server_->counter("http.config_test.fault.delays_injected")->value()); @@ -110,13 +108,15 @@ TEST_P(FaultIntegrationTestAllProtocols, HeaderFaultConfig) { // Verify response body throttling. 
upstream_request_->encodeHeaders(default_response_headers_, false); - Buffer::OwnedImpl data(std::string(1025, 'a')); + Buffer::OwnedImpl data(std::string(128, 'a')); upstream_request_->encodeData(data, true); - decoder->waitForBodyData(1024); - // Advance time and wait for a tick worth of data and end stream. + // Wait for a tick worth of data. + decoder->waitForBodyData(64); + + // Wait for a tick worth of data and end stream. simTime().sleep(std::chrono::milliseconds(63)); - decoder->waitForBodyData(1025); + decoder->waitForBodyData(128); decoder->waitForEndStream(); EXPECT_EQ(1UL, test_server_->counter("http.config_test.fault.delays_injected")->value()); @@ -149,17 +149,15 @@ TEST_P(FaultIntegrationTestHttp2, ResponseRateLimitTrailersBodyFlushed) { codec_client_->makeHeaderOnlyRequest(default_request_headers_); waitForNextUpstreamRequest(); upstream_request_->encodeHeaders(default_response_headers_, false); - Buffer::OwnedImpl data(std::string(1152, 'a')); + Buffer::OwnedImpl data(std::string(127, 'a')); upstream_request_->encodeData(data, false); - decoder->waitForBodyData(1024); - // Advance time and wait for a tick worth of data. - simTime().sleep(std::chrono::milliseconds(63)); - decoder->waitForBodyData(1088); + // Wait for a tick worth of data. + decoder->waitForBodyData(64); // Advance time and wait for a tick worth of data. simTime().sleep(std::chrono::milliseconds(63)); - decoder->waitForBodyData(1152); + decoder->waitForBodyData(127); // Send trailers and wait for end stream. 
Http::TestHeaderMapImpl trailers{{"hello", "world"}}; @@ -179,19 +177,17 @@ TEST_P(FaultIntegrationTestHttp2, ResponseRateLimitTrailersBodyNotFlushed) { codec_client_->makeHeaderOnlyRequest(default_request_headers_); waitForNextUpstreamRequest(); upstream_request_->encodeHeaders(default_response_headers_, false); - Buffer::OwnedImpl data(std::string(1152, 'a')); + Buffer::OwnedImpl data(std::string(128, 'a')); upstream_request_->encodeData(data, false); Http::TestHeaderMapImpl trailers{{"hello", "world"}}; upstream_request_->encodeTrailers(trailers); - decoder->waitForBodyData(1024); - // Advance time and wait for a tick worth of data. - simTime().sleep(std::chrono::milliseconds(63)); - decoder->waitForBodyData(1088); + // Wait for a tick worth of data. + decoder->waitForBodyData(64); // Advance time and wait for a tick worth of data, trailers, and end stream. simTime().sleep(std::chrono::milliseconds(63)); - decoder->waitForBodyData(1152); + decoder->waitForBodyData(128); decoder->waitForEndStream(); EXPECT_NE(nullptr, decoder->trailers()); From dc3467adacb8f2b0393583857e709c06d76a2441 Mon Sep 17 00:00:00 2001 From: Nicolas Flacco <47160394+FAYiEKcbD0XFqF2QK2E4viAHg8rMm2VbjYKdjTg@users.noreply.github.com> Date: Thu, 18 Apr 2019 14:58:13 -0700 Subject: [PATCH 150/165] Batch implementation with timer (#6452) Signed-off-by: Nicolas Flacco --- .../network/redis_proxy/v2/redis_proxy.proto | 21 +++ docs/root/intro/version_history.rst | 3 + .../filters/network/common/redis/client.h | 12 ++ .../network/common/redis/client_impl.cc | 29 +++- .../network/common/redis/client_impl.h | 8 + .../extensions/health_checkers/redis/redis.h | 10 ++ .../network/common/redis/client_impl_test.cc | 163 +++++++++++++++++- .../redis_proxy_integration_test.cc | 51 ++++++ 8 files changed, 293 insertions(+), 4 deletions(-) diff --git a/api/envoy/config/filter/network/redis_proxy/v2/redis_proxy.proto b/api/envoy/config/filter/network/redis_proxy/v2/redis_proxy.proto index 
16196cc07a3b1..eec8c3f409544 100644 --- a/api/envoy/config/filter/network/redis_proxy/v2/redis_proxy.proto +++ b/api/envoy/config/filter/network/redis_proxy/v2/redis_proxy.proto @@ -59,6 +59,27 @@ message RedisProxy { // need to be known to the cluster manager. If the command cannot be redirected, then the // original error is passed downstream unchanged. By default, this support is not enabled. bool enable_redirection = 3; + + // Maximum size of encoded request buffer before flush is triggered and encoded requests + // are sent upstream. If this is unset, the buffer flushes whenever it receives data + // and performs no batching. + // This feature makes it possible for multiple clients to send requests to Envoy and have + // them batched- for example if one is running several worker processes, each with its own + // Redis connection. There is no benefit to using this with a single downstream process. + // Recommended size (if enabled) is 1024 bytes. + uint32 max_buffer_size_before_flush = 4; + + // The encoded request buffer is flushed N milliseconds after the first request has been + // encoded, unless the buffer size has already exceeded `max_buffer_size_before_flush`. + // If `max_buffer_size_before_flush` is not set, this flush timer is not used. Otherwise, + // the timer should be set according to the number of clients, overall request rate and + // desired maximum latency for a single command. For example, if there are many requests + // being batched together at a high rate, the buffer will likely be filled before the timer + // fires. Alternatively, if the request rate is lower the buffer will not be filled as often + // before the timer fires. + // If `max_buffer_size_before_flush` is set, but `buffer_flush_timeout` is not, the latter + // defaults to 3ms. + google.protobuf.Duration buffer_flush_timeout = 5 [(gogoproto.stdduration) = true]; } // Network settings for the connection pool to the upstream clusters. 
diff --git a/docs/root/intro/version_history.rst b/docs/root/intro/version_history.rst index 3458bd0d5b55a..43be66773e8d1 100644 --- a/docs/root/intro/version_history.rst +++ b/docs/root/intro/version_history.rst @@ -10,6 +10,9 @@ Version history * http: mitigated a race condition with the :ref:`delayed_close_timeout` where it could trigger while actively flushing a pending write buffer for a downstream connection. * redis: added :ref:`prefix routing ` to enable routing commands based on their key's prefix to different upstream. * redis: add support for zpopmax and zpopmin commands. +* redis: added + :ref:`max_buffer_size_before_flush ` to batch commands together until the encoder buffer hits a certain size, and + :ref:`buffer_flush_timeout ` to control how quickly the buffer is flushed if it is not full. * router: added ability to control retry back-off intervals via :ref:`retry policy `. * upstream: added :ref:`upstream_cx_pool_overflow ` for the connection pool circuit breaker. diff --git a/source/extensions/filters/network/common/redis/client.h b/source/extensions/filters/network/common/redis/client.h index 59c5c88080c9b..4a7c53912afc3 100644 --- a/source/extensions/filters/network/common/redis/client.h +++ b/source/extensions/filters/network/common/redis/client.h @@ -1,5 +1,7 @@ #pragma once +#include + #include "envoy/upstream/cluster_manager.h" #include "extensions/filters/network/common/redis/codec_impl.h" @@ -110,6 +112,16 @@ class Config { * processed. */ virtual bool enableRedirection() const PURE; + + /** + * @return buffer size for batching commands for a single upstream host. + */ + virtual uint32_t maxBufferSizeBeforeFlush() const PURE; + + /** + * @return timeout for batching commands for a single upstream host. 
+ */ + virtual std::chrono::milliseconds bufferFlushTimeoutInMs() const PURE; }; /** diff --git a/source/extensions/filters/network/common/redis/client_impl.cc b/source/extensions/filters/network/common/redis/client_impl.cc index 1040036560488..fa4bb4bb5c766 100644 --- a/source/extensions/filters/network/common/redis/client_impl.cc +++ b/source/extensions/filters/network/common/redis/client_impl.cc @@ -11,7 +11,14 @@ ConfigImpl::ConfigImpl( const envoy::config::filter::network::redis_proxy::v2::RedisProxy::ConnPoolSettings& config) : op_timeout_(PROTOBUF_GET_MS_REQUIRED(config, op_timeout)), enable_hashtagging_(config.enable_hashtagging()), - enable_redirection_(config.enable_redirection()) {} + enable_redirection_(config.enable_redirection()), + max_buffer_size_before_flush_( + config.max_buffer_size_before_flush()), // This is a scalar, so default is zero. + buffer_flush_timeout_(PROTOBUF_GET_MS_OR_DEFAULT( + config, buffer_flush_timeout, + 3)) // Default timeout is 3ms. If max_buffer_size_before_flush is zero, this is not used + // as the buffer is flushed on each request immediately. 
+{} ClientPtr ClientImpl::create(Upstream::HostConstSharedPtr host, Event::Dispatcher& dispatcher, EncoderPtr&& encoder, DecoderFactory& decoder_factory, @@ -31,7 +38,8 @@ ClientImpl::ClientImpl(Upstream::HostConstSharedPtr host, Event::Dispatcher& dis EncoderPtr&& encoder, DecoderFactory& decoder_factory, const Config& config) : host_(host), encoder_(std::move(encoder)), decoder_(decoder_factory.create(*this)), config_(config), - connect_or_op_timer_(dispatcher.createTimer([this]() -> void { onConnectOrOpTimeout(); })) { + connect_or_op_timer_(dispatcher.createTimer([this]() -> void { onConnectOrOpTimeout(); })), + flush_timer_(dispatcher.createTimer([this]() -> void { flushBufferAndResetTimer(); })) { host->cluster().stats().upstream_cx_total_.inc(); host->stats().cx_total_.inc(); host->cluster().stats().upstream_cx_active_.inc(); @@ -48,12 +56,27 @@ ClientImpl::~ClientImpl() { void ClientImpl::close() { connection_->close(Network::ConnectionCloseType::NoFlush); } +void ClientImpl::flushBufferAndResetTimer() { + if (flush_timer_->enabled()) { + flush_timer_->disableTimer(); + } + connection_->write(encoder_buffer_, false); +} + PoolRequest* ClientImpl::makeRequest(const RespValue& request, PoolCallbacks& callbacks) { ASSERT(connection_->state() == Network::Connection::State::Open); + const bool empty_buffer = encoder_buffer_.length() == 0; + pending_requests_.emplace_back(*this, callbacks); encoder_->encode(request, encoder_buffer_); - connection_->write(encoder_buffer_, false); + + // If buffer is full, flush. If the the buffer was empty before the request, start the timer. + if (encoder_buffer_.length() >= config_.maxBufferSizeBeforeFlush()) { + flushBufferAndResetTimer(); + } else if (empty_buffer) { + flush_timer_->enableTimer(std::chrono::milliseconds(config_.bufferFlushTimeoutInMs())); + } // Only boost the op timeout if: // - We are not already connected. 
Otherwise, we are governed by the connect timeout and the timer diff --git a/source/extensions/filters/network/common/redis/client_impl.h b/source/extensions/filters/network/common/redis/client_impl.h index 5a44d39e82687..fd9b7b7af7b80 100644 --- a/source/extensions/filters/network/common/redis/client_impl.h +++ b/source/extensions/filters/network/common/redis/client_impl.h @@ -41,11 +41,17 @@ class ConfigImpl : public Config { std::chrono::milliseconds opTimeout() const override { return op_timeout_; } bool enableHashtagging() const override { return enable_hashtagging_; } bool enableRedirection() const override { return enable_redirection_; } + uint32_t maxBufferSizeBeforeFlush() const override { return max_buffer_size_before_flush_; } + std::chrono::milliseconds bufferFlushTimeoutInMs() const override { + return buffer_flush_timeout_; + } private: const std::chrono::milliseconds op_timeout_; const bool enable_hashtagging_; const bool enable_redirection_; + const uint32_t max_buffer_size_before_flush_; + const std::chrono::milliseconds buffer_flush_timeout_; }; class ClientImpl : public Client, public DecoderCallbacks, public Network::ConnectionCallbacks { @@ -62,6 +68,7 @@ class ClientImpl : public Client, public DecoderCallbacks, public Network::Conne } void close() override; PoolRequest* makeRequest(const RespValue& request, PoolCallbacks& callbacks) override; + void flushBufferAndResetTimer(); private: struct UpstreamReadFilter : public Network::ReadFilterBaseImpl { @@ -111,6 +118,7 @@ class ClientImpl : public Client, public DecoderCallbacks, public Network::Conne std::list pending_requests_; Event::TimerPtr connect_or_op_timer_; bool connected_{}; + Event::TimerPtr flush_timer_; }; class ClientFactoryImpl : public ClientFactory { diff --git a/source/extensions/health_checkers/redis/redis.h b/source/extensions/health_checkers/redis/redis.h index 7c93b017b5a90..791171d2d83e2 100644 --- a/source/extensions/health_checkers/redis/redis.h +++ 
b/source/extensions/health_checkers/redis/redis.h @@ -1,5 +1,7 @@ #pragma once +#include + #include "envoy/config/health_checker/redis/v2/redis.pb.validate.h" #include "common/upstream/health_checker_base_impl.h" @@ -63,6 +65,14 @@ class RedisHealthChecker : public Upstream::HealthCheckerImplBase { return true; } // Redirection errors are treated as check successes. + // Batching + unsigned int maxBufferSizeBeforeFlush() const override { + return 0; + } // Forces an immediate flush + std::chrono::milliseconds bufferFlushTimeoutInMs() const override { + return std::chrono::milliseconds(1); + } + // Extensions::NetworkFilters::Common::Redis::Client::PoolCallbacks void onResponse(NetworkFilters::Common::Redis::RespValuePtr&& value) override; void onFailure() override; diff --git a/test/extensions/filters/network/common/redis/client_impl_test.cc b/test/extensions/filters/network/common/redis/client_impl_test.cc index d5c0e2ac7fae6..c1d8269f5b024 100644 --- a/test/extensions/filters/network/common/redis/client_impl_test.cc +++ b/test/extensions/filters/network/common/redis/client_impl_test.cc @@ -64,6 +64,11 @@ class RedisClientImplTest : public testing::Test, public Common::Redis::DecoderF upstream_connection_ = new NiceMock(); Upstream::MockHost::MockCreateConnectionData conn_info; conn_info.connection_ = upstream_connection_; + + // Create timers in order they are created in client_impl.cc + connect_or_op_timer_ = new Event::MockTimer(&dispatcher_); + flush_timer_ = new Event::MockTimer(&dispatcher_); + EXPECT_CALL(*connect_or_op_timer_, enableTimer(_)); EXPECT_CALL(*host_, createConnection_(_, _)).WillOnce(Return(conn_info)); EXPECT_CALL(*upstream_connection_, addReadFilter(_)) @@ -89,7 +94,8 @@ class RedisClientImplTest : public testing::Test, public Common::Redis::DecoderF const std::string cluster_name_{"foo"}; std::shared_ptr host_{new NiceMock()}; Event::MockDispatcher dispatcher_; - Event::MockTimer* connect_or_op_timer_{new Event::MockTimer(&dispatcher_)}; + 
Event::MockTimer* flush_timer_{}; + Event::MockTimer* connect_or_op_timer_{}; MockEncoder* encoder_{new MockEncoder()}; MockDecoder* decoder_{new MockDecoder()}; Common::Redis::DecoderCallbacks* callbacks_{}; @@ -99,6 +105,138 @@ class RedisClientImplTest : public testing::Test, public Common::Redis::DecoderF ClientPtr client_; }; +TEST_F(RedisClientImplTest, BatchWithZeroBufferAndTimeout) { + // Basic test with a single request, default buffer size (0) and timeout (0). + // This means we do not batch requests, and thus the flush timer is never enabled. + InSequence s; + + setup(); + + // Make the dummy request + Common::Redis::RespValue request1; + MockPoolCallbacks callbacks1; + EXPECT_CALL(*encoder_, encode(Ref(request1), _)); + EXPECT_CALL(*flush_timer_, enabled()).WillOnce(Return(false)); + PoolRequest* handle1 = client_->makeRequest(request1, callbacks1); + EXPECT_NE(nullptr, handle1); + + // Process the dummy request + Buffer::OwnedImpl fake_data; + EXPECT_CALL(*decoder_, decode(Ref(fake_data))).WillOnce(Invoke([&](Buffer::Instance&) -> void { + InSequence s; + Common::Redis::RespValuePtr response1(new Common::Redis::RespValue()); + EXPECT_CALL(callbacks1, onResponse_(Ref(response1))); + EXPECT_CALL(*connect_or_op_timer_, disableTimer()); + EXPECT_CALL(host_->outlier_detector_, putResult(Upstream::Outlier::Result::SUCCESS)); + callbacks_->onRespValue(std::move(response1)); + })); + upstream_read_filter_->onData(fake_data, false); + + EXPECT_CALL(*connect_or_op_timer_, disableTimer()); + client_->close(); +} + +class ConfigBufferSizeGTSingleRequest : public Config { + bool disableOutlierEvents() const override { return false; } + std::chrono::milliseconds opTimeout() const override { return std::chrono::milliseconds(25); } + bool enableHashtagging() const override { return false; } + bool enableRedirection() const override { return false; } + unsigned int maxBufferSizeBeforeFlush() const override { return 8; } + std::chrono::milliseconds 
bufferFlushTimeoutInMs() const override { + return std::chrono::milliseconds(1); + } +}; + +TEST_F(RedisClientImplTest, BatchWithTimerFiring) { + // With a flush buffer > single request length, the flush timer comes into play. + // In this test, we make a single request that doesn't fill the buffer, so we + // have to wait for the flush timer to fire. + InSequence s; + + setup(std::make_unique()); + + // Make the dummy request + Common::Redis::RespValue request1; + MockPoolCallbacks callbacks1; + EXPECT_CALL(*encoder_, encode(Ref(request1), _)); + EXPECT_CALL(*flush_timer_, enableTimer(_)); + PoolRequest* handle1 = client_->makeRequest(request1, callbacks1); + EXPECT_NE(nullptr, handle1); + + // Pretend the the flush timer fires. + // The timer callback is the general-purpose flush function, also used when + // the buffer is filled. If the buffer fills before the timer fires, we need + // to check if the timer is active and cancel it. However, if the timer fires + // the callback, this internal check returns false as the timer is finished. + EXPECT_CALL(*flush_timer_, enabled()).WillOnce(Return(false)); + flush_timer_->invokeCallback(); + + // Process the dummy request + Buffer::OwnedImpl fake_data; + EXPECT_CALL(*decoder_, decode(Ref(fake_data))).WillOnce(Invoke([&](Buffer::Instance&) -> void { + InSequence s; + Common::Redis::RespValuePtr response1(new Common::Redis::RespValue()); + EXPECT_CALL(callbacks1, onResponse_(Ref(response1))); + EXPECT_CALL(*connect_or_op_timer_, disableTimer()); + EXPECT_CALL(host_->outlier_detector_, putResult(Upstream::Outlier::Result::SUCCESS)); + callbacks_->onRespValue(std::move(response1)); + })); + upstream_read_filter_->onData(fake_data, false); + + EXPECT_CALL(*connect_or_op_timer_, disableTimer()); + client_->close(); +} + +TEST_F(RedisClientImplTest, BatchWithTimerCancelledByBufferFlush) { + // Expanding on the previous test, let's the flush buffer is filled by two requests. 
+ // In this test, we make a single request that doesn't fill the buffer, and the timer + // starts. However, a second request comes in, which should cancel the timer, such + // that it is never invoked. + InSequence s; + + setup(std::make_unique()); + + // Make the dummy request (doesn't fill buffer, starts timer) + Common::Redis::RespValue request1; + MockPoolCallbacks callbacks1; + EXPECT_CALL(*encoder_, encode(Ref(request1), _)); + EXPECT_CALL(*flush_timer_, enableTimer(_)); + PoolRequest* handle1 = client_->makeRequest(request1, callbacks1); + EXPECT_NE(nullptr, handle1); + + // Make a second dummy request (fills buffer, cancels timer) + Common::Redis::RespValue request2; + MockPoolCallbacks callbacks2; + EXPECT_CALL(*encoder_, encode(Ref(request2), _)); + EXPECT_CALL(*flush_timer_, enabled()).WillOnce(Return(true)); + ; + EXPECT_CALL(*flush_timer_, disableTimer()); + PoolRequest* handle2 = client_->makeRequest(request2, callbacks2); + EXPECT_NE(nullptr, handle2); + + // Process the dummy requests + Buffer::OwnedImpl fake_data; + EXPECT_CALL(*decoder_, decode(Ref(fake_data))).WillOnce(Invoke([&](Buffer::Instance&) -> void { + InSequence s; + Common::Redis::RespValuePtr response1(new Common::Redis::RespValue()); + EXPECT_CALL(callbacks1, onResponse_(Ref(response1))); + EXPECT_CALL(*connect_or_op_timer_, enableTimer(_)); + EXPECT_CALL(host_->outlier_detector_, putResult(Upstream::Outlier::Result::SUCCESS)); + callbacks_->onRespValue(std::move(response1)); + + Common::Redis::RespValuePtr response2(new Common::Redis::RespValue()); + EXPECT_CALL(callbacks2, onResponse_(Ref(response2))); + EXPECT_CALL(*connect_or_op_timer_, disableTimer()); + EXPECT_CALL(host_->outlier_detector_, putResult(Upstream::Outlier::Result::SUCCESS)); + callbacks_->onRespValue(std::move(response2)); + })); + upstream_read_filter_->onData(fake_data, false); + + EXPECT_CALL(*upstream_connection_, close(Network::ConnectionCloseType::NoFlush)); + EXPECT_CALL(*connect_or_op_timer_, 
disableTimer()); + client_->close(); +} + TEST_F(RedisClientImplTest, Basic) { InSequence s; @@ -107,6 +245,7 @@ TEST_F(RedisClientImplTest, Basic) { Common::Redis::RespValue request1; MockPoolCallbacks callbacks1; EXPECT_CALL(*encoder_, encode(Ref(request1), _)); + EXPECT_CALL(*flush_timer_, enabled()).WillOnce(Return(false)); PoolRequest* handle1 = client_->makeRequest(request1, callbacks1); EXPECT_NE(nullptr, handle1); @@ -115,6 +254,7 @@ TEST_F(RedisClientImplTest, Basic) { Common::Redis::RespValue request2; MockPoolCallbacks callbacks2; EXPECT_CALL(*encoder_, encode(Ref(request2), _)); + EXPECT_CALL(*flush_timer_, enabled()).WillOnce(Return(false)); PoolRequest* handle2 = client_->makeRequest(request2, callbacks2); EXPECT_NE(nullptr, handle2); @@ -153,6 +293,7 @@ TEST_F(RedisClientImplTest, Cancel) { Common::Redis::RespValue request1; MockPoolCallbacks callbacks1; EXPECT_CALL(*encoder_, encode(Ref(request1), _)); + EXPECT_CALL(*flush_timer_, enabled()).WillOnce(Return(false)); PoolRequest* handle1 = client_->makeRequest(request1, callbacks1); EXPECT_NE(nullptr, handle1); @@ -161,6 +302,7 @@ TEST_F(RedisClientImplTest, Cancel) { Common::Redis::RespValue request2; MockPoolCallbacks callbacks2; EXPECT_CALL(*encoder_, encode(Ref(request2), _)); + EXPECT_CALL(*flush_timer_, enabled()).WillOnce(Return(false)); PoolRequest* handle2 = client_->makeRequest(request2, callbacks2); EXPECT_NE(nullptr, handle2); @@ -202,6 +344,7 @@ TEST_F(RedisClientImplTest, FailAll) { Common::Redis::RespValue request1; MockPoolCallbacks callbacks1; EXPECT_CALL(*encoder_, encode(Ref(request1), _)); + EXPECT_CALL(*flush_timer_, enabled()).WillOnce(Return(false)); PoolRequest* handle1 = client_->makeRequest(request1, callbacks1); EXPECT_NE(nullptr, handle1); @@ -228,6 +371,7 @@ TEST_F(RedisClientImplTest, FailAllWithCancel) { Common::Redis::RespValue request1; MockPoolCallbacks callbacks1; EXPECT_CALL(*encoder_, encode(Ref(request1), _)); + EXPECT_CALL(*flush_timer_, 
enabled()).WillOnce(Return(false)); PoolRequest* handle1 = client_->makeRequest(request1, callbacks1); EXPECT_NE(nullptr, handle1); @@ -252,6 +396,7 @@ TEST_F(RedisClientImplTest, ProtocolError) { Common::Redis::RespValue request1; MockPoolCallbacks callbacks1; EXPECT_CALL(*encoder_, encode(Ref(request1), _)); + EXPECT_CALL(*flush_timer_, enabled()).WillOnce(Return(false)); PoolRequest* handle1 = client_->makeRequest(request1, callbacks1); EXPECT_NE(nullptr, handle1); @@ -279,6 +424,7 @@ TEST_F(RedisClientImplTest, ConnectFail) { Common::Redis::RespValue request1; MockPoolCallbacks callbacks1; EXPECT_CALL(*encoder_, encode(Ref(request1), _)); + EXPECT_CALL(*flush_timer_, enabled()).WillOnce(Return(false)); PoolRequest* handle1 = client_->makeRequest(request1, callbacks1); EXPECT_NE(nullptr, handle1); @@ -296,6 +442,10 @@ class ConfigOutlierDisabled : public Config { std::chrono::milliseconds opTimeout() const override { return std::chrono::milliseconds(25); } bool enableHashtagging() const override { return false; } bool enableRedirection() const override { return false; } + unsigned int maxBufferSizeBeforeFlush() const override { return 0; } + std::chrono::milliseconds bufferFlushTimeoutInMs() const override { + return std::chrono::milliseconds(0); + } }; TEST_F(RedisClientImplTest, OutlierDisabled) { @@ -306,6 +456,7 @@ TEST_F(RedisClientImplTest, OutlierDisabled) { Common::Redis::RespValue request1; MockPoolCallbacks callbacks1; EXPECT_CALL(*encoder_, encode(Ref(request1), _)); + EXPECT_CALL(*flush_timer_, enabled()).WillOnce(Return(false)); PoolRequest* handle1 = client_->makeRequest(request1, callbacks1); EXPECT_NE(nullptr, handle1); @@ -326,6 +477,7 @@ TEST_F(RedisClientImplTest, ConnectTimeout) { Common::Redis::RespValue request1; MockPoolCallbacks callbacks1; EXPECT_CALL(*encoder_, encode(Ref(request1), _)); + EXPECT_CALL(*flush_timer_, enabled()).WillOnce(Return(false)); PoolRequest* handle1 = client_->makeRequest(request1, callbacks1); EXPECT_NE(nullptr, 
handle1); @@ -347,6 +499,7 @@ TEST_F(RedisClientImplTest, OpTimeout) { Common::Redis::RespValue request1; MockPoolCallbacks callbacks1; EXPECT_CALL(*encoder_, encode(Ref(request1), _)); + EXPECT_CALL(*flush_timer_, enabled()).WillOnce(Return(false)); PoolRequest* handle1 = client_->makeRequest(request1, callbacks1); EXPECT_NE(nullptr, handle1); @@ -370,6 +523,7 @@ TEST_F(RedisClientImplTest, AskRedirection) { Common::Redis::RespValue request1; MockPoolCallbacks callbacks1; EXPECT_CALL(*encoder_, encode(Ref(request1), _)); + EXPECT_CALL(*flush_timer_, enabled()).WillOnce(Return(false)); PoolRequest* handle1 = client_->makeRequest(request1, callbacks1); EXPECT_NE(nullptr, handle1); @@ -378,6 +532,7 @@ TEST_F(RedisClientImplTest, AskRedirection) { Common::Redis::RespValue request2; MockPoolCallbacks callbacks2; EXPECT_CALL(*encoder_, encode(Ref(request2), _)); + EXPECT_CALL(*flush_timer_, enabled()).WillOnce(Return(false)); PoolRequest* handle2 = client_->makeRequest(request2, callbacks2); EXPECT_NE(nullptr, handle2); @@ -428,6 +583,7 @@ TEST_F(RedisClientImplTest, MovedRedirection) { Common::Redis::RespValue request1; MockPoolCallbacks callbacks1; EXPECT_CALL(*encoder_, encode(Ref(request1), _)); + EXPECT_CALL(*flush_timer_, enabled()).WillOnce(Return(false)); PoolRequest* handle1 = client_->makeRequest(request1, callbacks1); EXPECT_NE(nullptr, handle1); @@ -436,6 +592,7 @@ TEST_F(RedisClientImplTest, MovedRedirection) { Common::Redis::RespValue request2; MockPoolCallbacks callbacks2; EXPECT_CALL(*encoder_, encode(Ref(request2), _)); + EXPECT_CALL(*flush_timer_, enabled()).WillOnce(Return(false)); PoolRequest* handle2 = client_->makeRequest(request2, callbacks2); EXPECT_NE(nullptr, handle2); @@ -486,6 +643,7 @@ TEST_F(RedisClientImplTest, AskRedirectionNotEnabled) { Common::Redis::RespValue request1; MockPoolCallbacks callbacks1; EXPECT_CALL(*encoder_, encode(Ref(request1), _)); + EXPECT_CALL(*flush_timer_, enabled()).WillOnce(Return(false)); PoolRequest* handle1 = 
client_->makeRequest(request1, callbacks1); EXPECT_NE(nullptr, handle1); @@ -494,6 +652,7 @@ TEST_F(RedisClientImplTest, AskRedirectionNotEnabled) { Common::Redis::RespValue request2; MockPoolCallbacks callbacks2; EXPECT_CALL(*encoder_, encode(Ref(request2), _)); + EXPECT_CALL(*flush_timer_, enabled()).WillOnce(Return(false)); PoolRequest* handle2 = client_->makeRequest(request2, callbacks2); EXPECT_NE(nullptr, handle2); @@ -545,6 +704,7 @@ TEST_F(RedisClientImplTest, MovedRedirectionNotEnabled) { Common::Redis::RespValue request1; MockPoolCallbacks callbacks1; EXPECT_CALL(*encoder_, encode(Ref(request1), _)); + EXPECT_CALL(*flush_timer_, enabled()).WillOnce(Return(false)); PoolRequest* handle1 = client_->makeRequest(request1, callbacks1); EXPECT_NE(nullptr, handle1); @@ -553,6 +713,7 @@ TEST_F(RedisClientImplTest, MovedRedirectionNotEnabled) { Common::Redis::RespValue request2; MockPoolCallbacks callbacks2; EXPECT_CALL(*encoder_, encode(Ref(request2), _)); + EXPECT_CALL(*flush_timer_, enabled()).WillOnce(Return(false)); PoolRequest* handle2 = client_->makeRequest(request2, callbacks2); EXPECT_NE(nullptr, handle2); diff --git a/test/extensions/filters/network/redis_proxy/redis_proxy_integration_test.cc b/test/extensions/filters/network/redis_proxy/redis_proxy_integration_test.cc index 07997b76c0f1d..a51671035e807 100644 --- a/test/extensions/filters/network/redis_proxy/redis_proxy_integration_test.cc +++ b/test/extensions/filters/network/redis_proxy/redis_proxy_integration_test.cc @@ -65,6 +65,12 @@ const std::string CONFIG_WITH_REDIRECTION = CONFIG + R"EOF( enable_redirection: true )EOF"; +// This is a configuration with batching enabled. 
+const std::string CONFIG_WITH_BATCHING = CONFIG + R"EOF( + max_buffer_size_before_flush: 1024 + buffer_flush_timeout: 0.003s +)EOF"; + const std::string CONFIG_WITH_ROUTES = R"EOF( admin: access_log_path: /dev/null @@ -242,6 +248,11 @@ class RedisProxyWithRedirectionIntegrationTest : public RedisProxyIntegrationTes const std::string& asking_response = "+OK\r\n"); }; +class RedisProxyWithBatchingIntegrationTest : public RedisProxyIntegrationTest { +public: + RedisProxyWithBatchingIntegrationTest() : RedisProxyIntegrationTest(CONFIG_WITH_BATCHING, 2) {} +}; + class RedisProxyWithRoutesIntegrationTest : public RedisProxyIntegrationTest { public: RedisProxyWithRoutesIntegrationTest() : RedisProxyIntegrationTest(CONFIG_WITH_ROUTES, 6) {} @@ -255,6 +266,10 @@ INSTANTIATE_TEST_SUITE_P(IpVersions, RedisProxyWithRedirectionIntegrationTest, testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), TestUtility::ipTestParamsToString); +INSTANTIATE_TEST_SUITE_P(IpVersions, RedisProxyWithBatchingIntegrationTest, + testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), + TestUtility::ipTestParamsToString); + INSTANTIATE_TEST_SUITE_P(IpVersions, RedisProxyWithRoutesIntegrationTest, testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), TestUtility::ipTestParamsToString); @@ -549,6 +564,42 @@ TEST_P(RedisProxyWithRedirectionIntegrationTest, IgnoreRedirectionForAsking) { asking_response.str()); } +// This test verifies that batching works properly. If batching is enabled, when multiple +// clients make a request to a Redis server within a certain time window, they will be batched +// together. In the below example, two clients send "GET foo", and Redis receives those two as +// a single concatenated request.
+ +TEST_P(RedisProxyWithBatchingIntegrationTest, SimpleBatching) { + initialize(); + + const std::string& request = makeBulkStringArray({"get", "foo"}); + const std::string& response = "$3\r\nbar\r\n"; + + std::string proxy_to_server; + IntegrationTcpClientPtr redis_client_1 = makeTcpConnection(lookupPort("redis_proxy")); + IntegrationTcpClientPtr redis_client_2 = makeTcpConnection(lookupPort("redis_proxy")); + redis_client_1->write(request); + redis_client_2->write(request); + + FakeRawConnectionPtr fake_upstream_connection; + EXPECT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection)); + EXPECT_TRUE(fake_upstream_connection->waitForData(request.size() * 2, &proxy_to_server)); + // The original request should be the same as the data received by the server. + EXPECT_EQ(request + request, proxy_to_server); + + EXPECT_TRUE(fake_upstream_connection->write(response + response)); + redis_client_1->waitForData(response); + redis_client_2->waitForData(response); + // The original response should be received by the fake Redis client. + EXPECT_EQ(response, redis_client_1->data()); + EXPECT_EQ(response, redis_client_2->data()); + + redis_client_1->close(); + EXPECT_TRUE(fake_upstream_connection->close()); + redis_client_2->close(); + EXPECT_TRUE(fake_upstream_connection->close()); +} + // This test verifies that it's possible to route keys to 3 different upstream pools. 
TEST_P(RedisProxyWithRoutesIntegrationTest, SimpleRequestAndResponseRoutedByPrefix) { From 43e06d2177c4e88fb069691863c5a583d0f0bdcb Mon Sep 17 00:00:00 2001 From: Matt Klein Date: Thu, 18 Apr 2019 17:07:41 -0700 Subject: [PATCH 151/165] Revert dispatcher stats (#6649) Signed-off-by: Matt Klein --- bazel/repository_locations.bzl | 10 ++-- docs/root/intro/version_history.rst | 1 - docs/root/operations/operations.rst | 2 - docs/root/operations/performance.rst | 40 ---------------- include/envoy/event/dispatcher.h | 27 ----------- include/envoy/server/worker.h | 6 +-- source/common/event/dispatcher_impl.cc | 10 ---- source/common/event/dispatcher_impl.h | 5 -- source/common/event/libevent_scheduler.cc | 58 ----------------------- source/common/event/libevent_scheduler.h | 15 ------ source/server/listener_manager_impl.cc | 10 ++-- source/server/listener_manager_impl.h | 1 - source/server/server.cc | 3 -- source/server/worker_impl.cc | 3 +- source/server/worker_impl.h | 2 +- test/common/event/BUILD | 1 - test/common/event/dispatcher_impl_test.cc | 12 ----- test/mocks/event/mocks.cc | 1 - test/mocks/event/mocks.h | 1 - test/mocks/server/mocks.h | 2 +- test/mocks/thread_local/mocks.cc | 1 - test/server/listener_manager_impl_test.cc | 16 +++---- test/server/worker_impl_test.cc | 6 +-- tools/spelling_dictionary.txt | 1 - 24 files changed, 23 insertions(+), 211 deletions(-) delete mode 100644 docs/root/operations/performance.rst diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index e1fdafa920036..afac13ec52d8a 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -129,13 +129,9 @@ REPOSITORY_LOCATIONS = dict( urls = ["https://github.com/google/benchmark/archive/505be96ab23056580a3a2315abba048f4428b04e.tar.gz"], ), com_github_libevent_libevent = dict( - sha256 = "ab3af422b7e4c6d9276b3637d87edb6cf628fd91c9206260b759778c3a28b330", - # This SHA includes the new "prepare" and "check" watchers, used for event loop 
performance - # stats (see https://github.com/libevent/libevent/pull/793) and the fix for a race condition - # in the watchers (see https://github.com/libevent/libevent/pull/802). - # TODO(mergeconflict): Update to v2.2 when it is released. - strip_prefix = "libevent-1cd8830de27c30c5324c75bfb6012c969c09ca2c", - urls = ["https://github.com/libevent/libevent/archive/1cd8830de27c30c5324c75bfb6012c969c09ca2c.tar.gz"], + sha256 = "53d4bb49b837944893b7caf9ae8eb43e94690ee5babea6469cc4a928722f99b1", + strip_prefix = "libevent-c4fbae3ae6166dddfa126734edd63213afa14dce", + urls = ["https://github.com/libevent/libevent/archive/c4fbae3ae6166dddfa126734edd63213afa14dce.tar.gz"], ), com_github_madler_zlib = dict( sha256 = "629380c90a77b964d896ed37163f5c3a34f6e6d897311f1df2a7016355c45eff", diff --git a/docs/root/intro/version_history.rst b/docs/root/intro/version_history.rst index 43be66773e8d1..e496730c8b17c 100644 --- a/docs/root/intro/version_history.rst +++ b/docs/root/intro/version_history.rst @@ -4,7 +4,6 @@ Version history 1.11.0 (Pending) ================ * dubbo_proxy: support the :ref:`Dubbo proxy filter `. -* event: added :ref:`loop duration and poll delay statistics `. * ext_authz: added option to `ext_authz` that allows the filter clearing route cache. * eds: added support to specify max time for which endpoints can be used :ref:`gRPC filter `. * http: mitigated a race condition with the :ref:`delayed_close_timeout` where it could trigger while actively flushing a pending write buffer for a downstream connection. 
diff --git a/docs/root/operations/operations.rst b/docs/root/operations/operations.rst index 54f9c4a89e0be..98005b9977ba6 100644 --- a/docs/root/operations/operations.rst +++ b/docs/root/operations/operations.rst @@ -13,5 +13,3 @@ Operations and administration runtime fs_flags traffic_tapping - performance - diff --git a/docs/root/operations/performance.rst b/docs/root/operations/performance.rst deleted file mode 100644 index fb92de1d3cbb4..0000000000000 --- a/docs/root/operations/performance.rst +++ /dev/null @@ -1,40 +0,0 @@ -.. _operations_performance: - -Performance -=========== - -Envoy is architected to optimize scalability and resource utilization by running an event loop on a -:ref:`small number of threads `. The "main" thread is responsible for -control plane processing, and each "worker" thread handles a portion of the data plane processing. -Envoy exposes two statistics to monitor performance of the event loops on all these threads. - -* **Loop duration:** Some amount of processing is done on each iteration of the event loop. This - amount will naturally vary with changes in load. However, if one or more threads have an unusually - long-tailed loop duration, it may indicate a performance issue. For example, work might not be - distributed fairly across the worker threads, or there may be a long blocking operation in an - extension that's impeding progress. - -* **Poll delay:** On each iteration of the event loop, the event dispatcher polls for I/O events - and "wakes up" either when some I/O events are ready to be processed or when a timeout fires, - whichever occurs first. In the case of a timeout, we can measure the difference between the expected - wakeup time and the actual wakeup time after polling; this difference is called the "poll delay." 
- It's normal to see some small poll delay, usually equal to the kernel scheduler's "time slice" or - "quantum"---this depends on the specific operating system on which Envoy is running---but if this - number elevates substantially above its normal observed baseline, it likely indicates kernel - scheduler delays. - -Statistics ----------- - -The event dispatcher for the main thread has a statistics tree rooted at *server.dispatcher.*, and -the event dispatcher for each worker thread has a statistics tree rooted at -*listener_manager.worker_.dispatcher.*, each with the following statistics: - -.. csv-table:: - :header: Name, Type, Description - :widths: 1, 1, 2 - - loop_duration_us, Histogram, Event loop durations in microseconds - poll_delay_us, Histogram, Polling delays in microseconds - -Note that any auxiliary threads are not included here. diff --git a/include/envoy/event/dispatcher.h b/include/envoy/event/dispatcher.h index 0024b3a2e7795..1e0b52a10f270 100644 --- a/include/envoy/event/dispatcher.h +++ b/include/envoy/event/dispatcher.h @@ -17,29 +17,11 @@ #include "envoy/network/listen_socket.h" #include "envoy/network/listener.h" #include "envoy/network/transport_socket.h" -#include "envoy/stats/scope.h" -#include "envoy/stats/stats_macros.h" #include "envoy/thread/thread.h" namespace Envoy { namespace Event { -/** - * All dispatcher stats. @see stats_macros.h - */ -// clang-format off -#define ALL_DISPATCHER_STATS(HISTOGRAM) \ - HISTOGRAM(loop_duration_us) \ - HISTOGRAM(poll_delay_us) -// clang-format on - -/** - * Struct definition for all dispatcher stats. @see stats_macros.h - */ -struct DispatcherStats { - ALL_DISPATCHER_STATS(GENERATE_HISTOGRAM_STRUCT) -}; - /** * Callback invoked when a dispatcher post() runs. */ @@ -57,15 +39,6 @@ class Dispatcher { */ virtual TimeSource& timeSource() PURE; - /** - * Initialize stats for this dispatcher. 
Note that this can't generally be done at construction - * time, since the main and worker thread dispatchers are constructed before - * ThreadLocalStoreImpl::initializeThreading. - * @param scope the scope to contain the new per-dispatcher stats created here. - * @param prefix the stats prefix to identify this dispatcher. - */ - virtual void initializeStats(Stats::Scope& scope, const std::string& prefix) PURE; - /** * Clear any items in the deferred deletion queue. */ diff --git a/include/envoy/server/worker.h b/include/envoy/server/worker.h index b7af5aae7d333..cb845b318158c 100644 --- a/include/envoy/server/worker.h +++ b/include/envoy/server/worker.h @@ -38,12 +38,10 @@ class Worker { virtual uint64_t numConnections() PURE; /** - * Start the worker thread. The worker will output thread-specific stats under the given scope. + * Start the worker thread. * @param guard_dog supplies the guard dog to use for thread watching. - * @param scope the scope to add the new worker stats to. - * @param prefix the prefix for the new stats, identifying this worker. */ - virtual void start(GuardDog& guard_dog, Stats::Scope& scope, const std::string& prefix) PURE; + virtual void start(GuardDog& guard_dog) PURE; /** * Stop the worker thread. 
diff --git a/source/common/event/dispatcher_impl.cc b/source/common/event/dispatcher_impl.cc index 9d7badc88f4c0..8e737de4de17f 100644 --- a/source/common/event/dispatcher_impl.cc +++ b/source/common/event/dispatcher_impl.cc @@ -41,13 +41,6 @@ DispatcherImpl::DispatcherImpl(Buffer::WatermarkFactoryPtr&& factory, Api::Api& DispatcherImpl::~DispatcherImpl() {} -void DispatcherImpl::initializeStats(Stats::Scope& scope, const std::string& prefix) { - stats_prefix_ = prefix + "dispatcher"; - stats_ = std::make_unique( - DispatcherStats{ALL_DISPATCHER_STATS(POOL_HISTOGRAM_PREFIX(scope, stats_prefix_ + "."))}); - base_scheduler_.initializeStats(stats_.get()); -} - void DispatcherImpl::clearDeferredDeleteList() { ASSERT(isThreadSafe()); std::vector* to_delete = current_to_delete_; @@ -165,9 +158,6 @@ void DispatcherImpl::post(std::function callback) { void DispatcherImpl::run(RunType type) { run_tid_ = api_.threadFactory().currentThreadId(); - if (!stats_prefix_.empty()) { - ENVOY_LOG(debug, "running {} on thread {}", stats_prefix_, run_tid_->debugString()); - } // Flush all post callbacks before we run the event loop. We do this because there are post // callbacks that have to get run before the initial event loop starts running. 
libevent does diff --git a/source/common/event/dispatcher_impl.h b/source/common/event/dispatcher_impl.h index b712f22d879e4..a51dce3e6aaff 100644 --- a/source/common/event/dispatcher_impl.h +++ b/source/common/event/dispatcher_impl.h @@ -3,7 +3,6 @@ #include #include #include -#include #include #include "envoy/api/api.h" @@ -11,7 +10,6 @@ #include "envoy/event/deferred_deletable.h" #include "envoy/event/dispatcher.h" #include "envoy/network/connection_handler.h" -#include "envoy/stats/scope.h" #include "common/common/logger.h" #include "common/common/thread.h" @@ -38,7 +36,6 @@ class DispatcherImpl : Logger::Loggable, public Dispatcher { // Event::Dispatcher TimeSource& timeSource() override { return api_.timeSource(); } - void initializeStats(Stats::Scope& scope, const std::string& prefix) override; void clearDeferredDeleteList() override; Network::ConnectionPtr createServerConnection(Network::ConnectionSocketPtr&& socket, @@ -75,8 +72,6 @@ class DispatcherImpl : Logger::Loggable, public Dispatcher { bool isThreadSafe() const { return run_tid_ == nullptr || run_tid_->isCurrentThreadId(); } Api::Api& api_; - std::string stats_prefix_; - std::unique_ptr stats_; Thread::ThreadIdPtr run_tid_; Buffer::WatermarkFactoryPtr buffer_factory_; LibeventScheduler base_scheduler_; diff --git a/source/common/event/libevent_scheduler.cc b/source/common/event/libevent_scheduler.cc index df22b45ba7371..5b35ffd18447e 100644 --- a/source/common/event/libevent_scheduler.cc +++ b/source/common/event/libevent_scheduler.cc @@ -3,17 +3,9 @@ #include "common/common/assert.h" #include "common/event/timer_impl.h" -#include "event2/util.h" - namespace Envoy { namespace Event { -namespace { -void recordTimeval(Stats::Histogram& histogram, const timeval& tv) { - histogram.recordValue(tv.tv_sec * 1000000 + tv.tv_usec); -} -} // namespace - LibeventScheduler::LibeventScheduler() : libevent_(event_base_new()) { // The dispatcher won't work as expected if libevent hasn't been configured to use 
threads. RELEASE_ASSERT(Libevent::Global::initialized(), ""); @@ -49,55 +41,5 @@ void LibeventScheduler::run(Dispatcher::RunType mode) { void LibeventScheduler::loopExit() { event_base_loopexit(libevent_.get(), nullptr); } -void LibeventScheduler::initializeStats(DispatcherStats* stats) { - stats_ = stats; - // These are thread safe. - evwatch_prepare_new(libevent_.get(), &onPrepare, this); - evwatch_check_new(libevent_.get(), &onCheck, this); -} - -void LibeventScheduler::onPrepare(evwatch*, const evwatch_prepare_cb_info* info, void* arg) { - // `self` is `this`, passed in from evwatch_prepare_new. - auto self = static_cast(arg); - - // Record poll timeout and prepare time for this iteration of the event loop. The timeout is the - // expected polling duration, whereas the actual polling duration will be the difference measured - // between the prepare time and the check time immediately after polling. These are compared in - // onCheck to compute the poll_delay stat. - self->timeout_set_ = evwatch_prepare_get_timeout(info, &self->timeout_); - evutil_gettimeofday(&self->prepare_time_, nullptr); - - // If we have a check time available from a previous iteration of the event loop (that is, all but - // the first), compute the loop_duration stat. - if (self->check_time_.tv_sec != 0) { - timeval delta; - evutil_timersub(&self->prepare_time_, &self->check_time_, &delta); - recordTimeval(self->stats_->loop_duration_us_, delta); - } -} - -void LibeventScheduler::onCheck(evwatch*, const evwatch_check_cb_info*, void* arg) { - // `self` is `this`, passed in from evwatch_check_new. - auto self = static_cast(arg); - - // Record check time for this iteration of the event loop. Use this together with prepare time - // from above to compute the actual polling duration, and store it for the next iteration of the - // event loop to compute the loop duration. 
- evutil_gettimeofday(&self->check_time_, nullptr); - if (self->timeout_set_) { - timeval delta, delay; - evutil_timersub(&self->check_time_, &self->prepare_time_, &delta); - evutil_timersub(&delta, &self->timeout_, &delay); - - // Delay can be negative, meaning polling completed early. This happens in normal operation, - // either because I/O was ready before we hit the timeout, or just because the kernel was - // feeling saucy. Disregard negative delays in stats, since they don't indicate anything - // particularly useful. - if (delay.tv_sec >= 0) { - recordTimeval(self->stats_->poll_delay_us_, delay); - } - } -} - } // namespace Event } // namespace Envoy diff --git a/source/common/event/libevent_scheduler.h b/source/common/event/libevent_scheduler.h index b9157bf4059b5..5a41e1ccf6c4f 100644 --- a/source/common/event/libevent_scheduler.h +++ b/source/common/event/libevent_scheduler.h @@ -6,7 +6,6 @@ #include "common/event/libevent.h" #include "event2/event.h" -#include "event2/watch.h" namespace Envoy { namespace Event { @@ -41,22 +40,8 @@ class LibeventScheduler : public Scheduler { */ event_base& base() { return *libevent_; } - /** - * Start writing stats once thread-local storage is ready to receive them (see - * ThreadLocalStoreImpl::initializeThreading). 
- */ - void initializeStats(DispatcherStats* stats_); - private: - static void onPrepare(evwatch*, const evwatch_prepare_cb_info* info, void* arg); - static void onCheck(evwatch*, const evwatch_check_cb_info*, void* arg); - Libevent::BasePtr libevent_; - DispatcherStats* stats_{}; // stats owned by the containing DispatcherImpl - bool timeout_set_{}; // whether there is a poll timeout in the current event loop iteration - timeval timeout_{}; // the poll timeout for the current event loop iteration, if available - timeval prepare_time_{}; // timestamp immediately before polling - timeval check_time_{}; // timestamp immediately after polling }; } // namespace Event diff --git a/source/server/listener_manager_impl.cc b/source/server/listener_manager_impl.cc index b8bc718037e2b..fc64828f82a7b 100644 --- a/source/server/listener_manager_impl.cc +++ b/source/server/listener_manager_impl.cc @@ -672,8 +672,7 @@ void ListenerImpl::setSocket(const Network::SocketSharedPtr& socket) { ListenerManagerImpl::ListenerManagerImpl(Instance& server, ListenerComponentFactory& listener_factory, WorkerFactory& worker_factory) - : server_(server), factory_(listener_factory), - scope_(server.stats().createScope("listener_manager.")), stats_(generateStats(*scope_)), + : server_(server), factory_(listener_factory), stats_(generateStats(server.stats())), config_tracker_entry_(server.admin().getConfigTracker().add( "listeners", [this] { return dumpListenerConfigs(); })) { for (uint32_t i = 0; i < server.options().concurrency(); i++) { @@ -719,7 +718,9 @@ ProtobufTypes::MessagePtr ListenerManagerImpl::dumpListenerConfigs() { } ListenerManagerStats ListenerManagerImpl::generateStats(Stats::Scope& scope) { - return {ALL_LISTENER_MANAGER_STATS(POOL_COUNTER(scope), POOL_GAUGE(scope))}; + const std::string final_prefix = "listener_manager."; + return {ALL_LISTENER_MANAGER_STATS(POOL_COUNTER_PREFIX(scope, final_prefix), + POOL_GAUGE_PREFIX(scope, final_prefix))}; } bool 
ListenerManagerImpl::addOrUpdateListener(const envoy::api::v2::Listener& config, @@ -1005,13 +1006,12 @@ void ListenerManagerImpl::startWorkers(GuardDog& guard_dog) { ENVOY_LOG(info, "all dependencies initialized. starting workers"); ASSERT(!workers_started_); workers_started_ = true; - uint32_t i = 0; for (const auto& worker : workers_) { ASSERT(warming_listeners_.empty()); for (const auto& listener : active_listeners_) { addListenerToWorker(*worker, *listener); } - worker->start(guard_dog, *scope_, fmt::format("worker_{}.", i++)); + worker->start(guard_dog); } } diff --git a/source/server/listener_manager_impl.h b/source/server/listener_manager_impl.h index b27780fb486c8..41da1135d4443 100644 --- a/source/server/listener_manager_impl.h +++ b/source/server/listener_manager_impl.h @@ -177,7 +177,6 @@ class ListenerManagerImpl : public ListenerManager, Logger::Loggable draining_listeners_; std::list workers_; bool workers_started_{}; - Stats::ScopePtr scope_; ListenerManagerStats stats_; ConfigTracker::EntryOwnerPtr config_tracker_entry_; LdsApiPtr lds_api_; diff --git a/source/server/server.cc b/source/server/server.cc index 38703d0d43a53..a75d0d291ba47 100644 --- a/source/server/server.cc +++ b/source/server/server.cc @@ -311,9 +311,6 @@ void InstanceImpl::initialize(const Options& options, // We can now initialize stats for threading. stats_store_.initializeThreading(*dispatcher_, thread_local_); - // It's now safe to start writing stats from the main thread's dispatcher. - dispatcher_->initializeStats(stats_store_, "server."); - // Runtime gets initialized before the main configuration since during main configuration // load things may grab a reference to the loader for later use. 
runtime_singleton_ = std::make_unique( diff --git a/source/server/worker_impl.cc b/source/server/worker_impl.cc index 40cf1a64674d3..660d1ac1cf739 100644 --- a/source/server/worker_impl.cc +++ b/source/server/worker_impl.cc @@ -67,11 +67,10 @@ void WorkerImpl::removeListener(Network::ListenerConfig& listener, }); } -void WorkerImpl::start(GuardDog& guard_dog, Stats::Scope& scope, const std::string& prefix) { +void WorkerImpl::start(GuardDog& guard_dog) { ASSERT(!thread_); thread_ = api_.threadFactory().createThread([this, &guard_dog]() -> void { threadRoutine(guard_dog); }); - dispatcher_->initializeStats(scope, prefix); } void WorkerImpl::stop() { diff --git a/source/server/worker_impl.h b/source/server/worker_impl.h index 1b9c6bb96c09b..b59c7356f2134 100644 --- a/source/server/worker_impl.h +++ b/source/server/worker_impl.h @@ -44,7 +44,7 @@ class WorkerImpl : public Worker, Logger::Loggable { void addListener(Network::ListenerConfig& listener, AddListenerCompletion completion) override; uint64_t numConnections() override; void removeListener(Network::ListenerConfig& listener, std::function completion) override; - void start(GuardDog& guard_dog, Stats::Scope& scope, const std::string& prefix) override; + void start(GuardDog& guard_dog) override; void stop() override; void stopListener(Network::ListenerConfig& listener) override; void stopListeners() override; diff --git a/test/common/event/BUILD b/test/common/event/BUILD index 4e432fb57625e..0a5ad582e66b2 100644 --- a/test/common/event/BUILD +++ b/test/common/event/BUILD @@ -17,7 +17,6 @@ envoy_cc_test( "//source/common/event:dispatcher_lib", "//source/common/stats:isolated_store_lib", "//test/mocks:common_lib", - "//test/mocks/stats:stats_mocks", "//test/test_common:utility_lib", ], ) diff --git a/test/common/event/dispatcher_impl_test.cc b/test/common/event/dispatcher_impl_test.cc index 951617feec356..af0cd7e7579c0 100644 --- a/test/common/event/dispatcher_impl_test.cc +++ 
b/test/common/event/dispatcher_impl_test.cc @@ -8,17 +8,12 @@ #include "common/stats/isolated_store_impl.h" #include "test/mocks/common.h" -#include "test/mocks/stats/mocks.h" #include "test/test_common/utility.h" #include "gmock/gmock.h" #include "gtest/gtest.h" -using testing::_; using testing::InSequence; -using testing::NiceMock; -using testing::Return; -using testing::StartsWith; namespace Envoy { namespace Event { @@ -85,7 +80,6 @@ class DispatcherImplTest : public testing::Test { dispatcher_thread_->join(); } - NiceMock scope_; // Used in InitializeStats, must outlive dispatcher_->exit(). Api::ApiPtr api_; Thread::ThreadPtr dispatcher_thread_; DispatcherPtr dispatcher_; @@ -96,12 +90,6 @@ class DispatcherImplTest : public testing::Test { TimerPtr keepalive_timer_; }; -TEST_F(DispatcherImplTest, InitializeStats) { - EXPECT_CALL(scope_, histogram("test.dispatcher.loop_duration_us")); - EXPECT_CALL(scope_, histogram("test.dispatcher.poll_delay_us")); - dispatcher_->initializeStats(scope_, "test."); -} - TEST_F(DispatcherImplTest, Post) { dispatcher_->post([this]() { { diff --git a/test/mocks/event/mocks.cc b/test/mocks/event/mocks.cc index f1d5cdcadb19c..d79fe9db839cb 100644 --- a/test/mocks/event/mocks.cc +++ b/test/mocks/event/mocks.cc @@ -16,7 +16,6 @@ namespace Envoy { namespace Event { MockDispatcher::MockDispatcher() { - ON_CALL(*this, initializeStats(_, _)).WillByDefault(Return()); ON_CALL(*this, clearDeferredDeleteList()).WillByDefault(Invoke([this]() -> void { to_delete_.clear(); })); diff --git a/test/mocks/event/mocks.h b/test/mocks/event/mocks.h index 4237a3b57ca0b..a71b0e60998c9 100644 --- a/test/mocks/event/mocks.h +++ b/test/mocks/event/mocks.h @@ -84,7 +84,6 @@ class MockDispatcher : public Dispatcher { } // Event::Dispatcher - MOCK_METHOD2(initializeStats, void(Stats::Scope&, const std::string&)); MOCK_METHOD0(clearDeferredDeleteList, void()); MOCK_METHOD2(createServerConnection_, Network::Connection*(Network::ConnectionSocket* socket, diff 
--git a/test/mocks/server/mocks.h b/test/mocks/server/mocks.h index cc99817ff7064..3374d8b5f8b85 100644 --- a/test/mocks/server/mocks.h +++ b/test/mocks/server/mocks.h @@ -307,7 +307,7 @@ class MockWorker : public Worker { MOCK_METHOD0(numConnections, uint64_t()); MOCK_METHOD2(removeListener, void(Network::ListenerConfig& listener, std::function completion)); - MOCK_METHOD3(start, void(GuardDog& guard_dog, Stats::Scope& scope, const std::string& prefix)); + MOCK_METHOD1(start, void(GuardDog& guard_dog)); MOCK_METHOD0(stop, void()); MOCK_METHOD1(stopListener, void(Network::ListenerConfig& listener)); MOCK_METHOD0(stopListeners, void()); diff --git a/test/mocks/thread_local/mocks.cc b/test/mocks/thread_local/mocks.cc index 5ccc69fcf21d9..cfabd7a7f52f0 100644 --- a/test/mocks/thread_local/mocks.cc +++ b/test/mocks/thread_local/mocks.cc @@ -13,7 +13,6 @@ MockInstance::MockInstance() { ON_CALL(*this, allocateSlot()).WillByDefault(Invoke(this, &MockInstance::allocateSlot_)); ON_CALL(*this, runOnAllThreads(_)).WillByDefault(Invoke(this, &MockInstance::runOnAllThreads_)); ON_CALL(*this, shutdownThread()).WillByDefault(Invoke(this, &MockInstance::shutdownThread_)); - ON_CALL(*this, dispatcher()).WillByDefault(ReturnRef(dispatcher_)); } MockInstance::~MockInstance() { shutdownThread_(); } diff --git a/test/server/listener_manager_impl_test.cc b/test/server/listener_manager_impl_test.cc index 87399c190bb8c..53f9e086ba7ee 100644 --- a/test/server/listener_manager_impl_test.cc +++ b/test/server/listener_manager_impl_test.cc @@ -771,7 +771,7 @@ version_info: version2 // Start workers. 
EXPECT_CALL(*worker_, addListener(_, _)); - EXPECT_CALL(*worker_, start(_, _, _)); + EXPECT_CALL(*worker_, start(_)); manager_->startWorkers(guard_dog_); worker_->callAddCompletion(true); @@ -954,7 +954,7 @@ version_info: version5 TEST_F(ListenerManagerImplTest, AddDrainingListener) { InSequence s; - EXPECT_CALL(*worker_, start(_, _, _)); + EXPECT_CALL(*worker_, start(_)); manager_->startWorkers(guard_dog_); // Add foo listener directly into active. @@ -1003,7 +1003,7 @@ TEST_F(ListenerManagerImplTest, AddDrainingListener) { TEST_F(ListenerManagerImplTest, CantBindSocket) { InSequence s; - EXPECT_CALL(*worker_, start(_, _, _)); + EXPECT_CALL(*worker_, start(_)); manager_->startWorkers(guard_dog_); const std::string listener_foo_json = R"EOF( @@ -1025,7 +1025,7 @@ TEST_F(ListenerManagerImplTest, CantBindSocket) { TEST_F(ListenerManagerImplTest, ListenerDraining) { InSequence s; - EXPECT_CALL(*worker_, start(_, _, _)); + EXPECT_CALL(*worker_, start(_)); manager_->startWorkers(guard_dog_); const std::string listener_foo_json = R"EOF( @@ -1073,7 +1073,7 @@ TEST_F(ListenerManagerImplTest, ListenerDraining) { TEST_F(ListenerManagerImplTest, RemoveListener) { InSequence s; - EXPECT_CALL(*worker_, start(_, _, _)); + EXPECT_CALL(*worker_, start(_)); manager_->startWorkers(guard_dog_); // Remove an unknown listener. @@ -1149,7 +1149,7 @@ TEST_F(ListenerManagerImplTest, RemoveListener) { TEST_F(ListenerManagerImplTest, AddListenerFailure) { InSequence s; - EXPECT_CALL(*worker_, start(_, _, _)); + EXPECT_CALL(*worker_, start(_)); manager_->startWorkers(guard_dog_); // Add foo listener into active. @@ -1197,7 +1197,7 @@ TEST_F(ListenerManagerImplTest, StatsNameValidCharacterTest) { TEST_F(ListenerManagerImplTest, DuplicateAddressDontBind) { InSequence s; - EXPECT_CALL(*worker_, start(_, _, _)); + EXPECT_CALL(*worker_, start(_)); manager_->startWorkers(guard_dog_); // Add foo listener into warming. 
@@ -1250,7 +1250,7 @@ TEST_F(ListenerManagerImplTest, DuplicateAddressDontBind) { TEST_F(ListenerManagerImplTest, EarlyShutdown) { // If stopWorkers is called before the workers are started, it should be a no-op: they should be // neither started nor stopped. - EXPECT_CALL(*worker_, start(_, _, _)).Times(0); + EXPECT_CALL(*worker_, start(_)).Times(0); EXPECT_CALL(*worker_, stop()).Times(0); manager_->stopWorkers(); } diff --git a/test/server/worker_impl_test.cc b/test/server/worker_impl_test.cc index 6e669107f12d6..c59063e3e3315 100644 --- a/test/server/worker_impl_test.cc +++ b/test/server/worker_impl_test.cc @@ -71,8 +71,7 @@ TEST_F(WorkerImplTest, BasicFlow) { ci.setReady(); }); - NiceMock store; - worker_.start(guard_dog_, store, "test"); + worker_.start(guard_dog_); ci.waitReady(); // After a worker is started adding/stopping/removing a listener happens on the worker thread. @@ -141,8 +140,7 @@ TEST_F(WorkerImplTest, ListenerException) { .WillOnce(Throw(Network::CreateListenerException("failed"))); worker_.addListener(listener, [](bool success) -> void { EXPECT_FALSE(success); }); - NiceMock store; - worker_.start(guard_dog_, store, "test"); + worker_.start(guard_dog_); worker_.stop(); } diff --git a/tools/spelling_dictionary.txt b/tools/spelling_dictionary.txt index 25c768b3558d7..d16d5d0047a17 100644 --- a/tools/spelling_dictionary.txt +++ b/tools/spelling_dictionary.txt @@ -427,7 +427,6 @@ evbuffer evbuffers evconnlistener evthread -evwatch exe execlp facto From aedb8ca66504160dbadaf9dcad4cb9106d96518d Mon Sep 17 00:00:00 2001 From: cmluciano Date: Thu, 18 Apr 2019 23:26:05 -0400 Subject: [PATCH 152/165] build: add ppc build badge to README (#6629) Description: add ppc64le badge that links to Jenkins build server Risk Level: Low - Docs only Testing: Viewed in browser and through GH markdown viewer Docs Changes: N/A Release Notes: support ppc64le CPU architecture Fixes: #5196 Signed-off-by: Christopher M. 
Luciano --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index 545684e3435c3..12d3df15bbfef 100644 --- a/README.md +++ b/README.md @@ -9,6 +9,8 @@ involved and how Envoy plays a role, read the CNCF [announcement](https://www.cncf.io/blog/2017/09/13/cncf-hosts-envoy/). [![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/1266/badge)](https://bestpractices.coreinfrastructure.org/projects/1266) +[![CircleCI](https://circleci.com/gh/envoyproxy/envoy/tree/master.svg?style=shield)](https://circleci.com/gh/envoyproxy/envoy/tree/master) +[![Jenkins](https://img.shields.io/jenkins/s/https/powerci.osuosl.org/job/build-envoy-master/badge/icon/.svg?label=ppc64le%20build)](http://powerci.osuosl.org/job/build-envoy-master/) ## Documentation From 8789695f4ca4ea5a73d2b85bb75b557ba925156b Mon Sep 17 00:00:00 2001 From: Elisha Ziskind Date: Fri, 19 Apr 2019 05:02:01 -0400 Subject: [PATCH 153/165] access log: add response code details to the access log formatter (#6626) Description: add formatting for the "response code details" string recently added to the StreamInfo (#6530) Risk Level: low Testing: unit tests Docs Changes: updated Release Notes: updated Signed-off-by: Elisha Ziskind --- api/envoy/data/accesslog/v2/accesslog.proto | 3 +++ docs/root/configuration/access_log.rst | 10 ++++++++++ docs/root/intro/version_history.rst | 1 + source/common/access_log/access_log_formatter.cc | 5 +++++ .../http_grpc/grpc_access_log_impl.cc | 3 +++ .../common/access_log/access_log_formatter_test.cc | 14 ++++++++++++++ .../http_grpc/grpc_access_log_impl_test.cc | 2 ++ 7 files changed, 38 insertions(+) diff --git a/api/envoy/data/accesslog/v2/accesslog.proto b/api/envoy/data/accesslog/v2/accesslog.proto index b387433394e55..f8058dedc3462 100644 --- a/api/envoy/data/accesslog/v2/accesslog.proto +++ b/api/envoy/data/accesslog/v2/accesslog.proto @@ -332,4 +332,7 @@ message HTTPResponseProperties { // Map of trailers configured to be 
logged. map response_trailers = 5; + + // The HTTP response code details. + string response_code_details = 6; } diff --git a/docs/root/configuration/access_log.rst b/docs/root/configuration/access_log.rst index bfc28c108fde2..4faa310a4848a 100644 --- a/docs/root/configuration/access_log.rst +++ b/docs/root/configuration/access_log.rst @@ -162,6 +162,16 @@ The following command operators are supported: TCP Not implemented ("-"). +.. _config_access_log_format_response_code_details: + +%RESPONSE_CODE_DETAILS% + HTTP + HTTP response code details provides additional information about the response code, such as + who set it (the upstream or envoy) and why. + + TCP + Not implemented ("-") + %BYTES_SENT% HTTP Body bytes sent. For WebSocket connection it will also include response header bytes. diff --git a/docs/root/intro/version_history.rst b/docs/root/intro/version_history.rst index e496730c8b17c..cc7a73b1bd9be 100644 --- a/docs/root/intro/version_history.rst +++ b/docs/root/intro/version_history.rst @@ -3,6 +3,7 @@ Version history 1.11.0 (Pending) ================ +* access log: added a new field for response code details in :ref:`file access logger` and :ref:`gRPC access logger`. * dubbo_proxy: support the :ref:`Dubbo proxy filter `. * ext_authz: added option to `ext_authz` that allows the filter clearing route cache. * eds: added support to specify max time for which endpoints can be used :ref:`gRPC filter `. diff --git a/source/common/access_log/access_log_formatter.cc b/source/common/access_log/access_log_formatter.cc index 6f51ee2cfc615..0da5191ca7a46 100644 --- a/source/common/access_log/access_log_formatter.cc +++ b/source/common/access_log/access_log_formatter.cc @@ -310,6 +310,11 @@ StreamInfoFormatter::StreamInfoFormatter(const std::string& field_name) { return stream_info.responseCode() ? 
fmt::format_int(stream_info.responseCode().value()).str() : "0"; }; + } else if (field_name == "RESPONSE_CODE_DETAILS") { + field_extractor_ = [](const StreamInfo::StreamInfo& stream_info) { + return stream_info.responseCodeDetails() ? stream_info.responseCodeDetails().value() + : UnspecifiedValueString; + }; } else if (field_name == "BYTES_SENT") { field_extractor_ = [](const StreamInfo::StreamInfo& stream_info) { return fmt::format_int(stream_info.bytesSent()).str(); diff --git a/source/extensions/access_loggers/http_grpc/grpc_access_log_impl.cc b/source/extensions/access_loggers/http_grpc/grpc_access_log_impl.cc index 24132ccf2081f..8bbea843f71e8 100644 --- a/source/extensions/access_loggers/http_grpc/grpc_access_log_impl.cc +++ b/source/extensions/access_loggers/http_grpc/grpc_access_log_impl.cc @@ -361,6 +361,9 @@ void HttpGrpcAccessLog::log(const Http::HeaderMap* request_headers, if (stream_info.responseCode()) { response_properties->mutable_response_code()->set_value(stream_info.responseCode().value()); } + if (stream_info.responseCodeDetails()) { + response_properties->set_response_code_details(stream_info.responseCodeDetails().value()); + } response_properties->set_response_headers_bytes(response_headers->byteSize()); response_properties->set_response_body_bytes(stream_info.bytesSent()); if (!response_headers_to_log_.empty()) { diff --git a/test/common/access_log/access_log_formatter_test.cc b/test/common/access_log/access_log_formatter_test.cc index adcb54a50b659..1c9741ca1a5af 100644 --- a/test/common/access_log/access_log_formatter_test.cc +++ b/test/common/access_log/access_log_formatter_test.cc @@ -125,6 +125,20 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { EXPECT_EQ("0", response_code_format.format(header, header, header, stream_info)); } + { + StreamInfoFormatter response_format("RESPONSE_CODE_DETAILS"); + absl::optional rc_details; + EXPECT_CALL(stream_info, responseCodeDetails()).WillRepeatedly(ReturnRef(rc_details)); + EXPECT_EQ("-", 
response_format.format(header, header, header, stream_info)); + } + + { + StreamInfoFormatter response_code_format("RESPONSE_CODE_DETAILS"); + absl::optional rc_details{"via_upstream"}; + EXPECT_CALL(stream_info, responseCodeDetails()).WillRepeatedly(ReturnRef(rc_details)); + EXPECT_EQ("via_upstream", response_code_format.format(header, header, header, stream_info)); + } + { StreamInfoFormatter bytes_sent_format("BYTES_SENT"); EXPECT_CALL(stream_info, bytesSent()).WillOnce(Return(1)); diff --git a/test/extensions/access_loggers/http_grpc/grpc_access_log_impl_test.cc b/test/extensions/access_loggers/http_grpc/grpc_access_log_impl_test.cc index 4e3ee8e2761f0..8d76156293352 100644 --- a/test/extensions/access_loggers/http_grpc/grpc_access_log_impl_test.cc +++ b/test/extensions/access_loggers/http_grpc/grpc_access_log_impl_test.cc @@ -227,6 +227,7 @@ TEST_F(HttpGrpcAccessLogTest, Marshalling) { stream_info.addBytesReceived(10); stream_info.addBytesSent(20); stream_info.response_code_ = 200; + stream_info.response_code_details_ = "via_upstream"; ON_CALL(stream_info, hasResponseFlag(StreamInfo::ResponseFlag::FaultInjected)) .WillByDefault(Return(true)); @@ -300,6 +301,7 @@ TEST_F(HttpGrpcAccessLogTest, Marshalling) { value: 200 response_headers_bytes: 10 response_body_bytes: 20 + response_code_details: "via_upstream" )EOF"); access_log_->log(&request_headers, &response_headers, nullptr, stream_info); } From 4e01b0b71d8137f4cf7ef45154a09c242c28a2b8 Mon Sep 17 00:00:00 2001 From: Michael Puncel Date: Fri, 19 Apr 2019 12:01:06 -0400 Subject: [PATCH 154/165] http timeout integration test: wait for 15s for upstream reset (#6646) This test waits for the upstream to see a reset which confirms that the router filter did the right thing when the global timeout is hit. However since this involves the network, we would occasionally see the reset after the wait call. Since we were waiting for 0ms we'd get flakes. 15s is hopefully high enough that the test will succeed reliably. 
Signed-off-by: Michael Puncel --- test/integration/http_timeout_integration_test.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/integration/http_timeout_integration_test.cc b/test/integration/http_timeout_integration_test.cc index 00b0f4c65ea8a..11539865ffc6a 100644 --- a/test/integration/http_timeout_integration_test.cc +++ b/test/integration/http_timeout_integration_test.cc @@ -36,7 +36,7 @@ TEST_P(HttpTimeoutIntegrationTest, GlobalTimeout) { // Ensure we got a timeout downstream and canceled the upstream request. response->waitForHeaders(); - ASSERT_TRUE(upstream_request_->waitForReset(std::chrono::milliseconds(0))); + ASSERT_TRUE(upstream_request_->waitForReset(std::chrono::seconds(15))); codec_client_->close(); From 7a8f4b06f1cf15b5d9cd4758ca6231b3033aec59 Mon Sep 17 00:00:00 2001 From: Snow Pettersen Date: Fri, 19 Apr 2019 09:54:15 -0700 Subject: [PATCH 155/165] docs: update docs to recommend /retest repokitteh command (#6655) Signed-off-by: Snow Pettersen --- CONTRIBUTING.md | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index fa45143de8bd7..345192d66ddfe 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -313,9 +313,17 @@ should only be done to correct a DCO mistake. ## Triggering CI re-run without making changes -Sometimes CI test runs fail due to obvious resource problems or other issues -which are not related to your PR. It may be desirable to re-trigger CI without -making any code changes. Consider adding an alias into your `.gitconfig` file: +To rerun failed tasks in CI, add a comment with the line + +``` +/retest +``` + +in it. This should rebuild only the failed tasks. + +Sometimes tasks will be stuck in CI and won't be marked as failed, which means +the above command won't work. Should this happen, pushing an empty commit should +re-run all the CI tasks.
Consider adding an alias into your `.gitconfig` file: ``` [alias] From 3abb7d051c55f190d19b531a2fd1360842b334a5 Mon Sep 17 00:00:00 2001 From: Fred Douglas <43351173+fredlas@users.noreply.github.com> Date: Fri, 19 Apr 2019 15:27:02 -0400 Subject: [PATCH 156/165] config: fix delta xDS's use of (un)subscribe fields, more explicit protocol spec (#6545) I realized that, with the unreliable queue implementation copied from SotW xDS, delta xDS could get into a state where Envoy thinks it has subscribed, but the server hasn't heard the subscription, with no way for either to realize the mistake. I fixed that by converting the queue setup to a cleaner "do I currently want to send a request?" with the request's (un)subscriptions only populated immediately before the request is actually sent into gRPC. While doing that, I further realized there was a problem when a given resource was subscribed then unsubscribed (or reversed), all in between request sends. I made sure Envoy handles that sensibly, and added explicit requirements to the xDS protocol spec to ensure servers will also handle it sensibly. Added unit tests for those fixes.
Risk Level: low Testing: added unit tests for bugs uncovered #4991 Signed-off-by: Fred Douglas --- api/XDS_PROTOCOL.md | 29 +- include/envoy/config/grpc_mux.h | 4 +- include/envoy/config/subscription.h | 10 +- .../common/config/delta_subscription_impl.h | 264 ++++++++++-------- .../config/filesystem_subscription_impl.h | 4 +- source/common/config/grpc_mux_impl.cc | 2 +- source/common/config/grpc_mux_impl.h | 8 +- .../config/grpc_mux_subscription_impl.h | 10 +- source/common/config/grpc_subscription_impl.h | 6 +- source/common/config/http_subscription_impl.h | 8 +- .../config/delta_subscription_impl_test.cc | 106 ++++++- .../config/delta_subscription_test_harness.h | 42 +-- .../filesystem_subscription_test_harness.h | 6 +- .../config/grpc_subscription_test_harness.h | 26 +- .../config/http_subscription_test_harness.h | 20 +- test/common/config/subscription_impl_test.cc | 7 +- .../common/config/subscription_test_harness.h | 6 +- test/mocks/config/mocks.cc | 2 +- test/mocks/config/mocks.h | 8 +- tools/spelling_dictionary.txt | 1 + 20 files changed, 358 insertions(+), 211 deletions(-) diff --git a/api/XDS_PROTOCOL.md b/api/XDS_PROTOCOL.md index 2401c15c9309f..c2bd0aa0cbee2 100644 --- a/api/XDS_PROTOCOL.md +++ b/api/XDS_PROTOCOL.md @@ -348,7 +348,9 @@ client spontaneously requests the "wc" resource. ![Incremental session example](diagrams/incremental.svg) On reconnect the Incremental xDS client may tell the server of its known -resources to avoid resending them over the network. +resources to avoid resending them over the network. Because no state is assumed +to be preserved from the previous stream, the reconnecting client must provide +the server with all resource names it is interested in. ![Incremental reconnect example](diagrams/incremental-reconnect.svg) @@ -358,17 +360,24 @@ identified by the alias field in the resource of a `DeltaDiscoveryResponse`. The be returned in the name field in the resource of a `DeltaDiscoveryResponse`. 
#### Subscribing to Resources -Envoy can send either an alias or the name of a resource in the `resource_names_subscribe` field of -a `DeltaDiscoveryRequest` in order to subscribe to a resource. Envoy should check both the names and -aliases of resources in order to determine whether the entity in question has been subscribed to. +The client can send either an alias or the name of a resource in the `resource_names_subscribe` +field of a `DeltaDiscoveryRequest` in order to subscribe to a resource. Both the names and aliases +of resources should be checked in order to determine whether the entity in question has been +subscribed to. + +A `resource_names_subscribe` field may contain resource names that the server believes the client +is already subscribed to, and furthermore has the most recent versions of. However, the server +*must* still provide those resources in the response; due to implementation details hidden from +the server, the client may have "forgotten" those resources despite apparently remaining subscribed. #### Unsubscribing from Resources -Envoy will keep track of a per resource reference count internally. This count will keep track of the -total number of aliases/resource names that are currently subscribed to. When the reference count -reaches zero, Envoy will send a `DeltaDiscoveryRequest` containing the resource name of the resource -to unsubscribe from in the `resource_names_unsubscribe` field. When Envoy unsubscribes from a resource, -it should check for both the resource name and all aliases and appropriately update all resources -that reference either. +When a client loses interest in some resources, it will indicate that with the +`resource_names_unsubscribe` field of a `DeltaDiscoveryRequest`. As with `resource_names_subscribe`, +these may be resource names or aliases. + +A `resource_names_unsubscribe` field may contain superfluous resource names, which the server +thought the client was already not subscribed to. 
The server must cleanly process such a request; +it can simply ignore these phantom unsubscriptions. ## REST-JSON polling subscriptions diff --git a/include/envoy/config/grpc_mux.h b/include/envoy/config/grpc_mux.h index 6872a62d875b6..fb66a78abdcbb 100644 --- a/include/envoy/config/grpc_mux.h +++ b/include/envoy/config/grpc_mux.h @@ -82,7 +82,7 @@ class GrpcMux { * Start a configuration subscription asynchronously for some API type and resources. * @param type_url type URL corresponding to xDS API, e.g. * type.googleapis.com/envoy.api.v2.Cluster. - * @param resources vector of resource names to watch for. If this is empty, then all + * @param resources set of resource names to watch for. If this is empty, then all * resources for type_url will result in callbacks. * @param callbacks the callbacks to be notified of configuration updates. These must be valid * until GrpcMuxWatch is destroyed. @@ -90,7 +90,7 @@ class GrpcMux { * away, its EDS updates should be cancelled by destroying the GrpcMuxWatchPtr. */ virtual GrpcMuxWatchPtr subscribe(const std::string& type_url, - const std::vector& resources, + const std::set& resources, GrpcMuxCallbacks& callbacks) PURE; /** diff --git a/include/envoy/config/subscription.h b/include/envoy/config/subscription.h index 2897e9798befc..badd3bad890d3 100644 --- a/include/envoy/config/subscription.h +++ b/include/envoy/config/subscription.h @@ -69,18 +69,18 @@ class Subscription { /** * Start a configuration subscription asynchronously. This should be called once and will continue * to fetch throughout the lifetime of the Subscription object. - * @param resources vector of resource names to fetch. + * @param resources set of resource names to fetch. * @param callbacks the callbacks to be notified of configuration updates. The callback must not * result in the deletion of the Subscription object. 
*/ - virtual void start(const std::vector& resources, - SubscriptionCallbacks& callbacks) PURE; + virtual void start(const std::set& resources, SubscriptionCallbacks& callbacks) PURE; /** * Update the resources to fetch. - * @param resources vector of resource names to fetch. + * @param update_to_these_names set of resource names to fetch. It's a (not unordered_)set so that it can + * be passed to std::set_difference, which must be given sorted collections. */ - virtual void updateResources(const std::vector& resources) PURE; + virtual void updateResources(const std::set& update_to_these_names) PURE; }; /** diff --git a/source/common/config/delta_subscription_impl.h b/source/common/config/delta_subscription_impl.h index 1e5f0e7cc0adb..b614c7806f409 100644 --- a/source/common/config/delta_subscription_impl.h +++ b/source/common/config/delta_subscription_impl.h @@ -20,9 +20,10 @@ namespace Envoy { namespace Config { -struct ResourceNameDiff { - std::vector added_; - std::vector removed_; +struct UpdateAck { + UpdateAck(absl::string_view nonce) : nonce_(nonce) {} + std::string nonce_; + ::google::rpc::Status error_detail_; }; /** @@ -48,53 +49,6 @@ class DeltaSubscriptionImpl : public Subscription, request_.mutable_node()->MergeFrom(local_info_.node()); } - // Enqueues and attempts to send a discovery request, (un)subscribing to resources missing from / - // added to the passed 'resources' argument, relative to resource_versions_.
- void buildAndQueueDiscoveryRequest(const std::vector& resources) { - ResourceNameDiff diff; - std::set_difference(resources.begin(), resources.end(), resource_names_.begin(), - resource_names_.end(), std::inserter(diff.added_, diff.added_.begin())); - std::set_difference(resource_names_.begin(), resource_names_.end(), resources.begin(), - resources.end(), std::inserter(diff.removed_, diff.removed_.begin())); - - for (const auto& added : diff.added_) { - setResourceWaitingForServer(added); - } - for (const auto& removed : diff.removed_) { - lostInterestInResource(removed); - } - queueDiscoveryRequest(diff); - } - - void sendDiscoveryRequest(const ResourceNameDiff& diff) { - if (!grpc_stream_.grpcStreamAvailable()) { - ENVOY_LOG(debug, "No stream available to sendDiscoveryRequest for {}", type_url_); - return; // Drop this request; the reconnect will enqueue a new one. - } - if (paused_) { - ENVOY_LOG(trace, "API {} paused during sendDiscoveryRequest().", type_url_); - pending_ = diff; - return; // The unpause will send this request. 
- } - - request_.clear_resource_names_subscribe(); - request_.clear_resource_names_unsubscribe(); - std::copy(diff.added_.begin(), diff.added_.end(), - Protobuf::RepeatedFieldBackInserter(request_.mutable_resource_names_subscribe())); - std::copy(diff.removed_.begin(), diff.removed_.end(), - Protobuf::RepeatedFieldBackInserter(request_.mutable_resource_names_unsubscribe())); - - ENVOY_LOG(trace, "Sending DiscoveryRequest for {}: {}", type_url_, request_.DebugString()); - grpc_stream_.sendMessage(request_); - request_.clear_error_detail(); - request_.clear_initial_resource_versions(); - } - - void subscribe(const std::vector& resources) { - ENVOY_LOG(debug, "delta subscribe for " + type_url_); - buildAndQueueDiscoveryRequest(resources); - } - void pause() { ENVOY_LOG(debug, "Pausing discovery requests for {}", type_url_); ASSERT(!paused_); @@ -105,38 +59,90 @@ class DeltaSubscriptionImpl : public Subscription, ENVOY_LOG(debug, "Resuming discovery requests for {}", type_url_); ASSERT(paused_); paused_ = false; - if (pending_.has_value()) { - queueDiscoveryRequest(pending_.value()); - pending_.reset(); - } + trySendDiscoveryRequests(); } envoy::api::v2::DeltaDiscoveryRequest internalRequestStateForTest() const { return request_; } + // Config::Subscription + void start(const std::set& resources, SubscriptionCallbacks& callbacks) override { + callbacks_ = &callbacks; + + if (init_fetch_timeout_.count() > 0) { + init_fetch_timeout_timer_ = dispatcher_.createTimer([this]() -> void { + ENVOY_LOG(warn, "delta config: initial fetch timed out for {}", type_url_); + callbacks_->onConfigUpdateFailed(nullptr); + }); + init_fetch_timeout_timer_->enableTimer(init_fetch_timeout_); + } + + grpc_stream_.establishNewStream(); + updateResources(resources); + } + + void updateResources(const std::set& update_to_these_names) override { + std::vector cur_added; + std::vector cur_removed; + + std::set_difference(update_to_these_names.begin(), update_to_these_names.end(), + 
resource_names_.begin(), resource_names_.end(), + std::inserter(cur_added, cur_added.begin())); + std::set_difference(resource_names_.begin(), resource_names_.end(), + update_to_these_names.begin(), update_to_these_names.end(), + std::inserter(cur_removed, cur_removed.begin())); + + for (const auto& a : cur_added) { + setResourceWaitingForServer(a); + // Removed->added requires us to keep track of it as a "new" addition, since our user may have + // forgotten its copy of the resource after instructing us to remove it, and so needs to be + // reminded of it. + names_removed_.erase(a); + names_added_.insert(a); + } + for (const auto& r : cur_removed) { + lostInterestInResource(r); + // Ideally, when a resource is added-then-removed in between requests, we would avoid putting + // a superfluous "unsubscribe [resource that was never subscribed]" in the request. However, + // the removed-then-added case *does* need to go in the request, and due to how we accomplish + // that, it's difficult to distinguish remove-add-remove from add-remove (because "remove-add" + // has to be treated as equivalent to just "add"). + names_added_.erase(r); + names_removed_.insert(r); + } + + stats_.update_attempt_.inc(); + // Tell the server about our new interests (but only if there are any). + if (!names_added_.empty() || !names_removed_.empty()) { + kickOffDiscoveryRequest(); + } + } + // Config::GrpcStreamCallbacks void onStreamEstablished() override { - // initial_resource_versions "must be populated for first request in a stream", so guarantee - // that the initial version'd request we're about to enqueue is what gets sent. - clearRequestQueue(); - request_.Clear(); for (auto const& resource : resource_versions_) { // Populate initial_resource_versions with the resource versions we currently have. Resources // we are interested in, but are still waiting to get any version of from the server, do not - // belong in initial_resource_versions. + // belong in initial_resource_versions. 
(But do belong in new subscriptions!) if (!resource.second.waitingForServer()) { (*request_.mutable_initial_resource_versions())[resource.first] = resource.second.version(); } + // As mentioned above, fill resource_names_subscribe with everything. + names_added_.insert(resource.first); } + names_removed_.clear(); request_.set_type_url(type_url_); request_.mutable_node()->MergeFrom(local_info_.node()); - queueDiscoveryRequest(ResourceNameDiff()); // no change to subscribed resources + kickOffDiscoveryRequest(); } void onEstablishmentFailure() override { disableInitFetchTimeoutTimer(); stats_.update_failure_.inc(); ENVOY_LOG(debug, "delta update for {} failed", type_url_); + // TODO(fredlas) this increment is needed to pass existing tests, but it seems wrong. We already + // increment it when updating subscription interest, which attempts a request. Is this supposed + // to be the sum of client- and server- initiated update attempts? Seems weird. stats_.update_attempt_.inc(); callbacks_->onConfigUpdateFailed(nullptr); } @@ -147,49 +153,25 @@ class DeltaSubscriptionImpl : public Subscription, message->system_version_info()); disableInitFetchTimeoutTimer(); - request_.set_response_nonce(message->nonce()); - + UpdateAck ack(message->nonce()); try { handleConfigUpdate(message->resources(), message->removed_resources(), message->system_version_info()); } catch (const EnvoyException& e) { stats_.update_rejected_.inc(); ENVOY_LOG(warn, "delta config for {} rejected: {}", type_url_, e.what()); + // TODO(fredlas) this increment is needed to pass existing tests, but it seems wrong. We + // already increment it when updating subscription interest, which attempts a request. Is this + // supposed to be the sum of client- and server- initiated update attempts? Seems weird. 
stats_.update_attempt_.inc(); callbacks_->onConfigUpdateFailed(&e); - ::google::rpc::Status* error_detail = request_.mutable_error_detail(); - error_detail->set_code(Grpc::Status::GrpcStatus::Internal); - error_detail->set_message(e.what()); + ack.error_detail_.set_code(Grpc::Status::GrpcStatus::Internal); + ack.error_detail_.set_message(e.what()); } - queueDiscoveryRequest(ResourceNameDiff()); // no change to subscribed resources + kickOffDiscoveryRequestWithAck(ack); } - void onWriteable() override { drainRequests(); } - - // Config::Subscription - void start(const std::vector& resources, SubscriptionCallbacks& callbacks) override { - callbacks_ = &callbacks; - - if (init_fetch_timeout_.count() > 0) { - init_fetch_timeout_timer_ = dispatcher_.createTimer([this]() -> void { - ENVOY_LOG(warn, "delta config: initial fetch timed out for {}", type_url_); - callbacks_->onConfigUpdateFailed(nullptr); - }); - init_fetch_timeout_timer_->enableTimer(init_fetch_timeout_); - } - - grpc_stream_.establishNewStream(); - subscribe(resources); - // The attempt stat here is maintained for the purposes of having consistency between ADS and - // individual DeltaSubscriptions. Since ADS is push based and muxed, the notion of an - // "attempt" for a given xDS API combined by ADS is not really that meaningful. - stats_.update_attempt_.inc(); - } - - void updateResources(const std::vector& resources) override { - subscribe(resources); - stats_.update_attempt_.inc(); - } + void onWriteable() override { trySendDiscoveryRequests(); } private: void @@ -214,6 +196,9 @@ class DeltaSubscriptionImpl : public Subscription, } } stats_.update_success_.inc(); + // TODO(fredlas) this increment is needed to pass existing tests, but it seems wrong. We already + // increment it when updating subscription interest, which attempts a request. Is this supposed + // to be the sum of client- and server- initiated update attempts? Seems weird. 
stats_.update_attempt_.inc(); stats_.version_.set(HashUtil::xxHash64(version_info)); ENVOY_LOG(debug, "Delta config for {} accepted with {} resources added, {} removed", type_url_, @@ -227,14 +212,60 @@ class DeltaSubscriptionImpl : public Subscription, } } - void drainRequests() { - ENVOY_LOG(trace, "draining discovery requests {}", request_queue_.size()); - while (!request_queue_.empty() && grpc_stream_.checkRateLimitAllowsDrain()) { - // Process the request, if rate limiting is not enabled at all or if it is under rate limit. - sendDiscoveryRequest(request_queue_.front()); - request_queue_.pop(); + void kickOffDiscoveryRequest() { kickOffDiscoveryRequestWithAck(absl::nullopt); } + + void kickOffDiscoveryRequestWithAck(absl::optional ack) { + ack_queue_.push(ack); + trySendDiscoveryRequests(); + } + + // What's with the optional? DeltaDiscoveryRequest plays two independent roles: + // informing the server of what resources we're interested in, and acknowledging resources it has + // sent us. Some requests are queued up specifically to carry ACKs, and some are queued up for + // resource updates. Subscription changes might get included in an ACK request. In that case, the + // pending request that the subscription change queued up does still get sent, just empty and + // pointless. (TODO(fredlas) we would like to skip those no-op requests). 
+ void sendDiscoveryRequest(absl::optional maybe_ack) { + if (maybe_ack.has_value()) { + const UpdateAck& ack = maybe_ack.value(); + request_.set_response_nonce(ack.nonce_); + *request_.mutable_error_detail() = ack.error_detail_; } - grpc_stream_.maybeUpdateQueueSizeStat(request_queue_.size()); + request_.clear_resource_names_subscribe(); + request_.clear_resource_names_unsubscribe(); + std::copy(names_added_.begin(), names_added_.end(), + Protobuf::RepeatedFieldBackInserter(request_.mutable_resource_names_subscribe())); + std::copy(names_removed_.begin(), names_removed_.end(), + Protobuf::RepeatedFieldBackInserter(request_.mutable_resource_names_unsubscribe())); + names_added_.clear(); + names_removed_.clear(); + + ENVOY_LOG(trace, "Sending DiscoveryRequest for {}: {}", type_url_, request_.DebugString()); + grpc_stream_.sendMessage(request_); + request_.clear_error_detail(); + request_.clear_initial_resource_versions(); + } + + bool shouldSendDiscoveryRequest() { + if (paused_) { + ENVOY_LOG(trace, "API {} paused; discovery request on hold for now.", type_url_); + return false; + } else if (!grpc_stream_.grpcStreamAvailable()) { + ENVOY_LOG(trace, "No stream available to send a DiscoveryRequest for {}.", type_url_); + return false; + } else if (!grpc_stream_.checkRateLimitAllowsDrain()) { + ENVOY_LOG(trace, "{} DiscoveryRequest hit rate limit; will try later.", type_url_); + return false; + } + return true; + } + + void trySendDiscoveryRequests() { + while (!ack_queue_.empty() && shouldSendDiscoveryRequest()) { + sendDiscoveryRequest(ack_queue_.front()); + ack_queue_.pop(); + } + grpc_stream_.maybeUpdateQueueSizeStat(ack_queue_.size()); } class ResourceVersion { @@ -272,23 +303,6 @@ class DeltaSubscriptionImpl : public Subscription, resource_names_.erase(resource_name); } - void queueDiscoveryRequest(const ResourceNameDiff& queue_item) { - request_queue_.push(queue_item); - drainRequests(); - } - - void clearRequestQueue() { - 
grpc_stream_.maybeUpdateQueueSizeStat(0); - // TODO(fredlas) when we have C++17: request_queue_ = {}; - while (!request_queue_.empty()) { - request_queue_.pop(); - } - } - - // A queue to store requests while rate limited. Note that when requests cannot be sent due to the - // gRPC stream being down, this queue does not store them; rather, they are simply dropped. - std::queue request_queue_; - GrpcStream grpc_stream_; @@ -299,15 +313,25 @@ class DeltaSubscriptionImpl : public Subscription, std::unordered_map resource_versions_; // The keys of resource_versions_. Only tracked separately because std::map does not provide an // iterator into just its keys, e.g. for use in std::set_difference. - std::unordered_set resource_names_; + // Must be stored sorted to work with std::set_difference. + std::set resource_names_; const std::string type_url_; SubscriptionCallbacks* callbacks_{}; - // In-flight or previously sent request. + // The request being built for the next send. envoy::api::v2::DeltaDiscoveryRequest request_; - // Paused via pause()? bool paused_{}; - absl::optional pending_; + + // An item in the queue represents a DeltaDiscoveryRequest that must be sent. If an item is not + // empty, it is the ACK (nonce + error_detail) to set on that request. See + // trySendDiscoveryRequests() for more details. + std::queue> ack_queue_; + + // Tracking of the delta in our subscription interest since the previous DeltaDiscoveryRequest was + // sent. Can't use unordered_set due to ordering issues in gTest expectation matching. Feel free + // to change if you can figure out how to make it work. 
+ std::set names_added_; + std::set names_removed_; const LocalInfo::LocalInfo& local_info_; SubscriptionStats stats_; diff --git a/source/common/config/filesystem_subscription_impl.h b/source/common/config/filesystem_subscription_impl.h index 5a9344d994876..e5f2dbac266b0 100644 --- a/source/common/config/filesystem_subscription_impl.h +++ b/source/common/config/filesystem_subscription_impl.h @@ -35,7 +35,7 @@ class FilesystemSubscriptionImpl : public Config::Subscription, } // Config::Subscription - void start(const std::vector& resources, + void start(const std::set& resources, Config::SubscriptionCallbacks& callbacks) override { // We report all discovered resources in the watched file. UNREFERENCED_PARAMETER(resources); @@ -45,7 +45,7 @@ class FilesystemSubscriptionImpl : public Config::Subscription, refresh(); } - void updateResources(const std::vector& resources) override { + void updateResources(const std::set& resources) override { // We report all discovered resources in the watched file. UNREFERENCED_PARAMETER(resources); // Bump stats for consistence behavior with other xDS. 
diff --git a/source/common/config/grpc_mux_impl.cc b/source/common/config/grpc_mux_impl.cc index c962b0e9dd838..fc654d7e1c1d6 100644 --- a/source/common/config/grpc_mux_impl.cc +++ b/source/common/config/grpc_mux_impl.cc @@ -67,7 +67,7 @@ void GrpcMuxImpl::sendDiscoveryRequest(const std::string& type_url) { } GrpcMuxWatchPtr GrpcMuxImpl::subscribe(const std::string& type_url, - const std::vector& resources, + const std::set& resources, GrpcMuxCallbacks& callbacks) { auto watch = std::unique_ptr(new GrpcMuxWatchImpl(resources, callbacks, type_url, *this)); diff --git a/source/common/config/grpc_mux_impl.h b/source/common/config/grpc_mux_impl.h index 49a961895229d..8702662fc683d 100644 --- a/source/common/config/grpc_mux_impl.h +++ b/source/common/config/grpc_mux_impl.h @@ -31,7 +31,7 @@ class GrpcMuxImpl : public GrpcMux, ~GrpcMuxImpl(); void start() override; - GrpcMuxWatchPtr subscribe(const std::string& type_url, const std::vector& resources, + GrpcMuxWatchPtr subscribe(const std::string& type_url, const std::set& resources, GrpcMuxCallbacks& callbacks) override; void pause(const std::string& type_url) override; void resume(const std::string& type_url) override; @@ -53,7 +53,7 @@ class GrpcMuxImpl : public GrpcMux, void setRetryTimer(); struct GrpcMuxWatchImpl : public GrpcMuxWatch { - GrpcMuxWatchImpl(const std::vector& resources, GrpcMuxCallbacks& callbacks, + GrpcMuxWatchImpl(const std::set& resources, GrpcMuxCallbacks& callbacks, const std::string& type_url, GrpcMuxImpl& parent) : resources_(resources), callbacks_(callbacks), type_url_(type_url), parent_(parent), inserted_(true) { @@ -68,7 +68,7 @@ class GrpcMuxImpl : public GrpcMux, } } } - std::vector resources_; + std::set resources_; GrpcMuxCallbacks& callbacks_; const std::string type_url_; GrpcMuxImpl& parent_; @@ -110,7 +110,7 @@ class GrpcMuxImpl : public GrpcMux, class NullGrpcMuxImpl : public GrpcMux { public: void start() override {} - GrpcMuxWatchPtr subscribe(const std::string&, const 
std::vector&, + GrpcMuxWatchPtr subscribe(const std::string&, const std::set&, GrpcMuxCallbacks&) override { throw EnvoyException("ADS must be configured to support an ADS config source"); } diff --git a/source/common/config/grpc_mux_subscription_impl.h b/source/common/config/grpc_mux_subscription_impl.h index 5526651ae7f87..6ccce8f740de0 100644 --- a/source/common/config/grpc_mux_subscription_impl.h +++ b/source/common/config/grpc_mux_subscription_impl.h @@ -27,7 +27,7 @@ class GrpcMuxSubscriptionImpl : public Subscription, init_fetch_timeout_(init_fetch_timeout) {} // Config::Subscription - void start(const std::vector& resources, SubscriptionCallbacks& callbacks) override { + void start(const std::set& resources, SubscriptionCallbacks& callbacks) override { callbacks_ = &callbacks; if (init_fetch_timeout_.count() > 0) { @@ -45,8 +45,12 @@ class GrpcMuxSubscriptionImpl : public Subscription, stats_.update_attempt_.inc(); } - void updateResources(const std::vector& resources) override { - watch_ = grpc_mux_.subscribe(type_url_, resources, *this); + void updateResources(const std::set& update_to_these_names) override { + // First destroy the watch, so that this subscribe doesn't send a request for both the + // previously watched resources and the new ones (we may have lost interest in some of the + // previously watched ones). 
+ watch_.reset(); + watch_ = grpc_mux_.subscribe(type_url_, update_to_these_names, *this); stats_.update_attempt_.inc(); } diff --git a/source/common/config/grpc_subscription_impl.h b/source/common/config/grpc_subscription_impl.h index 04b1b2aa6981f..0c90b8a47f065 100644 --- a/source/common/config/grpc_subscription_impl.h +++ b/source/common/config/grpc_subscription_impl.h @@ -25,15 +25,15 @@ class GrpcSubscriptionImpl : public Config::Subscription { grpc_mux_subscription_(grpc_mux_, stats, type_url, dispatcher, init_fetch_timeout) {} // Config::Subscription - void start(const std::vector& resources, + void start(const std::set& resources, Config::SubscriptionCallbacks& callbacks) override { // Subscribe first, so we get failure callbacks if grpc_mux_.start() fails. grpc_mux_subscription_.start(resources, callbacks); grpc_mux_.start(); } - void updateResources(const std::vector& resources) override { - grpc_mux_subscription_.updateResources(resources); + void updateResources(const std::set& update_to_these_names) override { + grpc_mux_subscription_.updateResources(update_to_these_names); } GrpcMuxImpl& grpcMux() { return grpc_mux_; } diff --git a/source/common/config/http_subscription_impl.h b/source/common/config/http_subscription_impl.h index 0ae9e6a2287e0..fc5e0e7c6bfa1 100644 --- a/source/common/config/http_subscription_impl.h +++ b/source/common/config/http_subscription_impl.h @@ -48,7 +48,7 @@ class HttpSubscriptionImpl : public Http::RestApiFetcher, } // Config::Subscription - void start(const std::vector& resources, + void start(const std::set& resources, Config::SubscriptionCallbacks& callbacks) override { ASSERT(callbacks_ == nullptr); @@ -67,9 +67,9 @@ class HttpSubscriptionImpl : public Http::RestApiFetcher, initialize(); } - void updateResources(const std::vector& resources) override { - Protobuf::RepeatedPtrField resources_vector(resources.begin(), - resources.end()); + void updateResources(const std::set& update_to_these_names) override { + 
Protobuf::RepeatedPtrField resources_vector( + update_to_these_names.begin(), update_to_these_names.end()); request_.mutable_resource_names()->Swap(&resources_vector); } diff --git a/test/common/config/delta_subscription_impl_test.cc b/test/common/config/delta_subscription_impl_test.cc index 98ea598c25794..b8820162638a1 100644 --- a/test/common/config/delta_subscription_impl_test.cc +++ b/test/common/config/delta_subscription_impl_test.cc @@ -1,6 +1,7 @@ #include "test/common/config/delta_subscription_test_harness.h" using testing::AnyNumber; +using testing::InSequence; using testing::UnorderedElementsAre; namespace Envoy { @@ -77,14 +78,103 @@ TEST_F(DeltaSubscriptionImplTest, ResourceGoneLeadsToBlankInitialVersion) { // ...but our own map should remember our interest. In particular, losing interest in all 3 should // cause their names to appear in the resource_names_unsubscribe field of a DeltaDiscoveryRequest. - subscription_->resume(); // now we do want the request to actually get sendMessage()'d. - EXPECT_CALL(async_stream_, sendMessage(_, _)).WillOnce([](const Protobuf::Message& msg, bool) { - auto sent_request = static_cast(&msg); - EXPECT_THAT(sent_request->resource_names_subscribe(), UnorderedElementsAre("name4")); - EXPECT_THAT(sent_request->resource_names_unsubscribe(), - UnorderedElementsAre("name1", "name2", "name3")); - }); - subscription_->subscribe({"name4"}); // (implies "we no longer care about name1,2,3") + subscription_->resume(); // we do want the final subscribe() to do a sendMessage(). + expectSendMessage({"name4"}, {"name1", "name2", "name3"}, Grpc::Status::GrpcStatus::Ok, ""); + subscription_->updateResources({"name4"}); // (implies "we no longer care about name1,2,3") +} + +// Delta xDS reliably queues up and sends all discovery requests, even in situations where it isn't +// strictly necessary. E.g.: if you subscribe but then unsubscribe to a given resource, all before a +// request was able to be sent, two requests will be sent. 
The following tests test various cases of +// this reliability. TODO TODO REMOVE PROBABLY +// +// If Envoy decided it wasn't interested in a resource and then (before a request was sent) decided +// it was again, for all we know, it dropped that resource in between and needs to retrieve it +// again. So, we *should* send a request "re-"subscribing. This means that the server needs to +// interpret the resource_names_subscribe field as "send these resources even if you think Envoy +// already has them". +TEST_F(DeltaSubscriptionImplTest, RemoveThenAdd) { + startSubscription({"name1", "name2", "name3"}); + subscription_->pause(); // Pause because we're testing multiple updates in between request sends. + subscription_->updateResources({"name1", "name2"}); + subscription_->updateResources({"name1", "name2", "name3"}); + InSequence s; + expectSendMessage({"name3"}, {}, Grpc::Status::GrpcStatus::Ok, ""); + expectSendMessage({}, {}, Grpc::Status::GrpcStatus::Ok, ""); // no-op due to the second update + subscription_->resume(); +} + +// Due to how our implementation provides the required behavior tested in RemoveThenAdd, the +// add-then-remove case *also* causes the resource to be referred to in the request (as an +// unsubscribe). +// Unlike the remove-then-add case, this one really is unnecessary, and ideally we would have +// the request simply not include any mention of the resource. Oh well. +// This test is just here to illustrate that this behavior exists, not to enforce that it +// should be like this. What *is* important: the server must happily and cleanly ignore +// "unsubscribe from [resource name I have never before referred to]" requests. +TEST_F(DeltaSubscriptionImplTest, AddThenRemove) { + startSubscription({"name1", "name2", "name3"}); + subscription_->pause(); // Pause because we're testing multiple updates in between request sends. 
+ subscription_->updateResources({"name1", "name2", "name3", "name4"}); + subscription_->updateResources({"name1", "name2", "name3"}); + InSequence s; + expectSendMessage({}, {"name4"}, Grpc::Status::GrpcStatus::Ok, ""); + expectSendMessage({}, {}, Grpc::Status::GrpcStatus::Ok, ""); // no-op due to the second update + subscription_->resume(); +} + +// add/remove/add == add. +TEST_F(DeltaSubscriptionImplTest, AddRemoveAdd) { + startSubscription({"name1", "name2", "name3"}); + subscription_->pause(); + subscription_->updateResources({"name1", "name2", "name3", "name4"}); + subscription_->updateResources({"name1", "name2", "name3"}); + subscription_->updateResources({"name1", "name2", "name3", "name4"}); + InSequence s; + expectSendMessage({"name4"}, {}, Grpc::Status::GrpcStatus::Ok, ""); + expectSendMessage({}, {}, Grpc::Status::GrpcStatus::Ok, ""); // no-op due to the second update + expectSendMessage({}, {}, Grpc::Status::GrpcStatus::Ok, ""); // no-op due to the third update + subscription_->resume(); +} + +// remove/add/remove == remove. +TEST_F(DeltaSubscriptionImplTest, RemoveAddRemove) { + startSubscription({"name1", "name2", "name3"}); + subscription_->pause(); + subscription_->updateResources({"name1", "name2"}); + subscription_->updateResources({"name1", "name2", "name3"}); + subscription_->updateResources({"name1", "name2"}); + InSequence s; + expectSendMessage({}, {"name3"}, Grpc::Status::GrpcStatus::Ok, ""); + expectSendMessage({}, {}, Grpc::Status::GrpcStatus::Ok, ""); // no-op due to the second update + expectSendMessage({}, {}, Grpc::Status::GrpcStatus::Ok, ""); // no-op due to the third update + subscription_->resume(); +} + +// Starts with 1,2,3. 4 is added/removed/added. In those same updates, 1,2,3 are +// removed/added/removed. End result should be 4 added and 1,2,3 removed. 
+TEST_F(DeltaSubscriptionImplTest, BothAddAndRemove) { + startSubscription({"name1", "name2", "name3"}); + subscription_->pause(); + subscription_->updateResources({"name4"}); + subscription_->updateResources({"name1", "name2", "name3"}); + subscription_->updateResources({"name4"}); + InSequence s; + expectSendMessage({"name4"}, {"name1", "name2", "name3"}, Grpc::Status::GrpcStatus::Ok, ""); + expectSendMessage({}, {}, Grpc::Status::GrpcStatus::Ok, ""); // no-op due to the second update + expectSendMessage({}, {}, Grpc::Status::GrpcStatus::Ok, ""); // no-op due to the third update + subscription_->resume(); +} + +TEST_F(DeltaSubscriptionImplTest, CumulativeUpdates) { + startSubscription({"name1"}); + subscription_->pause(); + subscription_->updateResources({"name1", "name2"}); + subscription_->updateResources({"name1", "name2", "name3"}); + InSequence s; + expectSendMessage({"name2", "name3"}, {}, Grpc::Status::GrpcStatus::Ok, ""); + expectSendMessage({}, {}, Grpc::Status::GrpcStatus::Ok, ""); // no-op due to the second update + subscription_->resume(); } } // namespace diff --git a/test/common/config/delta_subscription_test_harness.h b/test/common/config/delta_subscription_test_harness.h index 5764b619de08c..64d4b966f1bcc 100644 --- a/test/common/config/delta_subscription_test_harness.h +++ b/test/common/config/delta_subscription_test_harness.h @@ -13,7 +13,6 @@ #include "gmock/gmock.h" #include "gtest/gtest.h" -using testing::InSequence; using testing::Mock; using testing::NiceMock; using testing::Return; @@ -38,7 +37,7 @@ class DeltaSubscriptionTestHarness : public SubscriptionTestHarness { rate_limit_settings_, stats_, init_fetch_timeout); } - void startSubscription(const std::vector& cluster_names) override { + void startSubscription(const std::set& cluster_names) override { EXPECT_CALL(*async_client_, start(_, _)).WillOnce(Return(&async_stream_)); last_cluster_names_ = cluster_names; expectSendMessage({}, ""); @@ -46,23 +45,23 @@ class 
DeltaSubscriptionTestHarness : public SubscriptionTestHarness { subscription_->start(cluster_names, callbacks_); } - void expectSendMessage(const std::vector& cluster_names, + void expectSendMessage(const std::set& cluster_names, const std::string& version) override { UNREFERENCED_PARAMETER(version); expectSendMessage(cluster_names, {}, Grpc::Status::GrpcStatus::Ok, ""); } - void expectSendMessage(const std::vector& subscribe, - const std::vector& unsubscribe, - const Protobuf::int32 error_code, const std::string& error_message) { + void expectSendMessage(const std::set& subscribe, + const std::set& unsubscribe, const Protobuf::int32 error_code, + const std::string& error_message) { envoy::api::v2::DeltaDiscoveryRequest expected_request; expected_request.mutable_node()->CopyFrom(node_); - for (const auto& resource : subscribe) { - expected_request.add_resource_names_subscribe(resource); - } - for (auto resource = unsubscribe.rbegin(); resource != unsubscribe.rend(); ++resource) { - expected_request.add_resource_names_unsubscribe(*resource); - } + std::copy( + subscribe.begin(), subscribe.end(), + Protobuf::RepeatedFieldBackInserter(expected_request.mutable_resource_names_subscribe())); + std::copy( + unsubscribe.begin(), unsubscribe.end(), + Protobuf::RepeatedFieldBackInserter(expected_request.mutable_resource_names_unsubscribe())); expected_request.set_response_nonce(last_response_nonce_); expected_request.set_type_url(Config::TypeUrl::get().ClusterLoadAssignment); @@ -71,7 +70,6 @@ class DeltaSubscriptionTestHarness : public SubscriptionTestHarness { error_detail->set_code(error_code); error_detail->set_message(error_message); } - std::cerr << "EXPECTING DiscoveryRequest: " << expected_request.DebugString() << std::endl; EXPECT_CALL(async_stream_, sendMessage(ProtoEq(expected_request), false)); } @@ -108,11 +106,17 @@ class DeltaSubscriptionTestHarness : public SubscriptionTestHarness { Mock::VerifyAndClearExpectations(&async_stream_); } - void 
updateResources(const std::vector& cluster_names) override { - std::vector cluster_superset = cluster_names; - cluster_superset.insert(cluster_superset.end(), last_cluster_names_.begin(), - last_cluster_names_.end()); - expectSendMessage(cluster_names, last_cluster_names_, Grpc::Status::GrpcStatus::Ok, ""); + void updateResources(const std::set& cluster_names) override { + std::set sub; + std::set unsub; + + std::set_difference(cluster_names.begin(), cluster_names.end(), last_cluster_names_.begin(), + last_cluster_names_.end(), std::inserter(sub, sub.begin())); + std::set_difference(last_cluster_names_.begin(), last_cluster_names_.end(), + cluster_names.begin(), cluster_names.end(), + std::inserter(unsub, unsub.begin())); + + expectSendMessage(sub, unsub, Grpc::Status::GrpcStatus::Ok, ""); subscription_->updateResources(cluster_names); last_cluster_names_ = cluster_names; } @@ -140,7 +144,7 @@ class DeltaSubscriptionTestHarness : public SubscriptionTestHarness { Grpc::MockAsyncStream async_stream_; std::unique_ptr subscription_; std::string last_response_nonce_; - std::vector last_cluster_names_; + std::set last_cluster_names_; Envoy::Config::RateLimitSettings rate_limit_settings_; Event::MockTimer* init_timeout_timer_; envoy::api::v2::core::Node node_; diff --git a/test/common/config/filesystem_subscription_test_harness.h b/test/common/config/filesystem_subscription_test_harness.h index e5a8e85e8f237..912d21739419d 100644 --- a/test/common/config/filesystem_subscription_test_harness.h +++ b/test/common/config/filesystem_subscription_test_harness.h @@ -37,13 +37,13 @@ class FilesystemSubscriptionTestHarness : public SubscriptionTestHarness { } } - void startSubscription(const std::vector& cluster_names) override { + void startSubscription(const std::set& cluster_names) override { std::ifstream config_file(path_); file_at_start_ = config_file.good(); subscription_.start(cluster_names, callbacks_); } - void updateResources(const std::vector& cluster_names) override { 
+ void updateResources(const std::set& cluster_names) override { subscription_.updateResources(cluster_names); } @@ -57,7 +57,7 @@ class FilesystemSubscriptionTestHarness : public SubscriptionTestHarness { } } - void expectSendMessage(const std::vector& cluster_names, + void expectSendMessage(const std::set& cluster_names, const std::string& version) override { UNREFERENCED_PARAMETER(cluster_names); UNREFERENCED_PARAMETER(version); diff --git a/test/common/config/grpc_subscription_test_harness.h b/test/common/config/grpc_subscription_test_harness.h index 8128edb534f88..5c23300dab288 100644 --- a/test/common/config/grpc_subscription_test_harness.h +++ b/test/common/config/grpc_subscription_test_harness.h @@ -50,12 +50,12 @@ class GrpcSubscriptionTestHarness : public SubscriptionTestHarness { ~GrpcSubscriptionTestHarness() override { EXPECT_CALL(async_stream_, sendMessage(_, false)); } - void expectSendMessage(const std::vector& cluster_names, + void expectSendMessage(const std::set& cluster_names, const std::string& version) override { expectSendMessage(cluster_names, version, Grpc::Status::GrpcStatus::Ok, ""); } - void expectSendMessage(const std::vector& cluster_names, const std::string& version, + void expectSendMessage(const std::set& cluster_names, const std::string& version, const Protobuf::int32 error_code, const std::string& error_message) { envoy::api::v2::DiscoveryRequest expected_request; expected_request.mutable_node()->CopyFrom(node_); @@ -75,7 +75,7 @@ class GrpcSubscriptionTestHarness : public SubscriptionTestHarness { EXPECT_CALL(async_stream_, sendMessage(ProtoEq(expected_request), false)); } - void startSubscription(const std::vector& cluster_names) override { + void startSubscription(const std::set& cluster_names) override { EXPECT_CALL(*async_client_, start(_, _)).WillOnce(Return(&async_stream_)); last_cluster_names_ = cluster_names; expectSendMessage(last_cluster_names_, ""); @@ -113,11 +113,19 @@ class GrpcSubscriptionTestHarness : public 
SubscriptionTestHarness { Mock::VerifyAndClearExpectations(&async_stream_); } - void updateResources(const std::vector& cluster_names) override { - std::vector cluster_superset = cluster_names; - cluster_superset.insert(cluster_superset.end(), last_cluster_names_.begin(), - last_cluster_names_.end()); - expectSendMessage(cluster_superset, version_); + void updateResources(const std::set& cluster_names) override { + // The "watch" mechanism means that updates that lose interest in a resource + // will first generate a request for [still watched resources, i.e. without newly unwatched + // ones] before generating the request for all of cluster_names. + // TODO(fredlas) this unnecessary second request will stop happening once the watch mechanism is + // no longer internally used by GrpcSubscriptionImpl. + std::set both; + for (const auto& n : cluster_names) { + if (last_cluster_names_.find(n) != last_cluster_names_.end()) { + both.insert(n); + } + } + expectSendMessage(both, version_); expectSendMessage(cluster_names, version_); subscription_->updateResources(cluster_names); last_cluster_names_ = cluster_names; @@ -151,7 +159,7 @@ class GrpcSubscriptionTestHarness : public SubscriptionTestHarness { Grpc::MockAsyncStream async_stream_; std::unique_ptr subscription_; std::string last_response_nonce_; - std::vector last_cluster_names_; + std::set last_cluster_names_; NiceMock local_info_; Envoy::Config::RateLimitSettings rate_limit_settings_; Event::MockTimer* init_timeout_timer_; diff --git a/test/common/config/http_subscription_test_harness.h b/test/common/config/http_subscription_test_harness.h index 35c2e6d3b87f1..7e6cfc828092d 100644 --- a/test/common/config/http_subscription_test_harness.h +++ b/test/common/config/http_subscription_test_harness.h @@ -56,7 +56,7 @@ class HttpSubscriptionTestHarness : public SubscriptionTestHarness { } } - void expectSendMessage(const std::vector& cluster_names, + void expectSendMessage(const std::set& cluster_names, const 
std::string& version) override { EXPECT_CALL(cm_, httpAsyncClientForCluster("eds_cluster")); EXPECT_CALL(cm_.async_client_, send_(_, _, _)) @@ -76,8 +76,16 @@ class HttpSubscriptionTestHarness : public SubscriptionTestHarness { } expected_request += "\"node\":{\"id\":\"fo0\"},"; if (!cluster_names.empty()) { - expected_request += - "\"resource_names\":[\"" + StringUtil::join(cluster_names, "\",\"") + "\"]"; + std::string joined_cluster_names; + { + std::string delimiter = "\",\""; + std::ostringstream buf; + std::copy(cluster_names.begin(), cluster_names.end(), + std::ostream_iterator(buf, delimiter.c_str())); + std::string with_comma = buf.str(); + joined_cluster_names = with_comma.substr(0, with_comma.length() - delimiter.length()); + } + expected_request += "\"resource_names\":[\"" + joined_cluster_names + "\"]"; } expected_request += "}"; EXPECT_EQ(expected_request, request->bodyAsString()); @@ -88,14 +96,14 @@ class HttpSubscriptionTestHarness : public SubscriptionTestHarness { })); } - void startSubscription(const std::vector& cluster_names) override { + void startSubscription(const std::set& cluster_names) override { version_ = ""; cluster_names_ = cluster_names; expectSendMessage(cluster_names, ""); subscription_->start(cluster_names, callbacks_); } - void updateResources(const std::vector& cluster_names) override { + void updateResources(const std::set& cluster_names) override { cluster_names_ = cluster_names; expectSendMessage(cluster_names, version_); subscription_->updateResources(cluster_names); @@ -154,7 +162,7 @@ class HttpSubscriptionTestHarness : public SubscriptionTestHarness { bool request_in_progress_{}; std::string version_; - std::vector cluster_names_; + std::set cluster_names_; const Protobuf::MethodDescriptor* method_descriptor_; Upstream::MockClusterManager cm_; Event::MockDispatcher dispatcher_; diff --git a/test/common/config/subscription_impl_test.cc b/test/common/config/subscription_impl_test.cc index 35b1f819acb63..67a9566619c2a 
100644 --- a/test/common/config/subscription_impl_test.cc +++ b/test/common/config/subscription_impl_test.cc @@ -39,16 +39,15 @@ class SubscriptionImplTest : public testing::TestWithParam { } } - void startSubscription(const std::vector& cluster_names) { + void startSubscription(const std::set& cluster_names) { test_harness_->startSubscription(cluster_names); } - void updateResources(const std::vector& cluster_names) { + void updateResources(const std::set& cluster_names) { test_harness_->updateResources(cluster_names); } - void expectSendMessage(const std::vector& cluster_names, - const std::string& version) { + void expectSendMessage(const std::set& cluster_names, const std::string& version) { test_harness_->expectSendMessage(cluster_names, version); } diff --git a/test/common/config/subscription_test_harness.h b/test/common/config/subscription_test_harness.h index 5a62f6d861d26..551b4ca254e84 100644 --- a/test/common/config/subscription_test_harness.h +++ b/test/common/config/subscription_test_harness.h @@ -24,20 +24,20 @@ class SubscriptionTestHarness { * Start subscription and set related expectations. * @param cluster_names initial cluster names to request via EDS. */ - virtual void startSubscription(const std::vector& cluster_names) PURE; + virtual void startSubscription(const std::set& cluster_names) PURE; /** * Update cluster names to be delivered via EDS. * @param cluster_names cluster names. */ - virtual void updateResources(const std::vector& cluster_names) PURE; + virtual void updateResources(const std::set& cluster_names) PURE; /** * Expect that an update request is sent by the Subscription implementation. * @param cluster_names cluster names to expect in the request. * @param version version_info to expect in the request. 
*/ - virtual void expectSendMessage(const std::vector& cluster_names, + virtual void expectSendMessage(const std::set& cluster_names, const std::string& version) PURE; /** diff --git a/test/mocks/config/mocks.cc b/test/mocks/config/mocks.cc index 72783296cc9fb..00d70fac699b2 100644 --- a/test/mocks/config/mocks.cc +++ b/test/mocks/config/mocks.cc @@ -17,7 +17,7 @@ MockGrpcStreamCallbacks::MockGrpcStreamCallbacks() {} MockGrpcStreamCallbacks::~MockGrpcStreamCallbacks() {} GrpcMuxWatchPtr MockGrpcMux::subscribe(const std::string& type_url, - const std::vector& resources, + const std::set& resources, GrpcMuxCallbacks& callbacks) { return GrpcMuxWatchPtr(subscribe_(type_url, resources, callbacks)); } diff --git a/test/mocks/config/mocks.h b/test/mocks/config/mocks.h index 9cc82f2380834..7bdf32b42898e 100644 --- a/test/mocks/config/mocks.h +++ b/test/mocks/config/mocks.h @@ -41,8 +41,8 @@ template class MockSubscriptionCallbacks : public Subscript class MockSubscription : public Subscription { public: MOCK_METHOD2_T(start, - void(const std::vector& resources, SubscriptionCallbacks& callbacks)); - MOCK_METHOD1_T(updateResources, void(const std::vector& resources)); + void(const std::set& resources, SubscriptionCallbacks& callbacks)); + MOCK_METHOD1_T(updateResources, void(const std::set& update_to_these_names)); }; class MockGrpcMuxWatch : public GrpcMuxWatch { @@ -60,9 +60,9 @@ class MockGrpcMux : public GrpcMux { MOCK_METHOD0(start, void()); MOCK_METHOD3(subscribe_, - GrpcMuxWatch*(const std::string& type_url, const std::vector& resources, + GrpcMuxWatch*(const std::string& type_url, const std::set& resources, GrpcMuxCallbacks& callbacks)); - GrpcMuxWatchPtr subscribe(const std::string& type_url, const std::vector& resources, + GrpcMuxWatchPtr subscribe(const std::string& type_url, const std::set& resources, GrpcMuxCallbacks& callbacks); MOCK_METHOD1(pause, void(const std::string& type_url)); MOCK_METHOD1(resume, void(const std::string& type_url)); diff --git 
a/tools/spelling_dictionary.txt b/tools/spelling_dictionary.txt index d16d5d0047a17..f0b067f1688bc 100644 --- a/tools/spelling_dictionary.txt +++ b/tools/spelling_dictionary.txt @@ -757,6 +757,7 @@ unterminated untruncated untrusted untyped +unwatched unweighted unzigzag upstreams From 8ceb9c7e011d5a18511a74cf58184ae7c4a0eeb0 Mon Sep 17 00:00:00 2001 From: Derek Date: Fri, 19 Apr 2019 18:57:32 -0700 Subject: [PATCH 157/165] docs: add aspell to mac dependencies to fix check format script (#6661) Signed-off-by: Derek Schaller --- bazel/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/bazel/README.md b/bazel/README.md index 5c719e201b5d0..58215d7de1616 100644 --- a/bazel/README.md +++ b/bazel/README.md @@ -50,7 +50,7 @@ for how to update or override dependencies. On macOS, you'll need to install several dependencies. This can be accomplished via [Homebrew](https://brew.sh/): ``` - brew install coreutils wget cmake libtool go bazel automake ninja llvm@7 autoconf + brew install coreutils wget cmake libtool go bazel automake ninja llvm@7 autoconf aspell ``` _notes_: `coreutils` is used for `realpath`, `gmd5sum` and `gsha256sum`; `llvm@7` is used for `clang-format` @@ -366,7 +366,7 @@ The following optional features can be enabled on the Bazel build command-line: release builds so that the condition is not evaluated. This option has no effect in debug builds. * memory-debugging (scribbling over memory after allocation and before freeing) with `--define tcmalloc=debug`. Note this option cannot be used with FIPS-compliant mode BoringSSL. -* Default [path normalization](https://github.com/envoyproxy/envoy/issues/6435) with +* Default [path normalization](https://github.com/envoyproxy/envoy/issues/6435) with `--define path_normalization_by_default=true`. Note this still could be disable by explicit xDS config. 
## Disabling extensions From fdb4f1a992edeb394ba690103f8a3e5cf7827a44 Mon Sep 17 00:00:00 2001 From: Bin Wu <46450037+wu-bin@users.noreply.github.com> Date: Fri, 19 Apr 2019 23:19:38 -0400 Subject: [PATCH 158/165] Implement some TODOs in quic_endian_impl.h (#6644) Signed-off-by: Bin Wu --- bazel/external/quiche.BUILD | 1 + .../quic_listeners/quiche/platform/BUILD | 1 + .../quiche/platform/quic_endian_impl.h | 29 +++++++++---------- 3 files changed, 15 insertions(+), 16 deletions(-) diff --git a/bazel/external/quiche.BUILD b/bazel/external/quiche.BUILD index 2b65a5dddd787..0d24bc7e22c68 100644 --- a/bazel/external/quiche.BUILD +++ b/bazel/external/quiche.BUILD @@ -277,6 +277,7 @@ envoy_cc_test( name = "quic_platform_test", srcs = envoy_select_quiche( [ + "quiche/quic/platform/api/quic_endian_test.cc", "quiche/quic/platform/api/quic_reference_counted_test.cc", "quiche/quic/platform/api/quic_string_utils_test.cc", "quiche/quic/platform/api/quic_text_utils_test.cc", diff --git a/source/extensions/quic_listeners/quiche/platform/BUILD b/source/extensions/quic_listeners/quiche/platform/BUILD index f290bc635f3d0..f011cd34b33d4 100644 --- a/source/extensions/quic_listeners/quiche/platform/BUILD +++ b/source/extensions/quic_listeners/quiche/platform/BUILD @@ -129,6 +129,7 @@ envoy_cc_library( ":quic_platform_logging_impl_lib", "//include/envoy/thread:thread_interface", "//source/common/common:assert_lib", + "//source/common/common:byte_order_lib", "//source/server:backtrace_lib", ]), ) diff --git a/source/extensions/quic_listeners/quiche/platform/quic_endian_impl.h b/source/extensions/quic_listeners/quiche/platform/quic_endian_impl.h index 02e5356830bf0..c456da321fa73 100644 --- a/source/extensions/quic_listeners/quiche/platform/quic_endian_impl.h +++ b/source/extensions/quic_listeners/quiche/platform/quic_endian_impl.h @@ -1,31 +1,28 @@ #pragma once -#include - -#include - // NOLINT(namespace-envoy) // This file is part of the QUICHE platform implementation, and is not to 
be // consumed or referenced directly by other Envoy code. It serves purely as a // porting layer for QUICHE. +#include + +#include "common/common/byte_order.h" + namespace quic { class QuicEndianImpl { public: - static uint16_t HostToNet16(uint16_t x) { return htons(x); } - static uint32_t HostToNet32(uint32_t x) { return htonl(x); } - // TODO: implement - static uint64_t HostToNet64(uint64_t /*x*/) { return 0; } - - static uint16_t NetToHost16(uint16_t x) { return ntohs(x); } - static uint32_t NetToHost32(uint32_t x) { return ntohl(x); } - // TODO: implement - static uint64_t NetToHost64(uint64_t /*x*/) { return 0; } - - // TODO: implement - static bool HostIsLittleEndian() { return false; } + static uint16_t HostToNet16(uint16_t x) { return toEndianness(x); } + static uint32_t HostToNet32(uint32_t x) { return toEndianness(x); } + static uint64_t HostToNet64(uint64_t x) { return toEndianness(x); } + + static uint16_t NetToHost16(uint16_t x) { return fromEndianness(x); } + static uint32_t NetToHost32(uint32_t x) { return fromEndianness(x); } + static uint64_t NetToHost64(uint64_t x) { return fromEndianness(x); } + + static bool HostIsLittleEndian() { return NetToHost16(0x1234) != 0x1234; } }; } // namespace quic From 2ae3322b177fd936b30e44dd5b316e6564bdc05e Mon Sep 17 00:00:00 2001 From: Derek Date: Fri, 19 Apr 2019 20:26:29 -0700 Subject: [PATCH 159/165] update bazel readme for clang-format-8 on mac (#6660) Signed-off-by: Derek Schaller --- bazel/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/bazel/README.md b/bazel/README.md index 58215d7de1616..99adeb4d2b876 100644 --- a/bazel/README.md +++ b/bazel/README.md @@ -50,9 +50,9 @@ for how to update or override dependencies. On macOS, you'll need to install several dependencies. 
This can be accomplished via [Homebrew](https://brew.sh/): ``` - brew install coreutils wget cmake libtool go bazel automake ninja llvm@7 autoconf aspell + brew install coreutils wget cmake libtool go bazel automake ninja clang-format autoconf aspell ``` - _notes_: `coreutils` is used for `realpath`, `gmd5sum` and `gsha256sum`; `llvm@7` is used for `clang-format` + _notes_: `coreutils` is used for `realpath`, `gmd5sum` and `gsha256sum` Envoy compiles and passes tests with the version of clang installed by XCode 9.3.0: Apple LLVM version 9.1.0 (clang-902.0.30). From 92963f927c896ad955115c5c3dd2235bc808cbac Mon Sep 17 00:00:00 2001 From: Snow Pettersen Date: Sat, 20 Apr 2019 06:32:36 -0700 Subject: [PATCH 160/165] router: defer per try timeout until downstream request is done (#6643) This defers starting the per try timeout timer until onRequestComplete to ensure that it is not started before the global timeout. This ensures that the per try timeout will not take into account the time spent reading the downstream, which should be responsibility of the HCM level timeouts. Signed-off-by: Snow Pettersen --- docs/root/intro/version_history.rst | 3 + source/common/router/router.cc | 16 ++++- source/common/router/router.h | 3 + test/common/router/router_test.cc | 60 ++++++++++++++++++- .../common/router/router_upstream_log_test.cc | 2 +- 5 files changed, 78 insertions(+), 6 deletions(-) diff --git a/docs/root/intro/version_history.rst b/docs/root/intro/version_history.rst index cc7a73b1bd9be..f2f0ec1529ca4 100644 --- a/docs/root/intro/version_history.rst +++ b/docs/root/intro/version_history.rst @@ -14,6 +14,9 @@ Version history :ref:`max_buffer_size_before_flush ` to batch commands together until the encoder buffer hits a certain size, and :ref:`buffer_flush_timeout ` to control how quickly the buffer is flushed if it is not full. * router: added ability to control retry back-off intervals via :ref:`retry policy `. 
+* router: per try timeouts will no longer start before the downstream request has been received + in full by the router. This ensures that the per try timeout does not account for slow + downstreams and that it will not start before the global timeout. * upstream: added :ref:`upstream_cx_pool_overflow ` for the connection pool circuit breaker. 1.10.0 (Apr 5, 2019) diff --git a/source/common/router/router.cc b/source/common/router/router.cc index 3ea401798ad52..bb063522e3019 100644 --- a/source/common/router/router.cc +++ b/source/common/router/router.cc @@ -507,6 +507,8 @@ void Filter::maybeDoShadowing() { } void Filter::onRequestComplete() { + // This should be called exactly once, when the downstream request has been received in full. + ASSERT(!downstream_end_stream_); downstream_end_stream_ = true; Event::Dispatcher& dispatcher = callbacks_->dispatcher(); downstream_request_complete_time_ = dispatcher.timeSource().monotonicTime(); @@ -521,6 +523,12 @@ void Filter::onRequestComplete() { response_timeout_ = dispatcher.createTimer([this]() -> void { onResponseTimeout(); }); response_timeout_->enableTimer(timeout_.global_timeout_); } + + for (auto& upstream_request : upstream_requests_) { + if (upstream_request->create_per_try_timeout_on_request_complete_) { + upstream_request->setupPerTryTimeout(); + } + } } } @@ -984,7 +992,7 @@ Filter::UpstreamRequest::UpstreamRequest(Filter& parent, Http::ConnectionPool::I : parent_(parent), conn_pool_(pool), grpc_rq_success_deferred_(false), stream_info_(pool.protocol(), parent_.callbacks_->dispatcher().timeSource()), calling_encode_headers_(false), upstream_canary_(false), encode_complete_(false), - encode_trailers_(false) { + encode_trailers_(false), create_per_try_timeout_on_request_complete_(false) { if (parent_.config_.start_child_span_) { span_ = parent_.callbacks_->activeSpan().spawnChild(
request_encoder.getStream().addCallbacks(*this); - setupPerTryTimeout(); + if (parent_.downstream_end_stream_) { + setupPerTryTimeout(); + } else { + create_per_try_timeout_on_request_complete_ = true; + } conn_pool_stream_handle_ = nullptr; setRequestEncoder(request_encoder); diff --git a/source/common/router/router.h b/source/common/router/router.h index bb39d1391a465..0c560ba8fe80c 100644 --- a/source/common/router/router.h +++ b/source/common/router/router.h @@ -355,6 +355,9 @@ class Filter : Logger::Loggable, bool upstream_canary_ : 1; bool encode_complete_ : 1; bool encode_trailers_ : 1; + // Tracks whether we deferred a per try timeout because the downstream request + // had not been completed yet. + bool create_per_try_timeout_on_request_complete_ : 1; }; typedef std::unique_ptr UpstreamRequestPtr; diff --git a/test/common/router/router_test.cc b/test/common/router/router_test.cc index 431b80aecc9e4..b3d8689de565e 100644 --- a/test/common/router/router_test.cc +++ b/test/common/router/router_test.cc @@ -1152,6 +1152,7 @@ TEST_F(RouterTest, UpstreamTimeoutWithAltResponse) { EXPECT_TRUE(verifyHostUpstreamStats(0, 1)); } +// Verifies that the per try timeout is initialized once the downstream request has been read. TEST_F(RouterTest, UpstreamPerTryTimeout) { NiceMock encoder; Http::StreamDecoder* response_decoder = nullptr; @@ -1167,16 +1168,69 @@ TEST_F(RouterTest, UpstreamPerTryTimeout) { EXPECT_EQ(host_address_, host->address()); })); - expectResponseTimerCreate(); + Http::TestHeaderMapImpl headers{{"x-envoy-internal", "true"}, + {"x-envoy-upstream-rq-per-try-timeout-ms", "5"}}; + HttpTestUtility::addDefaultHeaders(headers); + router_.decodeHeaders(headers, false); + + // We verify that both timeouts are started after decodeData(_, true) is called. This + // verifies that we are not starting the initial per try timeout on the first onPoolReady. 
expectPerTryTimerCreate(); + expectResponseTimerCreate(); + + Buffer::OwnedImpl data; + router_.decodeData(data, true); + + EXPECT_CALL(callbacks_.stream_info_, + setResponseFlag(StreamInfo::ResponseFlag::UpstreamRequestTimeout)); + EXPECT_CALL(encoder.stream_, resetStream(Http::StreamResetReason::LocalReset)); + Http::TestHeaderMapImpl response_headers{ + {":status", "504"}, {"content-length", "24"}, {"content-type", "text/plain"}}; + EXPECT_CALL(callbacks_, encodeHeaders_(HeaderMapEqualRef(&response_headers), false)); + EXPECT_CALL(callbacks_, encodeData(_, true)); + EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_, putHttpResponseCode(504)); + per_try_timeout_->callback_(); + + EXPECT_EQ(1U, cm_.thread_local_cluster_.cluster_.info_->stats_store_ + .counter("upstream_rq_per_try_timeout") + .value()); + EXPECT_EQ(1UL, cm_.conn_pool_.host_->stats().rq_timeout_.value()); + EXPECT_TRUE(verifyHostUpstreamStats(0, 1)); +} + +// Verifies that the per try timeout starts when onPoolReady is called when it occurs +// after the downstream request has been read. +TEST_F(RouterTest, UpstreamPerTryTimeoutDelayedPoolReady) { + NiceMock encoder; + Http::StreamDecoder* response_decoder = nullptr; + Http::ConnectionPool::Callbacks* pool_callbacks; + EXPECT_CALL(cm_.conn_pool_, newStream(_, _)) + .WillOnce(Invoke([&](Http::StreamDecoder& decoder, Http::ConnectionPool::Callbacks& callbacks) + -> Http::ConnectionPool::Cancellable* { + response_decoder = &decoder; + pool_callbacks = &callbacks; + return nullptr; + })); Http::TestHeaderMapImpl headers{{"x-envoy-internal", "true"}, {"x-envoy-upstream-rq-per-try-timeout-ms", "5"}}; HttpTestUtility::addDefaultHeaders(headers); router_.decodeHeaders(headers, false); + + // Global timeout starts when decodeData(_, true) is called. + expectResponseTimerCreate(); Buffer::OwnedImpl data; router_.decodeData(data, true); + // Per try timeout starts when onPoolReady is called. 
+ expectPerTryTimerCreate(); + EXPECT_CALL(callbacks_.stream_info_, onUpstreamHostSelected(_)) + .WillOnce(Invoke([&](const Upstream::HostDescriptionConstSharedPtr host) -> void { + EXPECT_EQ(host_address_, host->address()); + })); + + pool_callbacks->onPoolReady(encoder, cm_.conn_pool_.host_); + EXPECT_CALL(callbacks_.stream_info_, setResponseFlag(StreamInfo::ResponseFlag::UpstreamRequestTimeout)); EXPECT_CALL(encoder.stream_, resetStream(Http::StreamResetReason::LocalReset)); @@ -1364,8 +1418,8 @@ TEST_F(RouterTest, RetryUpstreamPerTryTimeout) { callbacks.onPoolReady(encoder1, cm_.conn_pool_.host_); return nullptr; })); - expectResponseTimerCreate(); expectPerTryTimerCreate(); + expectResponseTimerCreate(); Http::TestHeaderMapImpl headers{{"x-envoy-retry-on", "5xx"}, {"x-envoy-internal", "true"}, @@ -1455,8 +1509,8 @@ TEST_F(RouterTest, DontResetStartedResponseOnUpstreamPerTryTimeout) { callbacks.onPoolReady(encoder1, cm_.conn_pool_.host_); return nullptr; })); - expectResponseTimerCreate(); expectPerTryTimerCreate(); + expectResponseTimerCreate(); Http::TestHeaderMapImpl headers{{"x-envoy-internal", "true"}, {"x-envoy-upstream-rq-per-try-timeout-ms", "5"}}; diff --git a/test/common/router/router_upstream_log_test.cc b/test/common/router/router_upstream_log_test.cc index 0e41d0442996f..3c8fbec45935e 100644 --- a/test/common/router/router_upstream_log_test.cc +++ b/test/common/router/router_upstream_log_test.cc @@ -163,8 +163,8 @@ class RouterUpstreamLogTest : public testing::Test { callbacks.onPoolReady(encoder1, context_.cluster_manager_.conn_pool_.host_); return nullptr; })); - expectResponseTimerCreate(); expectPerTryTimerCreate(); + expectResponseTimerCreate(); Http::TestHeaderMapImpl headers{{"x-envoy-retry-on", "5xx"}, {"x-envoy-internal", "true"}, From 03ae1ef6b9afcfda9545b3d734b54027776254f9 Mon Sep 17 00:00:00 2001 From: Snow Pettersen Date: Sat, 20 Apr 2019 09:13:17 -0700 Subject: [PATCH 161/165] router: support offseting downstream provided grpc 
timeout (#6628) This adds support for modifying the grpc-timeout provided by the downstream by some offset. This is useful to make sure that Envoy is able to see timeouts before the gRPC client does, as the client will cancel the request when the deadline has been exceeded which hides the timeout from the outlier detector. Signed-off-by: Snow Pettersen --- api/envoy/api/v2/route/route.proto | 9 ++++++++ docs/root/intro/version_history.rst | 1 + include/envoy/router/router.h | 7 ++++++ source/common/http/async_client_impl.h | 3 +++ source/common/router/config_impl.cc | 1 + source/common/router/config_impl.h | 7 ++++++ source/common/router/router.cc | 9 ++++++++ test/common/router/config_impl_test.cc | 30 ++++++++++++++++++++++++++ test/common/router/router_test.cc | 22 +++++++++++++++++++ test/mocks/router/mocks.h | 1 + 10 files changed, 90 insertions(+) diff --git a/api/envoy/api/v2/route/route.proto b/api/envoy/api/v2/route/route.proto index 87232a78eadce..10ba8f6b4b7b4 100644 --- a/api/envoy/api/v2/route/route.proto +++ b/api/envoy/api/v2/route/route.proto @@ -765,6 +765,15 @@ message RouteAction { // time gaps between gRPC request and response in gRPC streaming mode. google.protobuf.Duration max_grpc_timeout = 23 [(gogoproto.stdduration) = true]; + // If present, Envoy will adjust the timeout provided by the `grpc-timeout` header by subtracting + // the provided duration from the header. This is useful in allowing Envoy to set its global + // timeout to be less than that of the deadline imposed by the calling client, which makes it more + // likely that Envoy will handle the timeout instead of having the call canceled by the client. + // The offset will only be applied if the provided grpc_timeout is greater than the offset. This + // ensures that the offset will only ever decrease the timeout and never set it to 0 (meaning + // infinity). 
+ google.protobuf.Duration grpc_timeout_offset = 28 [(gogoproto.stdduration) = true]; + // Allows enabling and disabling upgrades on a per-route basis. // This overrides any enabled/disabled upgrade filter chain specified in the // HttpConnectionManager diff --git a/docs/root/intro/version_history.rst b/docs/root/intro/version_history.rst index f2f0ec1529ca4..e4f05582044d0 100644 --- a/docs/root/intro/version_history.rst +++ b/docs/root/intro/version_history.rst @@ -13,6 +13,7 @@ Version history * redis: added :ref:`max_buffer_size_before_flush ` to batch commands together until the encoder buffer hits a certain size, and :ref:`buffer_flush_timeout ` to control how quickly the buffer is flushed if it is not full. +* router: add support for configuring a :ref:`grpc timeout offset ` on incoming requests. * router: added ability to control retry back-off intervals via :ref:`retry policy `. * router: per try timeouts will no longer start before the downstream request has been received in full by the router. This ensures that the per try timeout does not account for slow diff --git a/include/envoy/router/router.h b/include/envoy/router/router.h index acd9738ab08ee..0eed655a2ec8b 100644 --- a/include/envoy/router/router.h +++ b/include/envoy/router/router.h @@ -621,6 +621,13 @@ class RouteEntry : public ResponseEntry { */ virtual absl::optional maxGrpcTimeout() const PURE; + /** + * @return absl::optional the timeout offset to apply to the timeout + * provided by the 'grpc-timeout' header of a gRPC request. This value will be positive and should + * be subtracted from the value provided by the header. + */ + virtual absl::optional grpcTimeoutOffset() const PURE; + /** * Determine whether a specific request path belongs to a virtual cluster for use in stats, etc. * @param headers supplies the request headers. 
diff --git a/source/common/http/async_client_impl.h b/source/common/http/async_client_impl.h index 3e359099b22af..b288664db86a9 100644 --- a/source/common/http/async_client_impl.h +++ b/source/common/http/async_client_impl.h @@ -234,6 +234,9 @@ class AsyncStreamImpl : public AsyncClient::Stream, absl::optional maxGrpcTimeout() const override { return absl::nullopt; } + absl::optional grpcTimeoutOffset() const override { + return absl::nullopt; + } const Router::VirtualCluster* virtualCluster(const Http::HeaderMap&) const override { return nullptr; } diff --git a/source/common/router/config_impl.cc b/source/common/router/config_impl.cc index ca8b703c57291..29f06492dc124 100644 --- a/source/common/router/config_impl.cc +++ b/source/common/router/config_impl.cc @@ -337,6 +337,7 @@ RouteEntryImplBase::RouteEntryImplBase(const VirtualHostImpl& vhost, timeout_(PROTOBUF_GET_MS_OR_DEFAULT(route.route(), timeout, DEFAULT_ROUTE_TIMEOUT_MS)), idle_timeout_(PROTOBUF_GET_OPTIONAL_MS(route.route(), idle_timeout)), max_grpc_timeout_(PROTOBUF_GET_OPTIONAL_MS(route.route(), max_grpc_timeout)), + grpc_timeout_offset_(PROTOBUF_GET_OPTIONAL_MS(route.route(), grpc_timeout_offset)), loader_(factory_context.runtime()), runtime_(loadRuntimeData(route.match())), scheme_redirect_(route.redirect().scheme_redirect()), host_redirect_(route.redirect().host_redirect()), diff --git a/source/common/router/config_impl.h b/source/common/router/config_impl.h index 2ded20d38e4fe..2cf26dbbf3f1b 100644 --- a/source/common/router/config_impl.h +++ b/source/common/router/config_impl.h @@ -393,6 +393,9 @@ class RouteEntryImplBase : public RouteEntry, absl::optional maxGrpcTimeout() const override { return max_grpc_timeout_; } + absl::optional grpcTimeoutOffset() const override { + return grpc_timeout_offset_; + } const VirtualHost& virtualHost() const override { return vhost_; } bool autoHostRewrite() const override { return auto_host_rewrite_; } const std::multimap& opaqueConfig() const override { @@ 
-481,6 +484,9 @@ class RouteEntryImplBase : public RouteEntry, absl::optional maxGrpcTimeout() const override { return parent_->maxGrpcTimeout(); } + absl::optional grpcTimeoutOffset() const override { + return parent_->grpcTimeoutOffset(); + } const MetadataMatchCriteria* metadataMatchCriteria() const override { return parent_->metadataMatchCriteria(); } @@ -604,6 +610,7 @@ class RouteEntryImplBase : public RouteEntry, const std::chrono::milliseconds timeout_; const absl::optional idle_timeout_; const absl::optional max_grpc_timeout_; + const absl::optional grpc_timeout_offset_; Runtime::Loader& loader_; const absl::optional runtime_; const std::string scheme_redirect_; diff --git a/source/common/router/router.cc b/source/common/router/router.cc index bb063522e3019..47f40754ba81e 100644 --- a/source/common/router/router.cc +++ b/source/common/router/router.cc @@ -125,6 +125,15 @@ FilterUtility::finalTimeout(const RouteEntry& route, Http::HeaderMap& request_he if (grpc_request && route.maxGrpcTimeout()) { const std::chrono::milliseconds max_grpc_timeout = route.maxGrpcTimeout().value(); std::chrono::milliseconds grpc_timeout = Grpc::Common::getGrpcTimeout(request_headers); + if (route.grpcTimeoutOffset()) { + // We only apply the offset if it won't result in grpc_timeout hitting 0 or below, as + // setting it to 0 means infinity and a negative timeout makes no sense. + const auto offset = *route.grpcTimeoutOffset(); + if (offset < grpc_timeout) { + grpc_timeout -= offset; + } + } + // Cap gRPC timeout to the configured maximum considering that 0 means infinity.
if (max_grpc_timeout != std::chrono::milliseconds(0) && (grpc_timeout == std::chrono::milliseconds(0) || grpc_timeout > max_grpc_timeout)) { diff --git a/test/common/router/config_impl_test.cc b/test/common/router/config_impl_test.cc index b2d2a1589262d..f7a9e36ce6e4f 100644 --- a/test/common/router/config_impl_test.cc +++ b/test/common/router/config_impl_test.cc @@ -1878,6 +1878,36 @@ TEST_F(RouteMatcherTest, ContentType) { } } +TEST_F(RouteMatcherTest, GrpcTimeoutOffset) { + const std::string yaml = R"EOF( +virtual_hosts: +- name: local_service + domains: + - "*" + routes: + - match: + prefix: "/foo" + route: + cluster: local_service_grpc + - match: + prefix: "/" + route: + grpc_timeout_offset: 0.01s + cluster: local_service_grpc + )EOF"; + + TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + + { + EXPECT_EQ( + absl::make_optional(std::chrono::milliseconds(10)), + config.route(genHeaders("www.lyft.com", "/", "GET"), 0)->routeEntry()->grpcTimeoutOffset()); + } + EXPECT_EQ(absl::nullopt, config.route(genHeaders("www.lyft.com", "/foo", "GET"), 0) + ->routeEntry() + ->grpcTimeoutOffset()); +} + TEST_F(RouteMatcherTest, FractionalRuntime) { const std::string yaml = R"EOF( virtual_hosts: diff --git a/test/common/router/router_test.cc b/test/common/router/router_test.cc index b3d8689de565e..e083c5e587ade 100644 --- a/test/common/router/router_test.cc +++ b/test/common/router/router_test.cc @@ -2625,6 +2625,28 @@ TEST(RouterFilterUtilityTest, FinalTimeout) { EXPECT_EQ(std::chrono::milliseconds(0), timeout.per_try_timeout_); EXPECT_EQ("999m", headers.get_("grpc-timeout")); } + { + NiceMock route; + EXPECT_CALL(route, maxGrpcTimeout()) + .WillRepeatedly(Return(absl::optional(999))); + EXPECT_CALL(route, grpcTimeoutOffset()) + .WillRepeatedly(Return(absl::optional(10))); + Http::TestHeaderMapImpl headers{{"content-type", "application/grpc"}, {"grpc-timeout", "100m"}}; + FilterUtility::TimeoutData timeout = 
FilterUtility::finalTimeout(route, headers, true, true); + EXPECT_EQ(std::chrono::milliseconds(90), timeout.global_timeout_); + EXPECT_EQ(std::chrono::milliseconds(0), timeout.per_try_timeout_); + } + { + NiceMock route; + EXPECT_CALL(route, maxGrpcTimeout()) + .WillRepeatedly(Return(absl::optional(999))); + EXPECT_CALL(route, grpcTimeoutOffset()) + .WillRepeatedly(Return(absl::optional(10))); + Http::TestHeaderMapImpl headers{{"content-type", "application/grpc"}, {"grpc-timeout", "1m"}}; + FilterUtility::TimeoutData timeout = FilterUtility::finalTimeout(route, headers, true, true); + EXPECT_EQ(std::chrono::milliseconds(1), timeout.global_timeout_); + EXPECT_EQ(std::chrono::milliseconds(0), timeout.per_try_timeout_); + } { NiceMock route; EXPECT_CALL(route, maxGrpcTimeout()) diff --git a/test/mocks/router/mocks.h b/test/mocks/router/mocks.h index f49cd5f719363..2a5d99650c8a5 100644 --- a/test/mocks/router/mocks.h +++ b/test/mocks/router/mocks.h @@ -272,6 +272,7 @@ class MockRouteEntry : public RouteEntry { MOCK_CONST_METHOD0(timeout, std::chrono::milliseconds()); MOCK_CONST_METHOD0(idleTimeout, absl::optional()); MOCK_CONST_METHOD0(maxGrpcTimeout, absl::optional()); + MOCK_CONST_METHOD0(grpcTimeoutOffset, absl::optional()); MOCK_CONST_METHOD1(virtualCluster, const VirtualCluster*(const Http::HeaderMap& headers)); MOCK_CONST_METHOD0(virtualHostName, const std::string&()); MOCK_CONST_METHOD0(virtualHost, const VirtualHost&()); From 32e4d286668731594eb5c81ed664bd144d8d2d88 Mon Sep 17 00:00:00 2001 From: Andres Guedez <34292400+AndresGuedez@users.noreply.github.com> Date: Sat, 20 Apr 2019 16:31:01 -0400 Subject: [PATCH 162/165] thread: remove ThreadFactorySingleton (#6658) It is no longer needed since Api::Api is plumbed ubiquitiously throughout Envoy's core. The only user of the factory, QuicThreadImpl, has been modified to take the Envoy::Thread::ThreadFactory via QuicThreadImpl::setThreadFactory(). 
Signed-off-by: Andres Guedez --- include/envoy/thread/thread.h | 28 --------------- source/common/thread/BUILD | 18 ---------- .../common/thread/thread_factory_singleton.cc | 21 ----------- source/exe/BUILD | 1 - source/exe/main_common.cc | 6 +--- .../quiche/platform/quic_thread_impl.h | 12 ++++++- test/BUILD | 1 - test/common/thread/BUILD | 18 ---------- .../thread/thread_factory_singleton_test.cc | 35 ------------------- test/exe/main_common_test.cc | 15 +------- .../quic_listeners/quiche/platform/BUILD | 3 +- .../quiche/platform/quic_platform_test.cc | 16 ++++++--- test/main.cc | 1 - 13 files changed, 26 insertions(+), 149 deletions(-) delete mode 100644 source/common/thread/BUILD delete mode 100644 source/common/thread/thread_factory_singleton.cc delete mode 100644 test/common/thread/BUILD delete mode 100644 test/common/thread/thread_factory_singleton_test.cc diff --git a/include/envoy/thread/thread.h b/include/envoy/thread/thread.h index 6bde21178f849..e9078afa476ba 100644 --- a/include/envoy/thread/thread.h +++ b/include/envoy/thread/thread.h @@ -51,34 +51,6 @@ class ThreadFactory { virtual ThreadIdPtr currentThreadId() PURE; }; -/** - * A static singleton to the ThreadFactory corresponding to the build platform. - * - * The singleton must be initialized via set() early in main() with the appropriate ThreadFactory - * (see source/exe/{posix,win32}/platform_impl.h). - * - * This static singleton is an exception to Envoy's established practice for handling of singletons, - * which are typically registered with and accessed via the Envoy::Singleton::Manager. Reasons for - * the exception include drastic simplification of thread safety assertions; e.g.: - * ASSERT(ThreadFactorySingleton::get()->currentThreadId() == original_thread_id_); - */ -class ThreadFactorySingleton { -public: - /** - * Returns a reference to the platform dependent ThreadFactory. 
- */ - static ThreadFactory& get() { return *thread_factory_; } - - /** - * Sets the singleton to the supplied thread_factory. - * @param thread_factory the ThreadFactory instance to be pointed to by this singleton. - */ - static void set(ThreadFactory* thread_factory); - -private: - static ThreadFactory* thread_factory_; -}; - /** * Like the C++11 "basic lockable concept" but a pure virtual interface vs. a template, and * with thread annotations. diff --git a/source/common/thread/BUILD b/source/common/thread/BUILD deleted file mode 100644 index 8ba062af10205..0000000000000 --- a/source/common/thread/BUILD +++ /dev/null @@ -1,18 +0,0 @@ -licenses(["notice"]) # Apache 2 - -load( - "//bazel:envoy_build_system.bzl", - "envoy_cc_library", - "envoy_package", -) - -envoy_package() - -envoy_cc_library( - name = "thread_factory_singleton_lib", - srcs = ["thread_factory_singleton.cc"], - deps = [ - "//include/envoy/thread:thread_interface", - "//source/common/common:assert_lib", - ], -) diff --git a/source/common/thread/thread_factory_singleton.cc b/source/common/thread/thread_factory_singleton.cc deleted file mode 100644 index c3c8d8a62e883..0000000000000 --- a/source/common/thread/thread_factory_singleton.cc +++ /dev/null @@ -1,21 +0,0 @@ -#include "envoy/thread/thread.h" - -#include "common/common/assert.h" - -namespace Envoy { -namespace Thread { - -ThreadFactory* ThreadFactorySingleton::thread_factory_{nullptr}; - -// This function can not be inlined in the thread.h header due to the use of ASSERT() creating a -// circular dependency with assert.h. -void ThreadFactorySingleton::set(ThreadFactory* thread_factory) { - // Verify that either the singleton is uninitialized (i.e., thread_factory_ == nullptr) OR it's - // being reset to the uninitialized state (i.e., thread_factory == nullptr), but _not_ both. The - // use of XOR complicates tests but improves our ability to catch init/cleanup errors. 
- ASSERT((thread_factory == nullptr) != (thread_factory_ == nullptr)); - thread_factory_ = thread_factory; -} - -} // namespace Thread -} // namespace Envoy diff --git a/source/exe/BUILD b/source/exe/BUILD index bcce85238a6b8..7e4d98a84b030 100644 --- a/source/exe/BUILD +++ b/source/exe/BUILD @@ -71,7 +71,6 @@ envoy_cc_library( "//source/common/http/http2:codec_lib", "//source/common/common:perf_annotation_lib", "//source/common/stats:fake_symbol_table_lib", - "//source/common/thread:thread_factory_singleton_lib", "//source/server:hot_restart_lib", "//source/server:hot_restart_nop_lib", "//source/server:proto_descriptors_lib", diff --git a/source/exe/main_common.cc b/source/exe/main_common.cc index 53f828142c2e5..2dd60ef2033e8 100644 --- a/source/exe/main_common.cc +++ b/source/exe/main_common.cc @@ -49,7 +49,6 @@ MainCommonBase::MainCommonBase(const OptionsImpl& options, Event::TimeSystem& ti Filesystem::Instance& file_system) : options_(options), component_factory_(component_factory), thread_factory_(thread_factory), file_system_(file_system) { - Thread::ThreadFactorySingleton::set(&thread_factory_); ares_library_init(ARES_LIB_INIT_ALL); Event::Libevent::Global::initialize(); RELEASE_ASSERT(Envoy::Server::validateProtoDescriptors(), ""); @@ -98,10 +97,7 @@ MainCommonBase::MainCommonBase(const OptionsImpl& options, Event::TimeSystem& ti } } -MainCommonBase::~MainCommonBase() { - Thread::ThreadFactorySingleton::set(nullptr); - ares_library_cleanup(); -} +MainCommonBase::~MainCommonBase() { ares_library_cleanup(); } void MainCommonBase::configureComponentLogLevels() { for (auto& component_log_level : options_.componentLogLevels()) { diff --git a/source/extensions/quic_listeners/quiche/platform/quic_thread_impl.h b/source/extensions/quic_listeners/quiche/platform/quic_thread_impl.h index 42e84bb27c7dc..bf2b63419a5b4 100644 --- a/source/extensions/quic_listeners/quiche/platform/quic_thread_impl.h +++ 
b/source/extensions/quic_listeners/quiche/platform/quic_thread_impl.h @@ -30,10 +30,11 @@ class QuicThreadImpl { } void Start() { + ASSERT(thread_factory_ != nullptr); if (thread_ != nullptr || thread_is_set_.HasBeenNotified()) { PANIC("QuicThread can only be started once."); } - thread_ = Envoy::Thread::ThreadFactorySingleton::get().createThread([this]() { + thread_ = thread_factory_->createThread([this]() { thread_is_set_.WaitForNotification(); this->Run(); }); @@ -48,6 +49,14 @@ class QuicThreadImpl { thread_ = nullptr; } + // Sets the thread factory to use. + // NOTE: The factory can not be passed via a constructor argument because this class is itself a + // dependency of an external library that derives from it and expects a single argument + // constructor. + void setThreadFactory(Envoy::Thread::ThreadFactory& thread_factory) { + thread_factory_ = &thread_factory; + } + protected: virtual void Run() { // We don't want this function to be pure virtual, because it will be called if: @@ -61,6 +70,7 @@ class QuicThreadImpl { private: Envoy::Thread::ThreadPtr thread_; + Envoy::Thread::ThreadFactory* thread_factory_{nullptr}; absl::Notification thread_is_set_; // Whether |thread_| is set in parent.
}; diff --git a/test/BUILD b/test/BUILD index 9e194a76dcf2d..6584424aaa4f2 100644 --- a/test/BUILD +++ b/test/BUILD @@ -30,7 +30,6 @@ envoy_cc_test_library( "//source/common/common:thread_lib", "//source/common/event:libevent_lib", "//source/common/http/http2:codec_lib", - "//source/common/thread:thread_factory_singleton_lib", "//test/common/runtime:utility_lib", "//test/mocks/access_log:access_log_mocks", "//test/test_common:environment_lib", diff --git a/test/common/thread/BUILD b/test/common/thread/BUILD deleted file mode 100644 index 382cca7904df0..0000000000000 --- a/test/common/thread/BUILD +++ /dev/null @@ -1,18 +0,0 @@ -licenses(["notice"]) # Apache 2 - -load( - "//bazel:envoy_build_system.bzl", - "envoy_cc_test", - "envoy_package", -) - -envoy_package() - -envoy_cc_test( - name = "thread_factory_singleton_test", - srcs = ["thread_factory_singleton_test.cc"], - deps = [ - "//source/common/common:assert_lib", - "//source/common/thread:thread_factory_singleton_lib", - ], -) diff --git a/test/common/thread/thread_factory_singleton_test.cc b/test/common/thread/thread_factory_singleton_test.cc deleted file mode 100644 index d50fc69ee417c..0000000000000 --- a/test/common/thread/thread_factory_singleton_test.cc +++ /dev/null @@ -1,35 +0,0 @@ -#include "envoy/thread/thread.h" - -#include "common/common/assert.h" - -#include "gmock/gmock.h" -#include "gtest/gtest.h" - -namespace Envoy { -namespace Thread { -namespace { - -class ThreadFactorySingletonTest : public testing::Test { -protected: - ThreadFactorySingletonTest() - : run_tid_(Envoy::Thread::ThreadFactorySingleton::get().currentThreadId()) {} - - bool checkThreadId() const { return run_tid_->isCurrentThreadId(); }; - - ThreadIdPtr run_tid_; -}; - -// Verify that Thread::threadFactorySingleton is defined and initialized for tests. -TEST_F(ThreadFactorySingletonTest, IsCurrentThread) { - // Use std::thread instead of the ThreadFactory's createThread() to avoid the dependency on the - // code under test. 
- bool is_current = checkThreadId(); - EXPECT_TRUE(is_current); - std::thread thread([this, &is_current]() { is_current = checkThreadId(); }); - thread.join(); - EXPECT_FALSE(is_current) << "run_tid_->isCurrentThreadId() from inside another thread"; -} - -} // namespace -} // namespace Thread -} // namespace Envoy diff --git a/test/exe/main_common_test.cc b/test/exe/main_common_test.cc index 2b1aee544fec3..44c693a91f8e6 100644 --- a/test/exe/main_common_test.cc +++ b/test/exe/main_common_test.cc @@ -40,20 +40,7 @@ class MainCommonTest : public testing::TestWithParam Date: Mon, 22 Apr 2019 21:43:46 +0530 Subject: [PATCH 163/165] fix version history order (#6671) Signed-off-by: Rama Chavali --- docs/root/intro/version_history.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/root/intro/version_history.rst b/docs/root/intro/version_history.rst index e4f05582044d0..3e15c08e59c40 100644 --- a/docs/root/intro/version_history.rst +++ b/docs/root/intro/version_history.rst @@ -5,8 +5,8 @@ Version history ================ * access log: added a new field for response code details in :ref:`file access logger` and :ref:`gRPC access logger`. * dubbo_proxy: support the :ref:`Dubbo proxy filter `. -* ext_authz: added option to `ext_authz` that allows the filter clearing route cache. * eds: added support to specify max time for which endpoints can be used :ref:`gRPC filter `. +* ext_authz: added option to `ext_authz` that allows the filter clearing route cache. * http: mitigated a race condition with the :ref:`delayed_close_timeout` where it could trigger while actively flushing a pending write buffer for a downstream connection. * redis: added :ref:`prefix routing ` to enable routing commands based on their key's prefix to different upstream. * redis: add support for zpopmax and zpopmin commands. 
From a3fe3c6ef03ae7386974bc27225700eab1b48a6f Mon Sep 17 00:00:00 2001 From: Rama Chavali Date: Tue, 23 Apr 2019 00:29:10 +0530 Subject: [PATCH 164/165] docs: move xds protocol to rst (#6670) This PR moves the xds protocol from md to rst. Risk Level: Low Testing: N/A Docs Changes: N/A Release Notes: N/A Fixes #6338 Signed-off-by: Rama Chavali --- api/XDS_PROTOCOL.md | 396 --------------- api/xds_protocol.rst | 456 ++++++++++++++++++ docs/build.sh | 7 + docs/root/api/api.rst | 11 + docs/root/index.rst | 2 +- .../intro/arch_overview/cluster_manager.rst | 2 + docs/root/intro/intro.rst | 1 + 7 files changed, 478 insertions(+), 397 deletions(-) delete mode 100644 api/XDS_PROTOCOL.md create mode 100644 api/xds_protocol.rst create mode 100644 docs/root/api/api.rst diff --git a/api/XDS_PROTOCOL.md b/api/XDS_PROTOCOL.md deleted file mode 100644 index c2bd0aa0cbee2..0000000000000 --- a/api/XDS_PROTOCOL.md +++ /dev/null @@ -1,396 +0,0 @@ -# xDS REST and gRPC protocol - -Envoy discovers its various dynamic resources via the filesystem or by querying -one or more management servers. Collectively, these discovery services and their -corresponding APIs are referred to as _xDS_. Resources are requested via -_subscriptions_, by specifying a filesystem path to watch, initiating gRPC -streams or polling a REST-JSON URL. The latter two methods involve sending -requests with a -[`DiscoveryRequest`](https://www.envoyproxy.io/docs/envoy/latest/api-v2/api/v2/discovery.proto#discoveryrequest) -proto payload. Resources are delivered in a -[`DiscoveryResponse`](https://www.envoyproxy.io/docs/envoy/latest/api-v2/api/v2/discovery.proto#discoveryresponse) -proto payload in all methods. We discuss each type of subscription below. - -## Filesystem subscriptions - -The simplest approach to delivering dynamic configuration is to place it at a -well known path specified in the -[`ConfigSource`](https://www.envoyproxy.io/docs/envoy/latest/api-v2/api/v2/core/config_source.proto#core-configsource). 
-Envoy will use `inotify` (`kqueue` on macOS) to monitor the file for changes -and parse the `DiscoveryResponse` proto in the file on update. Binary -protobufs, JSON, YAML and proto text are supported formats for the -`DiscoveryResponse`. - -There is no mechanism available for filesystem subscriptions to ACK/NACK updates -beyond stats counters and logs. The last valid configuration for an xDS API will -continue to apply if an configuration update rejection occurs. - -## Streaming gRPC subscriptions - -### Singleton resource type discovery - -A gRPC -[`ApiConfigSource`](https://www.envoyproxy.io/docs/envoy/latest/api-v2/api/v2/core/config_source.proto#core-apiconfigsource) -can be specified independently for each xDS API, pointing at an upstream -cluster corresponding to a management server. This will initiate an independent -bidirectional gRPC stream for each xDS resource type, potentially to distinct -management servers. API delivery is eventually consistent. See -[ADS](#aggregated-discovery-service) below for situations in which explicit -control of sequencing is required. - -#### Type URLs - -Each xDS API is concerned with resources of a given type. There is a 1:1 -correspondence between an xDS API and a resource type. That is: - -* [LDS: `envoy.api.v2.Listener`](envoy/api/v2/lds.proto) -* [RDS: `envoy.api.v2.RouteConfiguration`](envoy/api/v2/rds.proto) -* [VHDS: `envoy.api.v2.Vhds`](envoy/api/v2/rds.proto) -* [CDS: `envoy.api.v2.Cluster`](envoy/api/v2/cds.proto) -* [EDS: `envoy.api.v2.ClusterLoadAssignment`](envoy/api/v2/eds.proto) -* [SDS: `envoy.api.v2.Auth.Secret`](envoy/api/v2/auth/cert.proto) - -The concept of [_type -URLs_](https://developers.google.com/protocol-buffers/docs/proto3#any) appears -below, and takes the form `type.googleapis.com/`, e.g. -`type.googleapis.com/envoy.api.v2.Cluster` for CDS. In various requests from -Envoy and responses by the management server, the resource type URL is stated. 
- -#### ACK/NACK and versioning - -Each stream begins with a `DiscoveryRequest` from Envoy, specifying the list of -resources to subscribe to, the type URL corresponding to the subscribed -resources, the node identifier and an empty `version_info`. An example EDS request -might be: - -```yaml -version_info: -node: { id: envoy } -resource_names: -- foo -- bar -type_url: type.googleapis.com/envoy.api.v2.ClusterLoadAssignment -response_nonce: -``` - -The management server may reply either immediately or when the requested -resources are available with a `DiscoveryResponse`, e.g.: - -```yaml -version_info: X -resources: -- foo ClusterLoadAssignment proto encoding -- bar ClusterLoadAssignment proto encoding -type_url: type.googleapis.com/envoy.api.v2.ClusterLoadAssignment -nonce: A -``` - -After processing the `DiscoveryResponse`, Envoy will send a new request on the -stream, specifying the last version successfully applied and the nonce provided -by the management server. If the update was successfully applied, the -`version_info` will be __X__, as indicated in the sequence diagram: - -![Version update after ACK](diagrams/simple-ack.svg) - -In this sequence diagram, and below, the following format is used to abbreviate -messages: -* `DiscoveryRequest`: (V=`version_info`,R=`resource_names`,N=`response_nonce`,T=`type_url`) -* `DiscoveryResponse`: (V=`version_info`,R=`resources`,N=`nonce`,T=`type_url`) - -The version provides Envoy and the management server a shared notion of the -currently applied configuration, as well as a mechanism to ACK/NACK -configuration updates. If Envoy had instead rejected configuration update __X__, -it would reply with -[`error_detail`](https://www.envoyproxy.io/docs/envoy/latest/api-v2/api/v2/discovery.proto#envoy-api-field-discoveryrequest-error-detail) -populated and its previous version, which in this case was the empty -initial version. 
The error_detail has more details around the exact error message -populated in the message field: - -![No version update after NACK](diagrams/simple-nack.svg) - -Later, an API update may succeed at a new version __Y__: - -![ACK after NACK](diagrams/later-ack.svg) - -Each stream has its own notion of versioning, there is no shared versioning -across resource types. When ADS is not used, even each resource of a given -resource type may have a -distinct version, since the Envoy API allows distinct EDS/RDS resources to point -at different `ConfigSource`s. - -#### When to send an update - -The management server should only send updates to the Envoy client when the -resources in the `DiscoveryResponse` have changed. Envoy replies to any -`DiscoveryResponse` with a `DiscoveryRequest` containing the ACK/NACK -immediately after it has been either accepted or rejected. If the management -server provides the same set of resources rather than waiting for a change to -occur, it will cause Envoy and the management server to spin and have a severe -performance impact. - -Within a stream, new `DiscoveryRequest`s supersede any prior `DiscoveryRequest`s -having the same resource type. This means that the management server only needs -to respond to the latest `DiscoveryRequest` on each stream for any given resource -type. - -#### Resource hints - -The `resource_names` specified in the `DiscoveryRequest` are a hint. Some -resource types, e.g. `Cluster`s and `Listener`s will specify an empty -`resource_names` list, since Envoy is interested in learning about all the -`Cluster`s (CDS) and `Listener`s (LDS) that the management server(s) know about -corresponding to its node identification. Other resource types, e.g. -`RouteConfiguration`s (RDS) and `ClusterLoadAssignment`s (EDS), follow from -earlier CDS/LDS updates and Envoy is able to explicitly enumerate these -resources. 
- -LDS/CDS resource hints will always be empty and it is expected that the -management server will provide the complete state of the LDS/CDS resources in -each response. An absent `Listener` or `Cluster` will be deleted. - -For EDS/RDS, the management server does not need to supply every requested -resource and may also supply additional, unrequested resources. `resource_names` -is only a hint. Envoy will silently ignore any superfluous resources. When a -requested resource is missing in a RDS or EDS update, Envoy will retain the last -known value for this resource except in the case where the `Cluster` or `Listener` -is being warmed. See [Resource warming](#resource-warming) section below on the expectations -during warming. The management server may be able to infer all -the required EDS/RDS resources from the `node` identification in the -`DiscoveryRequest`, in which case this hint may be discarded. An empty EDS/RDS -`DiscoveryResponse` is effectively a nop from the perspective of the respective -resources in the Envoy. - -When a `Listener` or `Cluster` is deleted, its corresponding EDS and RDS -resources are also deleted inside the Envoy instance. In order for EDS resources -to be known or tracked by Envoy, there must exist an applied `Cluster` -definition (e.g. sourced via CDS). A similar relationship exists between RDS and -`Listeners` (e.g. sourced via LDS). - -For EDS/RDS, Envoy may either generate a distinct stream for each resource of a -given type (e.g. if each `ConfigSource` has its own distinct upstream cluster -for a management server), or may combine together multiple resource requests for -a given resource type when they are destined for the same management server. -While this is left to implementation specifics, management servers should be capable -of handling one or more `resource_names` for a given resource type in each -request. 
Both sequence diagrams below are valid for fetching two EDS resources -`{foo, bar}`: - -![Multiple EDS requests on the same stream](diagrams/eds-same-stream.svg) -![Multiple EDS requests on distinct streams](diagrams/eds-distinct-stream.svg) - -#### Resource updates - -As discussed above, Envoy may update the list of `resource_names` it presents to -the management server in each `DiscoveryRequest` that ACK/NACKs a specific -`DiscoveryResponse`. In addition, Envoy may later issue additional -`DiscoveryRequest`s at a given `version_info` to update the management server -with new resource hints. For example, if Envoy is at EDS version __X__ and knows -only about cluster `foo`, but then receives a CDS update and learns about `bar` -in addition, it may issue an additional `DiscoveryRequest` for __X__ with -`{foo,bar}` as `resource_names`. - -![CDS response leads to EDS resource hint update](diagrams/cds-eds-resources.svg) - -There is a race condition that may arise here; if after a resource hint update -is issued by Envoy at __X__, but before the management server processes the -update it replies with a new version __Y__, the resource hint update may be -interpreted as a rejection of __Y__ by presenting an __X__ `version_info`. To -avoid this, the management server provides a `nonce` that Envoy uses to indicate -the specific `DiscoveryResponse` each `DiscoveryRequest` corresponds to: - -![EDS update race motivates nonces](diagrams/update-race.svg) - -The management server should not send a `DiscoveryResponse` for any -`DiscoveryRequest` that has a stale nonce. A nonce becomes stale following a -newer nonce being presented to Envoy in a `DiscoveryResponse`. A management -server does not need to send an update until it determines a new version is -available. Earlier requests at a version then also become stale. It may process -multiple `DiscoveryRequests` at a version until a new version is ready. 
- -![Requests become stale](diagrams/stale-requests.svg) - -An implication of the above resource update sequencing is that Envoy does not -expect a `DiscoveryResponse` for every `DiscoveryRequest` it issues. - -### Resource warming - -[`Clusters`](https://www.envoyproxy.io/docs/envoy/latest/intro/arch_overview/cluster_manager.html#cluster-warming) -and [`Listeners`](https://www.envoyproxy.io/docs/envoy/latest/configuration/listeners/lds#config-listeners-lds) -go through `warming` before they can serve requests. This process happens both during -[`Envoy initialization`](https://www.envoyproxy.io/docs/envoy/latest/intro/arch_overview/init.html#initialization) -and when the `Cluster` or `Listener` is updated. Warming of `Cluster` is completed only when a -`ClusterLoadAssignment` response is supplied by management server. Similarly, warming of `Listener` -is completed only when a `RouteConfiguration` is supplied by management server if the listener -refers to an RDS configuration. Management server is expected to provide the EDS/RDS updates during -warming. If management server does not provide EDS/RDS responses, Envoy will not initialize -itself during the initialization phase and the updates sent via CDS/LDS will not take effect until -EDS/RDS responses are supplied. - -#### Eventual consistency considerations - -Since Envoy's xDS APIs are eventually consistent, traffic may drop briefly -during updates. For example, if only cluster __X__ is known via CDS/EDS, -a `RouteConfiguration` references cluster __X__ -and is then adjusted to cluster __Y__ just before the CDS/EDS update -providing __Y__, traffic will be blackholed until __Y__ is known about by the -Envoy instance. - -For some applications, a temporary drop of traffic is acceptable, retries at the -client or by other Envoy sidecars will hide this drop. 
For other scenarios where -drop can't be tolerated, traffic drop could have been avoided by providing a -CDS/EDS update with both __X__ and __Y__, then the RDS update repointing from -__X__ to __Y__ and then a CDS/EDS update dropping __X__. - -In general, to avoid traffic drop, sequencing of updates should follow a -`make before break` model, wherein -* CDS updates (if any) must always be pushed first. -* EDS updates (if any) must arrive after CDS updates for the respective clusters. -* LDS updates must arrive after corresponding CDS/EDS updates. -* RDS updates related to the newly added listeners must arrive after CDS/EDS/LDS updates. -* VHDS updates (if any) related to the newly added RouteConfigurations must arrive after RDS updates. -* Stale CDS clusters and related EDS endpoints (ones no longer being - referenced) can then be removed. - -xDS updates can be pushed independently if no new clusters/routes/listeners -are added or if it's acceptable to temporarily drop traffic during -updates. Note that in case of LDS updates, the listeners will be warmed -before they receive traffic, i.e. the dependent routes are fetched through -RDS if configured. Clusters are warmed when adding/removing/updating -clusters. On the other hand, routes are not warmed, i.e., the management -plane must ensure that clusters referenced by a route are in place, before -pushing the updates for a route. - -### Aggregated Discovery Services (ADS) - -It's challenging to provide the above guarantees on sequencing to avoid traffic -drop when management servers are distributed. ADS allow a single management -server, via a single gRPC stream, to deliver all API updates. This provides the -ability to carefully sequence updates to avoid traffic drop. With ADS, a single -stream is used with multiple independent `DiscoveryRequest`/`DiscoveryResponse` -sequences multiplexed via the type URL. For any given type URL, the above -sequencing of `DiscoveryRequest` and `DiscoveryResponse` messages applies. 
An -example update sequence might look like: - -![EDS/CDS multiplexed on an ADS stream](diagrams/ads.svg) - -A single ADS stream is available per Envoy instance. - -An example minimal `bootstrap.yaml` fragment for ADS configuration is: - -```yaml -node: - id: -dynamic_resources: - cds_config: {ads: {}} - lds_config: {ads: {}} - ads_config: - api_type: GRPC - grpc_services: - envoy_grpc: - cluster_name: ads_cluster -static_resources: - clusters: - - name: ads_cluster - connect_timeout: { seconds: 5 } - type: STATIC - hosts: - - socket_address: - address: - port_value: - lb_policy: ROUND_ROBIN - http2_protocol_options: {} - upstream_connection_options: - # configure a TCP keep-alive to detect and reconnect to the admin - # server in the event of a TCP socket disconnection - tcp_keepalive: - ... -admin: - ... - -``` - -### Incremental xDS - -Incremental xDS is a separate xDS endpoint that: - - * Allows the protocol to communicate on the wire in terms of resource/resource - name deltas ("Delta xDS"). This supports the goal of scalability of xDS - resources. Rather than deliver all 100k clusters when a single cluster is - modified, the management server only needs to deliver the single cluster - that changed. - * Allows the Envoy to on-demand / lazily request additional resources. For - example, requesting a cluster only when a request for that cluster arrives. - -An Incremental xDS session is always in the context of a gRPC bidirectional -stream. This allows the xDS server to keep track of the state of xDS clients -connected to it. There is no REST version of Incremental xDS yet. - -In the delta xDS wire protocol, the nonce field is required and used to pair a -[`DeltaDiscoveryResponse`](https://www.envoyproxy.io/docs/envoy/latest/api-v2/api/v2/discovery.proto#deltadiscoveryresponse) -to a [`DeltaDiscoveryRequest`](https://www.envoyproxy.io/docs/envoy/latest/api-v2/api/v2/discovery.proto#deltadiscoveryrequest) -ACK or NACK. 
-Optionally, a response message level system_version_info is present for -debugging purposes only. - -`DeltaDiscoveryRequest` can be sent in 3 situations: - 1. Initial message in a xDS bidirectional gRPC stream. - 2. As an ACK or NACK response to a previous `DeltaDiscoveryResponse`. - In this case the `response_nonce` is set to the nonce value in the Response. - ACK or NACK is determined by the absence or presence of `error_detail`. - 3. Spontaneous `DeltaDiscoveryRequest` from the client. - This can be done to dynamically add or remove elements from the tracked - `resource_names` set. In this case `response_nonce` must be omitted. - -In this first example the client connects and receives a first update that it -ACKs. The second update fails and the client NACKs the update. Later the xDS -client spontaneously requests the "wc" resource. - -![Incremental session example](diagrams/incremental.svg) - -On reconnect the Incremental xDS client may tell the server of its known -resources to avoid resending them over the network. Because no state is assumed -to be preserved from the previous stream, the reconnecting client must provide -the server with all resource names it is interested in. - -![Incremental reconnect example](diagrams/incremental-reconnect.svg) - -#### Resource names -Resources are identified by a resource name or an alias. Aliases of a resource, if present, can be -identified by the alias field in the resource of a `DeltaDiscoveryResponse`. The resource name will -be returned in the name field in the resource of a `DeltaDiscoveryResponse`. - -#### Subscribing to Resources -The client can send either an alias or the name of a resource in the `resource_names_subscribe` -field of a `DeltaDiscoveryRequest` in order to subscribe to a resource. Both the names and aliases -of resources should be checked in order to determine whether the entity in question has been -subscribed to. 
- -A `resource_names_subscribe` field may contain resource names that the server believes the client -is already subscribed to, and furthermore has the most recent versions of. However, the server -*must* still provide those resources in the response; due to implementation details hidden from -the server, the client may have "forgotten" those resources despite apparently remaining subscribed. - -#### Unsubscribing from Resources -When a client loses interest in some resources, it will indicate that with the -`resource_names_unsubscribe` field of a `DeltaDiscoveryRequest`. As with `resource_names_subscribe`, -these may be resource names or aliases. - -A `resource_names_unsubscribe` field may contain superfluous resource names, which the server -thought the client was already not subscribed to. The server must cleanly process such a request; -it can simply ignore these phantom unsubscriptions. - -## REST-JSON polling subscriptions - -Synchronous (long) polling via REST endpoints is also available for the xDS -singleton APIs. The above sequencing of messages is similar, except no -persistent stream is maintained to the management server. It is expected that -there is only a single outstanding request at any point in time, and as a result -the response nonce is optional in REST-JSON. The [JSON canonical transform of -proto3](https://developers.google.com/protocol-buffers/docs/proto3#json) is used -to encode `DiscoveryRequest` and `DiscoveryResponse` messages. ADS is not -available for REST-JSON polling. - -When the poll period is set to a small value, with the intention of long -polling, then there is also a requirement to avoid sending a `DiscoveryResponse` -[unless a change to the underlying resources has -occurred](#when-to-send-an-update). 
diff --git a/api/xds_protocol.rst b/api/xds_protocol.rst new file mode 100644 index 0000000000000..40f323c4bd0ad --- /dev/null +++ b/api/xds_protocol.rst @@ -0,0 +1,456 @@ +xDS REST and gRPC protocol +========================== + +Envoy discovers its various dynamic resources via the filesystem or by +querying one or more management servers. Collectively, these discovery +services and their corresponding APIs are referred to as *xDS*. +Resources are requested via *subscriptions*, by specifying a filesystem +path to watch, initiating gRPC streams or polling a REST-JSON URL. The +latter two methods involve sending requests with a :ref:`DiscoveryRequest ` +proto payload. Resources are delivered in a +:ref:`DiscoveryResponse ` +proto payload in all methods. We discuss each type of subscription +below. + +Filesystem subscriptions +------------------------ + +The simplest approach to delivering dynamic configuration is to place it +at a well known path specified in the :ref:`ConfigSource `. +Envoy will use `inotify` (`kqueue` on macOS) to monitor the file for +changes and parse the +:ref:`DiscoveryResponse ` proto in the file on update. +Binary protobufs, JSON, YAML and proto text are supported formats for +the +:ref:`DiscoveryResponse `. + +There is no mechanism available for filesystem subscriptions to ACK/NACK +updates beyond stats counters and logs. The last valid configuration for +an xDS API will continue to apply if an configuration update rejection +occurs. + +Streaming gRPC subscriptions +---------------------------- + +Singleton resource type discovery +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +A gRPC +:ref:`ApiConfigSource ` +can be specified independently for each xDS API, pointing at an upstream +cluster corresponding to a management server. This will initiate an +independent bidirectional gRPC stream for each xDS resource type, +potentially to distinct management servers. API delivery is eventually +consistent. 
See :ref:`Aggregated Discovery Service` below for +situations in which explicit control of sequencing is required. + +Type URLs +^^^^^^^^^ + +Each xDS API is concerned with resources of a given type. There is a 1:1 +correspondence between an xDS API and a resource type. That is: + +- LDS: :ref:`envoy.api.v2.Listener ` +- RDS: :ref:`envoy.api.v2.RouteConfiguration ` +- VHDS: :ref:`envoy.api.v2.Vhds ` +- CDS: :ref:`envoy.api.v2.Cluster ` +- EDS: :ref:`envoy.api.v2.ClusterLoadAssignment ` +- SDS: :ref:`envoy.api.v2.Auth.Secret ` + +The concept of `type URLs `_ appears below, and takes the form +`type.googleapis.com/`, e.g. +`type.googleapis.com/envoy.api.v2.Cluster` for CDS. In various +requests from Envoy and responses by the management server, the resource +type URL is stated. + +ACK/NACK and versioning +^^^^^^^^^^^^^^^^^^^^^^^ + +Each stream begins with a +:ref:`DiscoveryRequest ` from Envoy, specifying +the list of resources to subscribe to, the type URL corresponding to the +subscribed resources, the node identifier and an empty :ref:`version_info `. +An example EDS request might be: + +.. code:: yaml + + version_info: + node: { id: envoy } + resource_names: + - foo + - bar + type_url: type.googleapis.com/envoy.api.v2.ClusterLoadAssignment + response_nonce: + +The management server may reply either immediately or when the requested +resources are available with a :ref:`DiscoveryResponse `, e.g.: + +.. code:: yaml + + version_info: X + resources: + - foo ClusterLoadAssignment proto encoding + - bar ClusterLoadAssignment proto encoding + type_url: type.googleapis.com/envoy.api.v2.ClusterLoadAssignment + nonce: A + +After processing the :ref:`DiscoveryResponse `, Envoy will send a new +request on the stream, specifying the last version successfully applied +and the nonce provided by the management server. If the update was +successfully applied, the :ref:`version_info ` will be **X**, as indicated +in the sequence diagram: + +.. 
figure:: diagrams/simple-ack.svg + :alt: Version update after ACK + +In this sequence diagram, and below, the following format is used to abbreviate messages: + +- *DiscoveryRequest*: (V=version_info,R=resource_names,N=response_nonce,T=type_url) +- *DiscoveryResponse*: (V=version_info,R=resources,N=nonce,T=type_url) + +The version provides Envoy and the management server a shared notion of +the currently applied configuration, as well as a mechanism to ACK/NACK +configuration updates. If Envoy had instead rejected configuration +update **X**, it would reply with :ref:`error_detail ` +populated and its previous version, which in this case was the empty +initial version. The :ref:`error_detail ` has more details around the exact +error message populated in the message field: + +.. figure:: diagrams/simple-nack.svg + :alt: No version update after NACK + +Later, an API update may succeed at a new version **Y**: + + +.. figure:: diagrams/later-ack.svg + :alt: ACK after NACK + +Each stream has its own notion of versioning, there is no shared +versioning across resource types. When ADS is not used, even each +resource of a given resource type may have a distinct version, since the +Envoy API allows distinct EDS/RDS resources to point at different :ref:`ConfigSources `. + +.. _Resource Updates: + +When to send an update +^^^^^^^^^^^^^^^^^^^^^^ + +The management server should only send updates to the Envoy client when +the resources in the :ref:`DiscoveryResponse ` have changed. Envoy replies +to any :ref:`DiscoveryResponse ` with a :ref:`DiscoveryRequest ` containing the +ACK/NACK immediately after it has been either accepted or rejected. If +the management server provides the same set of resources rather than +waiting for a change to occur, it will cause Envoy and the management +server to spin and have a severe performance impact. + +Within a stream, new :ref:`DiscoveryRequests ` supersede any prior +:ref:`DiscoveryRequests ` having the same resource type. 
This means that +the management server only needs to respond to the latest +:ref:`DiscoveryRequest ` on each stream for any given resource type. + +Resource hints +^^^^^^^^^^^^^^ + +The :ref:`resource_names ` specified in the :ref:`DiscoveryRequest ` are a hint. +Some resource types, e.g. `Clusters` and `Listeners` will +specify an empty :ref:`resource_names ` list, since Envoy is interested in +learning about all the :ref:`Clusters (CDS) ` and :ref:`Listeners (LDS) ` +that the management server(s) know about corresponding to its node +identification. Other resource types, e.g. :ref:`RouteConfiguration (RDS) ` +and :ref:`ClusterLoadAssignment (EDS) `, follow from earlier +CDS/LDS updates and Envoy is able to explicitly enumerate these +resources. + +LDS/CDS resource hints will always be empty and it is expected that the +management server will provide the complete state of the LDS/CDS +resources in each response. An absent `Listener` or `Cluster` will +be deleted. + +For EDS/RDS, the management server does not need to supply every +requested resource and may also supply additional, unrequested +resources. :ref:`resource_names ` is only a hint. Envoy will silently ignore +any superfluous resources. When a requested resource is missing in a RDS +or EDS update, Envoy will retain the last known value for this resource +except in the case where the `Cluster` or `Listener` is being +warmed. See :ref:`Resource warming` section below on +the expectations during warming. The management server may be able to +infer all the required EDS/RDS resources from the :ref:`node ` +identification in the :ref:`DiscoveryRequest `, in which case this hint may +be discarded. An empty EDS/RDS :ref:`DiscoveryResponse ` is effectively a +nop from the perspective of the respective resources in the Envoy. + +When a `Listener` or `Cluster` is deleted, its corresponding EDS and +RDS resources are also deleted inside the Envoy instance. 
In order for +EDS resources to be known or tracked by Envoy, there must exist an +applied `Cluster` definition (e.g. sourced via CDS). A similar +relationship exists between RDS and `Listeners` (e.g. sourced via +LDS). + +For EDS/RDS, Envoy may either generate a distinct stream for each +resource of a given type (e.g. if each :ref:`ConfigSource ` has its own +distinct upstream cluster for a management server), or may combine +together multiple resource requests for a given resource type when they +are destined for the same management server. While this is left to +implementation specifics, management servers should be capable of +handling one or more :ref:`resource_names ` for a given resource type in +each request. Both sequence diagrams below are valid for fetching two +EDS resources `{foo, bar}`: + +|Multiple EDS requests on the same stream| |Multiple EDS requests on +distinct streams| + +Resource updates +^^^^^^^^^^^^^^^^ + +As discussed above, Envoy may update the list of :ref:`resource_names ` it +presents to the management server in each :ref:`DiscoveryRequest ` that +ACK/NACKs a specific :ref:`DiscoveryResponse `. In addition, Envoy may later +issue additional :ref:`DiscoveryRequests ` at a given :ref:`version_info ` to +update the management server with new resource hints. For example, if +Envoy is at EDS version **X** and knows only about cluster ``foo``, but +then receives a CDS update and learns about ``bar`` in addition, it may +issue an additional :ref:`DiscoveryRequest ` for **X** with `{foo,bar}` as +`resource_names`. + +.. figure:: diagrams/cds-eds-resources.svg + :alt: CDS response leads to EDS resource hint update + +There is a race condition that may arise here; if after a resource hint +update is issued by Envoy at **X**, but before the management server +processes the update it replies with a new version **Y**, the resource +hint update may be interpreted as a rejection of **Y** by presenting an +**X** :ref:`version_info `. 
To avoid this, the management server provides a +``nonce`` that Envoy uses to indicate the specific :ref:`DiscoveryResponse ` +each :ref:`DiscoveryRequest ` corresponds to: + +.. figure:: diagrams/update-race.svg + :alt: EDS update race motivates nonces + +The management server should not send a :ref:`DiscoveryResponse ` for any +:ref:`DiscoveryRequest ` that has a stale nonce. A nonce becomes stale +following a newer nonce being presented to Envoy in a +:ref:`DiscoveryResponse `. A management server does not need to send an +update until it determines a new version is available. Earlier requests +at a version then also become stale. It may process multiple +:ref:`DiscoveryRequests ` at a version until a new version is ready. + +.. figure:: diagrams/stale-requests.svg + :alt: Requests become stale + +An implication of the above resource update sequencing is that Envoy +does not expect a :ref:`DiscoveryResponse ` for every :ref:`DiscoveryRequests ` +it issues. + +.. _Resource Warming: + +Resource warming +~~~~~~~~~~~~~~~~ + +:ref:`Clusters ` and +:ref:`Listeners ` +go through warming before they can serve requests. This process +happens both during :ref:`Envoy initialization ` +and when the `Cluster` or `Listener` is updated. Warming of +`Cluster` is completed only when a `ClusterLoadAssignment` response +is supplied by management server. Similarly, warming of `Listener` is +completed only when a `RouteConfiguration` is supplied by management +server if the listener refers to an RDS configuration. Management server +is expected to provide the EDS/RDS updates during warming. If management +server does not provide EDS/RDS responses, Envoy will not initialize +itself during the initialization phase and the updates sent via CDS/LDS +will not take effect until EDS/RDS responses are supplied. + +Eventual consistency considerations +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Since Envoy's xDS APIs are eventually consistent, traffic may drop +briefly during updates. 
For example, if only cluster **X** is known via +CDS/EDS, a `RouteConfiguration` references cluster **X** and is then +adjusted to cluster **Y** just before the CDS/EDS update providing +**Y**, traffic will be blackholed until **Y** is known about by the +Envoy instance. + +For some applications, a temporary drop of traffic is acceptable; +retries at the client or by other Envoy sidecars will hide this drop. +For other scenarios where drop can't be tolerated, traffic drop could +have been avoided by providing a CDS/EDS update with both **X** and +**Y**, then the RDS update repointing from **X** to **Y** and then a +CDS/EDS update dropping **X**. + +In general, to avoid traffic drop, sequencing of updates should follow a +make before break model, wherein: + +- CDS updates (if any) must always be pushed first. +- EDS updates (if any) must arrive after CDS updates for the respective clusters. +- LDS updates must arrive after corresponding CDS/EDS updates. +- RDS updates related to the newly added listeners must arrive after CDS/EDS/LDS updates. +- VHDS updates (if any) related to the newly added RouteConfigurations must arrive after RDS updates. +- Stale CDS clusters and related EDS endpoints (ones no longer being referenced) can then be removed. + +xDS updates can be pushed independently if no new +clusters/routes/listeners are added or if it's acceptable to temporarily +drop traffic during updates. Note that in case of LDS updates, the +listeners will be warmed before they receive traffic, i.e. the dependent +routes are fetched through RDS if configured. Clusters are warmed when +adding/removing/updating clusters. On the other hand, routes are not +warmed, i.e., the management plane must ensure that clusters referenced +by a route are in place, before pushing the updates for a route. + +..
_Aggregated Discovery Service: + +Aggregated Discovery Service (ADS) +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +It's challenging to provide the above guarantees on sequencing to avoid +traffic drop when management servers are distributed. ADS allows a single +management server, via a single gRPC stream, to deliver all API updates. +This provides the ability to carefully sequence updates to avoid traffic +drop. With ADS, a single stream is used with multiple independent +:ref:`DiscoveryRequest `/:ref:`DiscoveryResponse ` sequences multiplexed via the +type URL. For any given type URL, the above sequencing of +:ref:`DiscoveryRequest ` and :ref:`DiscoveryResponse ` messages applies. An +example update sequence might look like: + +.. figure:: diagrams/ads.svg + :alt: EDS/CDS multiplexed on an ADS stream + +A single ADS stream is available per Envoy instance. + +An example minimal ``bootstrap.yaml`` fragment for ADS configuration is: + +.. code:: yaml + + node: + id: + dynamic_resources: + cds_config: {ads: {}} + lds_config: {ads: {}} + ads_config: + api_type: GRPC + grpc_services: + envoy_grpc: + cluster_name: ads_cluster + static_resources: + clusters: + - name: ads_cluster + connect_timeout: { seconds: 5 } + type: STATIC + hosts: + - socket_address: + address: + port_value: + lb_policy: ROUND_ROBIN + http2_protocol_options: {} + upstream_connection_options: + # configure a TCP keep-alive to detect and reconnect to the admin + # server in the event of a TCP socket disconnection + tcp_keepalive: + ... + admin: + ... + +Incremental xDS +~~~~~~~~~~~~~~~ + +Incremental xDS is a separate xDS endpoint that: + +- Allows the protocol to communicate on the wire in terms of + resource/resource name deltas ("Delta xDS"). This supports the goal + of scalability of xDS resources. Rather than deliver all 100k + clusters when a single cluster is modified, the management server + only needs to deliver the single cluster that changed.
+- Allows Envoy to lazily request additional resources on demand. + For example, requesting a cluster only when a request for that + cluster arrives. + +An Incremental xDS session is always in the context of a gRPC +bidirectional stream. This allows the xDS server to keep track of the +state of xDS clients connected to it. There is no REST version of +Incremental xDS yet. + +In the delta xDS wire protocol, the nonce field is required and used to +pair a :ref:`DeltaDiscoveryResponse ` +to a :ref:`DeltaDiscoveryRequest ` +ACK or NACK. Optionally, a response message level :ref:`system_version_info ` +is present for debugging purposes only. + +:ref:`DeltaDiscoveryRequest ` can be sent in the following situations: + +- Initial message in an xDS bidirectional gRPC stream. +- As an ACK or NACK response to a previous :ref:`DeltaDiscoveryResponse `. In this case the :ref:`response_nonce ` is set to the nonce value in the Response. ACK or NACK is determined by the absence or presence of :ref:`error_detail `. +- Spontaneous :ref:`DeltaDiscoveryRequests ` from the client. This can be done to dynamically add or remove elements from the tracked :ref:`resource_names ` set. In this case :ref:`response_nonce ` must be omitted. + +In this first example the client connects and receives a first update +that it ACKs. The second update fails and the client NACKs the update. +Later the xDS client spontaneously requests the "wc" resource. + +.. figure:: diagrams/incremental.svg + :alt: Incremental session example + +On reconnect the Incremental xDS client may tell the server of its known +resources to avoid resending them over the network. Because no state is +assumed to be preserved from the previous stream, the reconnecting +client must provide the server with all resource names it is interested +in. + +.. figure:: diagrams/incremental-reconnect.svg + :alt: Incremental reconnect example + +Resource names +^^^^^^^^^^^^^^ + +Resources are identified by a resource name or an alias.
Aliases of a +resource, if present, can be identified by the alias field in the +resource of a :ref:`DeltaDiscoveryResponse `. The resource name will be +returned in the name field in the resource of a +:ref:`DeltaDiscoveryResponse `. + +Subscribing to Resources +^^^^^^^^^^^^^^^^^^^^^^^^ + +The client can send either an alias or the name of a resource in the +:ref:`resource_names_subscribe ` field of a :ref:`DeltaDiscoveryRequest ` in +order to subscribe to a resource. Both the names and aliases of +resources should be checked in order to determine whether the entity in +question has been subscribed to. + +A :ref:`resource_names_subscribe ` field may contain resource names that the +server believes the client is already subscribed to, and furthermore has +the most recent versions of. However, the server *must* still provide +those resources in the response; due to implementation details hidden +from the server, the client may have "forgotten" those resources despite +apparently remaining subscribed. + +Unsubscribing from Resources +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +When a client loses interest in some resources, it will indicate that +with the :ref:`resource_names_unsubscribe ` field of a +:ref:`DeltaDiscoveryRequest `. As with :ref:`resource_names_subscribe `, these +may be resource names or aliases. + +A :ref:`resource_names_unsubscribe ` field may contain superfluous resource +names, which the server thought the client was already not subscribed +to. The server must cleanly process such a request; it can simply ignore +these phantom unsubscriptions. + +REST-JSON polling subscriptions +------------------------------- + +Synchronous (long) polling via REST endpoints is also available for the +xDS singleton APIs. The above sequencing of messages is similar, except +no persistent stream is maintained to the management server. 
It is +expected that there is only a single outstanding request at any point in +time, and as a result the response nonce is optional in REST-JSON. The +`JSON canonical transform of +proto3 `__ +is used to encode :ref:`DiscoveryRequest ` and :ref:`DiscoveryResponse ` +messages. ADS is not available for REST-JSON polling. + +When the poll period is set to a small value, with the intention of long +polling, then there is also a requirement to avoid sending a +:ref:`DiscoveryResponse ` :ref:`unless a change to the underlying resources has +occurred `. + +.. |Multiple EDS requests on the same stream| image:: diagrams/eds-same-stream.svg +.. |Multiple EDS requests on distinct streams| image:: diagrams/eds-distinct-stream.svg \ No newline at end of file diff --git a/docs/build.sh b/docs/build.sh index 6d6a88c2bb7a5..036ee5a67aaa7 100755 --- a/docs/build.sh +++ b/docs/build.sh @@ -32,6 +32,7 @@ else fi SCRIPT_DIR=$(dirname "$0") +API_DIR=$(dirname "$dir")/api BUILD_DIR=build_docs [[ -z "${DOCS_OUTPUT_DIR}" ]] && DOCS_OUTPUT_DIR=generated/docs [[ -z "${GENERATED_RST_DIR}" ]] && GENERATED_RST_DIR=generated/rst @@ -159,6 +160,12 @@ do [ -n "${CPROFILE_ENABLED}" ] && cp -f bazel-bin/"${p}".profile "$(dirname "${DEST}")" done +mkdir -p ${GENERATED_RST_DIR}/api-docs + +cp -f $API_DIR/xds_protocol.rst "${GENERATED_RST_DIR}/api-docs/xds_protocol.rst" + +rsync -rav $API_DIR/diagrams "${GENERATED_RST_DIR}/api-docs" + rsync -av "${SCRIPT_DIR}"/root/ "${SCRIPT_DIR}"/conf.py "${GENERATED_RST_DIR}" sphinx-build -W --keep-going -b html "${GENERATED_RST_DIR}" "${DOCS_OUTPUT_DIR}" diff --git a/docs/root/api/api.rst b/docs/root/api/api.rst new file mode 100644 index 0000000000000..27e7731090095 --- /dev/null +++ b/docs/root/api/api.rst @@ -0,0 +1,11 @@ +.. _api: + +API +=== + +.. 
toctree:: + :glob: + :maxdepth: 2 + + ../api-v2/api + ../api-docs/xds_protocol diff --git a/docs/root/index.rst b/docs/root/index.rst index 2e824f0c135a4..354b3578c8ed2 100644 --- a/docs/root/index.rst +++ b/docs/root/index.rst @@ -18,5 +18,5 @@ Envoy documentation configuration/configuration operations/operations extending/extending - api-v2/api + api/api faq/overview diff --git a/docs/root/intro/arch_overview/cluster_manager.rst b/docs/root/intro/arch_overview/cluster_manager.rst index 71739a4a302c8..8550d3a0655ba 100644 --- a/docs/root/intro/arch_overview/cluster_manager.rst +++ b/docs/root/intro/arch_overview/cluster_manager.rst @@ -25,6 +25,8 @@ distribution. * Cluster manager :ref:`configuration `. * CDS :ref:`configuration `. +.. _arch_overview_cluster_warming: + Cluster warming --------------- diff --git a/docs/root/intro/intro.rst b/docs/root/intro/intro.rst index 9133726de1783..472683b2f9665 100644 --- a/docs/root/intro/intro.rst +++ b/docs/root/intro/intro.rst @@ -13,3 +13,4 @@ Introduction getting_help version_history deprecated + \ No newline at end of file From 629bbfb8960911b968c61f103e462c347b21d7ad Mon Sep 17 00:00:00 2001 From: Joshua Marantz Date: Mon, 22 Apr 2019 15:47:36 -0400 Subject: [PATCH 165/165] stats: add/test heterogenous set of StatNameStorage objects. (#6504) * Adds SharedStatNameStorageSet. 
Signed-off-by: Joshua Marantz --- source/common/stats/symbol_table_impl.cc | 30 ++++++++ source/common/stats/symbol_table_impl.h | 82 ++++++++++++++++++++- test/common/stats/symbol_table_impl_test.cc | 27 +++++++ 3 files changed, 138 insertions(+), 1 deletion(-) diff --git a/source/common/stats/symbol_table_impl.cc b/source/common/stats/symbol_table_impl.cc index 17948ee8e90fb..6a96a340d72c8 100644 --- a/source/common/stats/symbol_table_impl.cc +++ b/source/common/stats/symbol_table_impl.cc @@ -317,6 +317,36 @@ void StatNameStorage::free(SymbolTable& table) { bytes_.reset(); } +StatNameStorageSet::~StatNameStorageSet() { + // free() must be called before destructing StatNameStorageSet to decrement + // references to all symbols. + ASSERT(hash_set_.empty()); +} + +void StatNameStorageSet::free(SymbolTable& symbol_table) { + // We must free() all symbols referenced in the set, otherwise the symbols + // will leak when the flat_hash_map superclass is destructed. They cannot + // self-destruct without an explicit free() as each individual StatNameStorage + // object does not have a reference to the symbol table, which would waste 8 + // bytes per stat-name. The easiest way to safely free all the contents of the + // symbol table set is to use flat_hash_map::extract(), which removes and + // returns an element from the set without destructing the element + // immediately. This gives us a chance to call free() on each one before they + // are destroyed. + // + // There's a performance risk here, if removing elements via + // flat_hash_set::begin() is inefficient to use in a loop like this. One can + // imagine a hash-table implementation where the performance of this + // usage-model would be poor. However, tests with 100k elements appeared to + // run quickly when compiled for optimization, so at present this is not a + // performance issue. 
+ + while (!hash_set_.empty()) { + auto storage = hash_set_.extract(hash_set_.begin()); + storage.value().free(symbol_table); + } +} + SymbolTable::StoragePtr SymbolTableImpl::join(const std::vector& stat_names) const { uint64_t num_bytes = 0; for (StatName stat_name : stat_names) { diff --git a/source/common/stats/symbol_table_impl.h b/source/common/stats/symbol_table_impl.h index ade67b7a563e1..24390a4eee294 100644 --- a/source/common/stats/symbol_table_impl.h +++ b/source/common/stats/symbol_table_impl.h @@ -132,7 +132,7 @@ class SymbolTableImpl : public SymbolTable { bool lessThan(const StatName& a, const StatName& b) const override; void free(const StatName& stat_name) override; void incRefCount(const StatName& stat_name) override; - SymbolTable::StoragePtr join(const std::vector& stat_names) const override; + StoragePtr join(const std::vector& stat_names) const override; void populateList(const absl::string_view* names, uint32_t num_names, StatNameList& list) override; StoragePtr encode(absl::string_view name) override; @@ -476,5 +476,85 @@ struct StatNameLessThan { const SymbolTable& symbol_table_; }; +struct HeterogeneousStatNameHash { + // Specifying is_transparent indicates to the library infrastructure that + // type-conversions should not be applied when calling find(), but instead + // pass the actual types of the contained and searched-for objects directly to + // these functors. See + // https://en.cppreference.com/w/cpp/utility/functional/less_void for an + // official reference, and https://abseil.io/tips/144 for a description of + // using it in the context of absl. + using is_transparent = void; + + size_t operator()(StatName a) const { return a.hash(); } + size_t operator()(const StatNameStorage& a) const { return a.statName().hash(); } +}; + +struct HeterogeneousStatNameEqual { + // See description for HeterogeneousStatNameHash::is_transparent. 
+ using is_transparent = void; + + size_t operator()(StatName a, StatName b) const { return a == b; } + size_t operator()(const StatNameStorage& a, const StatNameStorage& b) const { + return a.statName() == b.statName(); + } + size_t operator()(StatName a, const StatNameStorage& b) const { return a == b.statName(); } + size_t operator()(const StatNameStorage& a, StatName b) const { return a.statName() == b; } +}; + +// Encapsulates a set. We use containment here rather than a +// 'using' alias because we need to ensure that when the set is destructed, +// StatNameStorage::free(symbol_table) is called on each entry. It is a little +// easier at the call-sites in thread_local_store.cc to implement this an +// explicit free() method, analogous to StatNameStorage::free(), compared to +// storing a SymbolTable reference in the class and doing the free in the +// destructor, like StatNameTempStorage. +class StatNameStorageSet { +public: + using HashSet = + absl::flat_hash_set; + using iterator = HashSet::iterator; + + ~StatNameStorageSet(); + + /** + * Releases all symbols held in this set. Must be called prior to destruction. + * + * @param symbol_table The symbol table that owns the symbols. + */ + void free(SymbolTable& symbol_table); + + /** + * @param storage The StatNameStorage to add to the set. + */ + std::pair insert(StatNameStorage&& storage) { + return hash_set_.insert(std::move(storage)); + } + + /** + * @param stat_name The stat_name to find. + * @return the iterator pointing to the stat_name, or end() if not found. + */ + iterator find(StatName stat_name) { return hash_set_.find(stat_name); } + + /** + * @return the end-marker. + */ + iterator end() { return hash_set_.end(); } + + /** + * @param set the storage set to swap with. + */ + void swap(StatNameStorageSet& set) { hash_set_.swap(set.hash_set_); } + + /** + * @return the number of elements in the set. 
+ */ + size_t size() const { return hash_set_.size(); } + +private: + HashSet hash_set_; +}; + } // namespace Stats } // namespace Envoy diff --git a/test/common/stats/symbol_table_impl_test.cc b/test/common/stats/symbol_table_impl_test.cc index 36c86aaf43654..7391f45ecfbb7 100644 --- a/test/common/stats/symbol_table_impl_test.cc +++ b/test/common/stats/symbol_table_impl_test.cc @@ -507,6 +507,33 @@ TEST_P(StatNameTest, MutexContentionOnExistingSymbols) { } } +TEST_P(StatNameTest, SharedStatNameStorageSetInsertAndFind) { + StatNameStorageSet set; + const int iters = 10; + for (int i = 0; i < iters; ++i) { + std::string foo = absl::StrCat("foo", i); + auto insertion = set.insert(StatNameStorage(foo, *table_)); + StatNameTempStorage temp_foo(foo, *table_); + auto found = set.find(temp_foo.statName()); + EXPECT_EQ(found->statName().data(), insertion.first->statName().data()); + } + StatNameTempStorage bar("bar", *table_); + EXPECT_EQ(set.end(), set.find(bar.statName())); + EXPECT_EQ(iters, set.size()); + set.free(*table_); +} + +TEST_P(StatNameTest, SharedStatNameStorageSetSwap) { + StatNameStorageSet set1, set2; + set1.insert(StatNameStorage("foo", *table_)); + EXPECT_EQ(1, set1.size()); + EXPECT_EQ(0, set2.size()); + set1.swap(set2); + EXPECT_EQ(0, set1.size()); + EXPECT_EQ(1, set2.size()); + set2.free(*table_); +} + // Tests the memory savings realized from using symbol tables with 1k // clusters. This test shows the memory drops from almost 8M to less than // 2M. Note that only SymbolTableImpl is tested for memory consumption,