From a38c2151a9addf4dfebc4f1036165f2d133b946f Mon Sep 17 00:00:00 2001 From: Snow Pettersen Date: Fri, 13 Dec 2019 22:53:08 -0500 Subject: [PATCH 01/41] http: extract conn pool behavior into base class This extracts shared functionality (connection timeouts, drain callbacks) into the base class. It also introduces a base class for the ActiveClient, opening up for moving shared client functionality into the base class as well. Moves the handling of the ready/busy lists up to the base class, setting us up for reusing these for HTTP/2. Signed-off-by: Snow Pettersen --- source/common/common/logger.h | 3 +++ source/common/http/conn_pool_base.cc | 25 ++++++++++++++++++ source/common/http/conn_pool_base.h | 34 +++++++++++++++++++++--- source/common/http/http1/conn_pool.cc | 38 ++++++++------------------- source/common/http/http1/conn_pool.h | 17 +++++------- source/common/http/http2/conn_pool.cc | 22 +++++----------- source/common/http/http2/conn_pool.h | 14 +++++----- 7 files changed, 89 insertions(+), 64 deletions(-) diff --git a/source/common/common/logger.h b/source/common/common/logger.h index 810c5fff52fa2..9222d6a1ff2ee 100644 --- a/source/common/common/logger.h +++ b/source/common/common/logger.h @@ -370,6 +370,9 @@ template class Loggable { #define ENVOY_CONN_LOG(LEVEL, FORMAT, CONNECTION, ...) \ ENVOY_CONN_LOG_TO_LOGGER(ENVOY_LOGGER(), LEVEL, FORMAT, CONNECTION, ##__VA_ARGS__) +#define ENVOY_CONN_ID_LOG(LEVEL, FORMAT, CONNECTION_ID, ...) \ + ENVOY_LOG_TO_LOGGER(LOGGER, LEVEL, "[C{}] " FORMAT, (CONNECTION_ID), ##__VA_ARGS__) + /** * Convenience macros for logging with a stream ID and a connection ID. 
*/ diff --git a/source/common/http/conn_pool_base.cc b/source/common/http/conn_pool_base.cc index 1cec3f6a74dc2..cbf3ac3317ef0 100644 --- a/source/common/http/conn_pool_base.cc +++ b/source/common/http/conn_pool_base.cc @@ -2,6 +2,31 @@ namespace Envoy { namespace Http { + +ConnPoolImplBase::~ConnPoolImplBase() { + while (!ready_clients_.empty()) { + ready_clients_.front()->close(); + } + + while (!busy_clients_.empty()) { + busy_clients_.front()->close(); + } + + // Make sure all clients are destroyed before we are destroyed. + dispatcher_.clearDeferredDeleteList(); +} + +void ConnPoolImplBase::addDrainedCallback(DrainedCb cb) { + drained_callbacks_.push_back(cb); + checkForDrained(); +} + +void ConnPoolImplBase::onConnectTimeout(ConnPoolImplBase::ActiveClient& client) { + ENVOY_CONN_ID_LOG(debug, "connect timeout", client.connectionId()); + host_->cluster().stats().upstream_cx_connect_timeout_.inc(); + client.close(); +} + ConnPoolImplBase::PendingRequest::PendingRequest(ConnPoolImplBase& parent, StreamDecoder& decoder, ConnectionPool::Callbacks& callbacks) : parent_(parent), decoder_(decoder), callbacks_(callbacks) { diff --git a/source/common/http/conn_pool_base.h b/source/common/http/conn_pool_base.h index 0910cadea1a50..835a9fca4637f 100644 --- a/source/common/http/conn_pool_base.h +++ b/source/common/http/conn_pool_base.h @@ -1,20 +1,29 @@ #pragma once +#include "envoy/event/dispatcher.h" #include "envoy/http/conn_pool.h" #include "common/common/linked_object.h" #include "absl/strings/string_view.h" +#include "envoy/network/connection.h" namespace Envoy { namespace Http { // Base class that handles request queueing logic shared between connection pool implementations. 
-class ConnPoolImplBase : protected Logger::Loggable { +class ConnPoolImplBase : public ConnectionPool::Instance, + protected Logger::Loggable { + + // ConnectionPool::Instance + void addDrainedCallback(DrainedCb cb) override; + protected: - ConnPoolImplBase(Upstream::HostConstSharedPtr host, Upstream::ResourcePriority priority) - : host_(host), priority_(priority) {} - virtual ~ConnPoolImplBase() = default; + ConnPoolImplBase(Upstream::HostConstSharedPtr host, Upstream::ResourcePriority priority, + Event::Dispatcher& dispatcher) + : host_(host), priority_(priority), dispatcher_(dispatcher) {} + virtual ~ConnPoolImplBase(); + struct PendingRequest : LinkedObject, public ConnectionPool::Cancellable { PendingRequest(ConnPoolImplBase& parent, StreamDecoder& decoder, @@ -31,6 +40,16 @@ class ConnPoolImplBase : protected Logger::Loggable { using PendingRequestPtr = std::unique_ptr; + struct ActiveClient : LinkedObject, public Event::DeferredDeletable { + virtual ~ActiveClient() = default; + + virtual void close() PURE; + + virtual uint64_t connectionId() const PURE; + }; + + using ActiveClientPtr = std::unique_ptr; + // Creates a new PendingRequest and enqueues it into the request queue. ConnectionPool::Cancellable* newPendingRequest(StreamDecoder& decoder, ConnectionPool::Callbacks& callbacks); @@ -42,15 +61,22 @@ class ConnPoolImplBase : protected Logger::Loggable { void purgePendingRequests(const Upstream::HostDescriptionConstSharedPtr& host_description, absl::string_view failure_reason); + void onConnectTimeout(ActiveClient& client); + // Must be implemented by sub class. Attempts to drain inactive clients. virtual void checkForDrained() PURE; const Upstream::HostConstSharedPtr host_; const Upstream::ResourcePriority priority_; + Event::Dispatcher& dispatcher_; + + std::list drained_callbacks_; std::list pending_requests_; // When calling purgePendingRequests, this list will be used to hold the requests we are about // to purge. 
We need this if one cancelled requests cancels a different pending request std::list pending_requests_to_purge_; + std::list ready_clients_; + std::list busy_clients_; }; } // namespace Http } // namespace Envoy diff --git a/source/common/http/http1/conn_pool.cc b/source/common/http/http1/conn_pool.cc index 4fa6e04dfffd0..8dac8e52db239 100644 --- a/source/common/http/http1/conn_pool.cc +++ b/source/common/http/http1/conn_pool.cc @@ -30,41 +30,25 @@ ConnPoolImpl::ConnPoolImpl(Event::Dispatcher& dispatcher, Upstream::HostConstSha const Network::ConnectionSocket::OptionsSharedPtr& options, const Http::Http1Settings& settings, const Network::TransportSocketOptionsSharedPtr& transport_socket_options) - : ConnPoolImplBase(std::move(host), std::move(priority)), dispatcher_(dispatcher), - socket_options_(options), transport_socket_options_(transport_socket_options), + : ConnPoolImplBase(std::move(host), std::move(priority), dispatcher), socket_options_(options), + transport_socket_options_(transport_socket_options), upstream_ready_timer_(dispatcher_.createTimer([this]() { onUpstreamReady(); })), settings_(settings) {} -ConnPoolImpl::~ConnPoolImpl() { - while (!ready_clients_.empty()) { - ready_clients_.front()->codec_client_->close(); - } - - while (!busy_clients_.empty()) { - busy_clients_.front()->codec_client_->close(); - } - - // Make sure all clients are destroyed before we are destroyed. - dispatcher_.clearDeferredDeleteList(); -} +ConnPoolImpl::~ConnPoolImpl() {} void ConnPoolImpl::drainConnections() { while (!ready_clients_.empty()) { - ready_clients_.front()->codec_client_->close(); + firstReady().codec_client_->close(); } // We drain busy clients by manually setting remaining requests to 1. Thus, when the next // response completes the client will be destroyed. 
for (const auto& client : busy_clients_) { - client->remaining_requests_ = 1; + static_cast(*client).remaining_requests_ = 1; } } -void ConnPoolImpl::addDrainedCallback(DrainedCb cb) { - drained_callbacks_.push_back(cb); - checkForDrained(); -} - bool ConnPoolImpl::hasActiveConnections() const { return !pending_requests_.empty() || !busy_clients_.empty(); } @@ -82,7 +66,7 @@ void ConnPoolImpl::attachRequestToClient(ActiveClient& client, StreamDecoder& re void ConnPoolImpl::checkForDrained() { if (!drained_callbacks_.empty() && pending_requests_.empty() && busy_clients_.empty()) { while (!ready_clients_.empty()) { - ready_clients_.front()->codec_client_->close(); + firstReady().codec_client_->close(); } for (const DrainedCb& cb : drained_callbacks_) { @@ -93,16 +77,16 @@ void ConnPoolImpl::checkForDrained() { void ConnPoolImpl::createNewConnection() { ENVOY_LOG(debug, "creating a new connection"); - ActiveClientPtr client(new ActiveClient(*this)); + auto client = std::make_unique(*this); client->moveIntoList(std::move(client), busy_clients_); } ConnectionPool::Cancellable* ConnPoolImpl::newStream(StreamDecoder& response_decoder, ConnectionPool::Callbacks& callbacks) { if (!ready_clients_.empty()) { - ready_clients_.front()->moveBetweenLists(ready_clients_, busy_clients_); - ENVOY_CONN_LOG(debug, "using existing connection", *busy_clients_.front()->codec_client_); - attachRequestToClient(*busy_clients_.front(), response_decoder, callbacks); + firstReady().moveBetweenLists(ready_clients_, busy_clients_); + ENVOY_CONN_LOG(debug, "using existing connection", *firstBusy().codec_client_); + attachRequestToClient(firstBusy(), response_decoder, callbacks); return nullptr; } @@ -225,7 +209,7 @@ void ConnPoolImpl::onResponseComplete(ActiveClient& client) { void ConnPoolImpl::onUpstreamReady() { upstream_ready_enabled_ = false; while (!pending_requests_.empty() && !ready_clients_.empty()) { - ActiveClient& client = *ready_clients_.front(); + ActiveClient& client = 
firstReady(); ENVOY_CONN_LOG(debug, "attaching to next request", *client.codec_client_); // There is work to do so bind a request to the client and move it to the busy list. Pending // requests are pushed onto the front, so pull from the back. diff --git a/source/common/http/http1/conn_pool.h b/source/common/http/http1/conn_pool.h index b650ae9b60772..ab5055d5ef6e0 100644 --- a/source/common/http/http1/conn_pool.h +++ b/source/common/http/http1/conn_pool.h @@ -41,7 +41,6 @@ class ConnPoolImpl : public ConnectionPool::Instance, public ConnPoolImplBase { // ConnectionPool::Instance Http::Protocol protocol() const override { return Http::Protocol::Http11; } - void addDrainedCallback(DrainedCb cb) override; void drainConnections() override; bool hasActiveConnections() const override; ConnectionPool::Cancellable* newStream(StreamDecoder& response_decoder, @@ -83,12 +82,14 @@ class ConnPoolImpl : public ConnectionPool::Instance, public ConnPoolImplBase { using StreamWrapperPtr = std::unique_ptr; - struct ActiveClient : LinkedObject, - public Network::ConnectionCallbacks, - public Event::DeferredDeletable { + struct ActiveClient : public Network::ConnectionCallbacks, public ConnPoolImplBase::ActiveClient { ActiveClient(ConnPoolImpl& parent); ~ActiveClient() override; + // ConnPoolImplBase::ActiveClient + void close() override { codec_client_->close(); } + uint64_t connectionId() override { codec_client_->id(); } + void onConnectTimeout(); // Network::ConnectionCallbacks @@ -108,8 +109,6 @@ class ConnPoolImpl : public ConnectionPool::Instance, public ConnPoolImplBase { uint64_t remaining_requests_; }; - using ActiveClientPtr = std::unique_ptr; - void attachRequestToClient(ActiveClient& client, StreamDecoder& response_decoder, ConnectionPool::Callbacks& callbacks); virtual CodecClientPtr createCodecClient(Upstream::Host::CreateConnectionData& data) PURE; @@ -119,11 +118,9 @@ class ConnPoolImpl : public ConnectionPool::Instance, public ConnPoolImplBase { void 
onResponseComplete(ActiveClient& client); void onUpstreamReady(); void processIdleClient(ActiveClient& client, bool delay); + ActiveClient& firstReady() const { return static_cast(*ready_clients_.front()); } + ActiveClient& firstBusy() const { return static_cast(*busy_clients_.front()); } - Event::Dispatcher& dispatcher_; - std::list ready_clients_; - std::list busy_clients_; - std::list drained_callbacks_; const Network::ConnectionSocket::OptionsSharedPtr socket_options_; const Network::TransportSocketOptionsSharedPtr transport_socket_options_; Event::TimerPtr upstream_ready_timer_; diff --git a/source/common/http/http2/conn_pool.cc b/source/common/http/http2/conn_pool.cc index feac31813d754..6052dbe886156 100644 --- a/source/common/http/http2/conn_pool.cc +++ b/source/common/http/http2/conn_pool.cc @@ -20,10 +20,14 @@ ConnPoolImpl::ConnPoolImpl(Event::Dispatcher& dispatcher, Upstream::HostConstSha Upstream::ResourcePriority priority, const Network::ConnectionSocket::OptionsSharedPtr& options, const Network::TransportSocketOptionsSharedPtr& transport_socket_options) - : ConnPoolImplBase(std::move(host), std::move(priority)), dispatcher_(dispatcher), - socket_options_(options), transport_socket_options_(transport_socket_options) {} + : ConnPoolImplBase(std::move(host), std::move(priority), dispatcher), socket_options_(options), + transport_socket_options_(transport_socket_options) {} ConnPoolImpl::~ConnPoolImpl() { + // TODO(snowp): Change the HTTP/2 conn pool to use the ready_clients_ from the base + // implementation. We probably need a third list for HTTP/2 (draining_clients_) in anticipation + // for there to be a distinction between a client that is being shut down (draining) and one that + // has reached its capacity (busy). 
if (primary_client_) { primary_client_->client_->close(); } @@ -31,9 +35,6 @@ ConnPoolImpl::~ConnPoolImpl() { if (draining_client_) { draining_client_->client_->close(); } - - // Make sure all clients are destroyed before we are destroyed. - dispatcher_.clearDeferredDeleteList(); } void ConnPoolImpl::ConnPoolImpl::drainConnections() { @@ -42,11 +43,6 @@ void ConnPoolImpl::ConnPoolImpl::drainConnections() { } } -void ConnPoolImpl::addDrainedCallback(DrainedCb cb) { - drained_callbacks_.push_back(cb); - checkForDrained(); -} - bool ConnPoolImpl::hasActiveConnections() const { if (primary_client_ && primary_client_->client_->numActiveRequests() > 0) { return true; @@ -217,12 +213,6 @@ void ConnPoolImpl::movePrimaryClientToDraining() { ASSERT(!primary_client_); } -void ConnPoolImpl::onConnectTimeout(ActiveClient& client) { - ENVOY_CONN_LOG(debug, "connect timeout", *client.client_); - host_->cluster().stats().upstream_cx_connect_timeout_.inc(); - client.client_->close(); -} - void ConnPoolImpl::onGoAway(ActiveClient& client) { ENVOY_CONN_LOG(debug, "remote goaway", *client.client_); host_->cluster().stats().upstream_cx_close_notify_.inc(); diff --git a/source/common/http/http2/conn_pool.h b/source/common/http/http2/conn_pool.h index 55bb4544d5a4c..18bf6b20e8d24 100644 --- a/source/common/http/http2/conn_pool.h +++ b/source/common/http/http2/conn_pool.h @@ -22,7 +22,7 @@ namespace Http2 { * shifting to a new connection if we reach max streams on the primary. This is a base class * used for both the prod implementation as well as the testing one. 
*/ -class ConnPoolImpl : public ConnectionPool::Instance, public ConnPoolImplBase { +class ConnPoolImpl : public ConnPoolImplBase { public: ConnPoolImpl(Event::Dispatcher& dispatcher, Upstream::HostConstSharedPtr host, Upstream::ResourcePriority priority, @@ -32,7 +32,6 @@ class ConnPoolImpl : public ConnectionPool::Instance, public ConnPoolImplBase { // Http::ConnectionPool::Instance Http::Protocol protocol() const override { return Http::Protocol::Http2; } - void addDrainedCallback(DrainedCb cb) override; void drainConnections() override; bool hasActiveConnections() const override; ConnectionPool::Cancellable* newStream(Http::StreamDecoder& response_decoder, @@ -42,13 +41,17 @@ class ConnPoolImpl : public ConnectionPool::Instance, public ConnPoolImplBase { protected: struct ActiveClient : public Network::ConnectionCallbacks, public CodecClientCallbacks, - public Event::DeferredDeletable, - public Http::ConnectionCallbacks { + public Http::ConnectionCallbacks, + public ConnPoolImplBase::ActiveClient { ActiveClient(ConnPoolImpl& parent); ~ActiveClient() override; void onConnectTimeout() { parent_.onConnectTimeout(*this); } + // ConnPoolImpl::ActiveClient + void close() override { client_->close(); } + uint64_t connectionId() const override { return client_->id(); } + // Network::ConnectionCallbacks void onEvent(Network::ConnectionEvent event) override { parent_.onConnectionEvent(*this, event); @@ -84,7 +87,6 @@ class ConnPoolImpl : public ConnectionPool::Instance, public ConnPoolImplBase { virtual uint32_t maxTotalStreams() PURE; void movePrimaryClientToDraining(); void onConnectionEvent(ActiveClient& client, Network::ConnectionEvent event); - void onConnectTimeout(ActiveClient& client); void onGoAway(ActiveClient& client); void onStreamDestroy(ActiveClient& client); void onStreamReset(ActiveClient& client, Http::StreamResetReason reason); @@ -92,10 +94,8 @@ class ConnPoolImpl : public ConnectionPool::Instance, public ConnPoolImplBase { void onUpstreamReady(); 
Stats::TimespanPtr conn_connect_ms_; - Event::Dispatcher& dispatcher_; ActiveClientPtr primary_client_; ActiveClientPtr draining_client_; - std::list drained_callbacks_; const Network::ConnectionSocket::OptionsSharedPtr socket_options_; const Network::TransportSocketOptionsSharedPtr transport_socket_options_; }; From c6ca9b7fe783ec47145daaac7ddeab776ee1a177 Mon Sep 17 00:00:00 2001 From: Greg Greenway Date: Fri, 20 Dec 2019 14:41:19 -0800 Subject: [PATCH 02/41] Fix build: logger Signed-off-by: Greg Greenway --- source/common/common/logger.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/source/common/common/logger.h b/source/common/common/logger.h index 1811686d17285..29787f3e4b935 100644 --- a/source/common/common/logger.h +++ b/source/common/common/logger.h @@ -371,8 +371,8 @@ template class Loggable { #define ENVOY_CONN_LOG(LEVEL, FORMAT, CONNECTION, ...) \ ENVOY_CONN_LOG_TO_LOGGER(ENVOY_LOGGER(), LEVEL, FORMAT, CONNECTION, ##__VA_ARGS__) -#define ENVOY_CONN_ID_LOG(LEVEL, FORMAT, CONNECTION_ID, ...) \ - ENVOY_LOG_TO_LOGGER(LOGGER, LEVEL, "[C{}] " FORMAT, (CONNECTION_ID), ##__VA_ARGS__) +#define ENVOY_CONN_ID_LOG(LEVEL, FORMAT, CONNECTION_ID, ...) \ + ENVOY_LOG_TO_LOGGER(ENVOY_LOGGER(), LEVEL, "[C{}] " FORMAT, (CONNECTION_ID), ##__VA_ARGS__) /** * Convenience macros for logging with a stream ID and a connection ID. From 249770b790a0a015a4b9e6ff369579f8bf6a8f75 Mon Sep 17 00:00:00 2001 From: Greg Greenway Date: Fri, 20 Dec 2019 16:02:10 -0800 Subject: [PATCH 03/41] fix tests build Signed-off-by: Greg Greenway --- source/common/http/conn_pool_base.h | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/source/common/http/conn_pool_base.h b/source/common/http/conn_pool_base.h index 835a9fca4637f..776cc17d5586a 100644 --- a/source/common/http/conn_pool_base.h +++ b/source/common/http/conn_pool_base.h @@ -14,7 +14,7 @@ namespace Http { // Base class that handles request queueing logic shared between connection pool implementations. 
class ConnPoolImplBase : public ConnectionPool::Instance, protected Logger::Loggable { - +public: // ConnectionPool::Instance void addDrainedCallback(DrainedCb cb) override; @@ -24,7 +24,6 @@ class ConnPoolImplBase : public ConnectionPool::Instance, : host_(host), priority_(priority), dispatcher_(dispatcher) {} virtual ~ConnPoolImplBase(); - struct PendingRequest : LinkedObject, public ConnectionPool::Cancellable { PendingRequest(ConnPoolImplBase& parent, StreamDecoder& decoder, ConnectionPool::Callbacks& callbacks); From 579dbefb2ff88ad5e8744d60a276afbbf5ca9339 Mon Sep 17 00:00:00 2001 From: Greg Greenway Date: Fri, 20 Dec 2019 16:46:18 -0800 Subject: [PATCH 04/41] more build fixes Signed-off-by: Greg Greenway --- source/common/http/http1/conn_pool.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/source/common/http/http1/conn_pool.h b/source/common/http/http1/conn_pool.h index ab5055d5ef6e0..cb7f7fd283acd 100644 --- a/source/common/http/http1/conn_pool.h +++ b/source/common/http/http1/conn_pool.h @@ -29,7 +29,7 @@ namespace Http1 { * address. Higher layer code should handle resolving DNS on error and creating a new pool * bound to a different IP address. 
*/ -class ConnPoolImpl : public ConnectionPool::Instance, public ConnPoolImplBase { +class ConnPoolImpl : public ConnPoolImplBase { public: ConnPoolImpl(Event::Dispatcher& dispatcher, Upstream::HostConstSharedPtr host, Upstream::ResourcePriority priority, @@ -88,7 +88,7 @@ class ConnPoolImpl : public ConnectionPool::Instance, public ConnPoolImplBase { // ConnPoolImplBase::ActiveClient void close() override { codec_client_->close(); } - uint64_t connectionId() override { codec_client_->id(); } + uint64_t connectionId() const override { return codec_client_->id(); } void onConnectTimeout(); From e7a0587b703159d54479285267d832250abe661e Mon Sep 17 00:00:00 2001 From: Greg Greenway Date: Tue, 7 Jan 2020 09:23:53 -0800 Subject: [PATCH 05/41] Improve docs for LinkedObject::moveBetweenLists Signed-off-by: Greg Greenway --- source/common/common/linked_object.h | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/source/common/common/linked_object.h b/source/common/common/linked_object.h index 2efbc14cf6b76..13fc6d491567b 100644 --- a/source/common/common/linked_object.h +++ b/source/common/common/linked_object.h @@ -28,15 +28,15 @@ template class LinkedObject { bool inserted() { return inserted_; } /** - * Move a linked item between 2 lists. - * @param list1 supplies the first list. - * @param list2 supplies the second list. + * Move a linked item from src list to dst list. + * @param src supplies the list that the item is currently in. + * @param dst supplies the destination list for the item. 
*/ - void moveBetweenLists(ListType& list1, ListType& list2) { + void moveBetweenLists(ListType& src, ListType& dst) { ASSERT(inserted_); - ASSERT(std::find(list1.begin(), list1.end(), *entry_) != list1.end()); + ASSERT(std::find(src.begin(), src.end(), *entry_) != src.end()); - list2.splice(list2.begin(), list1, entry_); + dst.splice(dst.begin(), src, entry_); } /** From 4e2c147f5ee9f0596d3f1f7fe9c37d19cfd88e99 Mon Sep 17 00:00:00 2001 From: Greg Greenway Date: Tue, 7 Jan 2020 09:25:39 -0800 Subject: [PATCH 06/41] Phase one: moved some logic to base class. Signed-off-by: Greg Greenway --- source/common/http/BUILD | 4 + source/common/http/conn_pool_base.cc | 181 +++++++++++++++++++++-- source/common/http/conn_pool_base.h | 54 ++++++- source/common/http/http1/BUILD | 1 - source/common/http/http1/conn_pool.cc | 117 +-------------- source/common/http/http1/conn_pool.h | 28 +--- source/common/http/http2/BUILD | 1 - source/common/http/http2/conn_pool.cc | 200 ++++++-------------------- source/common/http/http2/conn_pool.h | 46 +++--- 9 files changed, 288 insertions(+), 344 deletions(-) diff --git a/source/common/http/BUILD b/source/common/http/BUILD index f82cad602bf73..4943b9328777a 100644 --- a/source/common/http/BUILD +++ b/source/common/http/BUILD @@ -115,8 +115,12 @@ envoy_cc_library( srcs = ["conn_pool_base.cc"], hdrs = ["conn_pool_base.h"], deps = [ + ":codec_client_lib", "//include/envoy/http:conn_pool_interface", + "//include/envoy/stats:timespan_interface", "//source/common/common:linked_object", + "//source/common/stats:timespan_lib", + "//source/common/upstream:upstream_lib", ], ) diff --git a/source/common/http/conn_pool_base.cc b/source/common/http/conn_pool_base.cc index cbf3ac3317ef0..31771d8541b71 100644 --- a/source/common/http/conn_pool_base.cc +++ b/source/common/http/conn_pool_base.cc @@ -1,15 +1,15 @@ #include "common/http/conn_pool_base.h" +#include "common/stats/timespan_impl.h" +#include "common/upstream/upstream_impl.h" namespace Envoy { 
namespace Http { ConnPoolImplBase::~ConnPoolImplBase() { - while (!ready_clients_.empty()) { - ready_clients_.front()->close(); - } - - while (!busy_clients_.empty()) { - busy_clients_.front()->close(); + for (auto* list : {&ready_clients_, &busy_clients_, &draining_clients_}) { + while (!list->empty()) { + list->front()->close(); + } } // Make sure all clients are destroyed before we are destroyed. @@ -21,10 +21,154 @@ void ConnPoolImplBase::addDrainedCallback(DrainedCb cb) { checkForDrained(); } -void ConnPoolImplBase::onConnectTimeout(ConnPoolImplBase::ActiveClient& client) { - ENVOY_CONN_ID_LOG(debug, "connect timeout", client.connectionId()); - host_->cluster().stats().upstream_cx_connect_timeout_.inc(); - client.close(); +bool ConnPoolImplBase::hasActiveConnections() const { + if (!pending_requests_.empty() || !busy_clients_.empty() || !draining_clients_.empty()) { + return true; + } + + for (const auto& ready_client : ready_clients_) { + if (ready_client->hasActiveRequests()) { + return true; + } + } + + return false; +} + +void ConnPoolImplBase::closeIdleConnections() { + // Create a separate list of elements to close to avoid mutate-while-iterating problems. 
+ std::list toClose; + + // Possibly-idle connections are always in the ready_clients_ list + for (auto& client : ready_clients_) { + if (client->hasActiveRequests()) { + toClose.push_back(&client); + } + } + + while (!toClose.empty()) { + (*toClose.front())->close(); + } +} + +std::list* ConnPoolImplBase::owningList(ActiveClient& client) { + switch (client.state_) { + case ActiveClient::State::CONNECTING: + return &busy_clients_; + case ActiveClient::State::READY: + return &ready_clients_; + case ActiveClient::State::BUSY: + return &busy_clients_; + case ActiveClient::State::DRAINING: + return &draining_clients_; + case ActiveClient::State::CLOSED: + return nullptr; + } + NOT_REACHED_GCOVR_EXCL_LINE; +} + +void ConnPoolImplBase::drainConnections() { + closeIdleConnections(); + + while (!busy_clients_.empty()) { + auto& client = busy_clients_.front(); + client->state_ = ConnPoolImplBase::ActiveClient::State::DRAINING; + client->moveBetweenLists(busy_clients_, draining_clients_); + } +} + +void ConnPoolImplBase::checkForDrained() { + if (drained_callbacks_.empty()) { + return; + } + + closeIdleConnections(); + + if (pending_requests_.empty() && busy_clients_.empty() && draining_clients_.empty()) { + ENVOY_LOG(debug, "invoking drained callbacks"); + for (const DrainedCb& cb : drained_callbacks_) { + cb(); + } + } +} + +void ConnPoolImplBase::onConnectionEvent(ConnPoolImplBase::ActiveClient& client, + Network::ConnectionEvent event) { + if (event == Network::ConnectionEvent::RemoteClose || + event == Network::ConnectionEvent::LocalClose) { + // The client died. 
+ ENVOY_CONN_LOG(debug, "client disconnected, failure reason: {}", *client.codec_client_, + client.codec_client_->connectionFailureReason()); + + Envoy::Upstream::reportUpstreamCxDestroy(host_, event); + if (client.closingWithIncompleteRequest()) { + Envoy::Upstream::reportUpstreamCxDestroyActiveRequest(host_, event); + } + + ActiveClientPtr removed; + switch (client.state_) { + case ActiveClient::State::CONNECTING: + host_->cluster().stats().upstream_cx_connect_fail_.inc(); + host_->stats().cx_connect_fail_.inc(); + + // Raw connect failures should never happen under normal circumstances. If we have an upstream + // that is behaving badly, requests can get stuck here in the pending state. If we see a + // connect failure, we purge all pending requests so that calling code can determine what to + // do with the request. + // NOTE: We move the existing pending requests to a temporary list. This is done so that + // if retry logic submits a new request to the pool, we don't fail it inline. 
+ purgePendingRequests(client.real_host_description_, + client.codec_client_->connectionFailureReason()); + break; + + case ConnPoolImplBase::ActiveClient::State::READY: + dispatcher_.deferredDelete(client.removeFromList(ready_clients_)); + if (client.closingWithIncompleteRequest()) { + checkForDrained(); + } + break; + + case ConnPoolImplBase::ActiveClient::State::BUSY: + dispatcher_.deferredDelete(client.removeFromList(busy_clients_)); + checkForDrained(); + break; + + case ConnPoolImplBase::ActiveClient::State::DRAINING: + dispatcher_.deferredDelete(client.removeFromList(draining_clients_)); + checkForDrained(); + break; + + case ConnPoolImplBase::ActiveClient::State::CLOSED: + PANIC("Connection closed event received while already in CLOSED state."); + break; + } + // bool was_draining = (client.state_ == ConnPoolImplBase::ActiveClient::State::DRAINING); + client.state_ = ConnPoolImplBase::ActiveClient::State::CLOSED; + + // If we have pending requests and we just lost a connection we should make a new one. + // TODO(ggreenway): This math only works for http1; figure out how many request slots there + // are in ready_clients_, and find the CONNECTING clients in busy_clients_ and count them as + // available. 
+ /* + if (!was_draining && pending_requests_.size() > (ready_clients_.size() + busy_clients_.size())) + { createNewConnection(); + } + */ + } else if (event == Network::ConnectionEvent::Connected) { + client.conn_connect_ms_->complete(); + client.conn_connect_ms_.reset(); + + ASSERT(client.state_ == ConnPoolImplBase::ActiveClient::State::CONNECTING); + client.state_ = ConnPoolImplBase::ActiveClient::State::READY; + client.moveBetweenLists(busy_clients_, ready_clients_); + + onUpstreamReady(); + } + + if (client.connect_timer_) { + client.connect_timer_->disableTimer(); + client.connect_timer_.reset(); + } } ConnPoolImplBase::PendingRequest::PendingRequest(ConnPoolImplBase& parent, StreamDecoder& decoder, @@ -79,5 +223,22 @@ void ConnPoolImplBase::onPendingRequestCancel(PendingRequest& request) { checkForDrained(); } +ConnPoolImplBase::ActiveClient::ActiveClient(ConnPoolImplBase& parent) + : base_parent_(parent), connect_timer_(base_parent_.dispatcher_.createTimer( + [this]() -> void { onConnectTimeout(); })) { + conn_connect_ms_ = std::make_unique( + base_parent_.host_->cluster().stats().upstream_cx_connect_ms_, + base_parent_.dispatcher_.timeSource()); + conn_length_ = std::make_unique( + base_parent_.host_->cluster().stats().upstream_cx_length_ms_, + base_parent_.dispatcher_.timeSource()); +} + +void ConnPoolImplBase::ActiveClient::onConnectTimeout() { + ENVOY_CONN_ID_LOG(debug, "connect timeout", connectionId()); + base_parent_.host_->cluster().stats().upstream_cx_connect_timeout_.inc(); + close(); +} + } // namespace Http } // namespace Envoy diff --git a/source/common/http/conn_pool_base.h b/source/common/http/conn_pool_base.h index 776cc17d5586a..3a87191710749 100644 --- a/source/common/http/conn_pool_base.h +++ b/source/common/http/conn_pool_base.h @@ -2,8 +2,10 @@ #include "envoy/event/dispatcher.h" #include "envoy/http/conn_pool.h" +#include "envoy/stats/timespan.h" #include "common/common/linked_object.h" +#include "common/http/codec_client.h" #include 
"absl/strings/string_view.h" #include "envoy/network/connection.h" @@ -17,6 +19,8 @@ class ConnPoolImplBase : public ConnectionPool::Instance, public: // ConnectionPool::Instance void addDrainedCallback(DrainedCb cb) override; + bool hasActiveConnections() const override; + void drainConnections() override; protected: ConnPoolImplBase(Upstream::HostConstSharedPtr host, Upstream::ResourcePriority priority, @@ -39,16 +43,42 @@ class ConnPoolImplBase : public ConnectionPool::Instance, using PendingRequestPtr = std::unique_ptr; - struct ActiveClient : LinkedObject, public Event::DeferredDeletable { + struct ActiveClient : LinkedObject, + public Network::ConnectionCallbacks, + public Event::DeferredDeletable { + ActiveClient(ConnPoolImplBase& parent); virtual ~ActiveClient() = default; - virtual void close() PURE; - - virtual uint64_t connectionId() const PURE; + // Network::ConnectionCallbacks + void onEvent(Network::ConnectionEvent event) override { + base_parent_.onConnectionEvent(*this, event); + } + void onAboveWriteBufferHighWatermark() override {} + void onBelowWriteBufferLowWatermark() override {} + + void onConnectTimeout(); + void close() { codec_client_->close(); } + + uint64_t connectionId() const { return codec_client_->id(); } + + virtual bool hasActiveRequests() const PURE; + virtual bool closingWithIncompleteRequest() const PURE; + + enum class State { CONNECTING, READY, BUSY, DRAINING, CLOSED }; + State state_{State::CONNECTING}; + ConnPoolImplBase& base_parent_; // TODO: combine this and child parent_ + CodecClientPtr codec_client_; + Upstream::HostDescriptionConstSharedPtr real_host_description_; + Stats::TimespanPtr conn_connect_ms_; + Stats::TimespanPtr conn_length_; + Event::TimerPtr connect_timer_; }; using ActiveClientPtr = std::unique_ptr; + // Gets a pointer to the list that currently owns this client. + std::list* owningList(ActiveClient& client); + // Creates a new PendingRequest and enqueues it into the request queue. 
ConnectionPool::Cancellable* newPendingRequest(StreamDecoder& decoder, ConnectionPool::Callbacks& callbacks); @@ -60,10 +90,14 @@ class ConnPoolImplBase : public ConnectionPool::Instance, void purgePendingRequests(const Upstream::HostDescriptionConstSharedPtr& host_description, absl::string_view failure_reason); - void onConnectTimeout(ActiveClient& client); + // Closes any idle connections. + void closeIdleConnections(); + + void onConnectionEvent(ActiveClient& client, Network::ConnectionEvent event); // Must be implemented by sub class. Attempts to drain inactive clients. - virtual void checkForDrained() PURE; + void checkForDrained(); + virtual void onUpstreamReady() PURE; const Upstream::HostConstSharedPtr host_; const Upstream::ResourcePriority priority_; @@ -71,11 +105,19 @@ class ConnPoolImplBase : public ConnectionPool::Instance, std::list drained_callbacks_; std::list pending_requests_; + // When calling purgePendingRequests, this list will be used to hold the requests we are about // to purge. We need this if one cancelled requests cancels a different pending request std::list pending_requests_to_purge_; + + // Clients that are ready to handle additional requests. std::list ready_clients_; + + // Clients that are not ready to handle additional requests. std::list busy_clients_; + + // Clients that are draining but have not completed all outstanding requests yet. 
+ std::list draining_clients_; }; } // namespace Http } // namespace Envoy diff --git a/source/common/http/http1/BUILD b/source/common/http/http1/BUILD index aca8bd1d8d0e0..aa56ba332bbc6 100644 --- a/source/common/http/http1/BUILD +++ b/source/common/http/http1/BUILD @@ -67,7 +67,6 @@ envoy_cc_library( "//source/common/http:headers_lib", "//source/common/network:utility_lib", "//source/common/runtime:runtime_lib", - "//source/common/stats:timespan_lib", "//source/common/upstream:upstream_lib", ], ) diff --git a/source/common/http/http1/conn_pool.cc b/source/common/http/http1/conn_pool.cc index 8dac8e52db239..c4f5d124eeac1 100644 --- a/source/common/http/http1/conn_pool.cc +++ b/source/common/http/http1/conn_pool.cc @@ -16,8 +16,6 @@ #include "common/http/headers.h" #include "common/network/utility.h" #include "common/runtime/runtime_impl.h" -#include "common/stats/timespan_impl.h" -#include "common/upstream/upstream_impl.h" #include "absl/strings/match.h" @@ -37,22 +35,6 @@ ConnPoolImpl::ConnPoolImpl(Event::Dispatcher& dispatcher, Upstream::HostConstSha ConnPoolImpl::~ConnPoolImpl() {} -void ConnPoolImpl::drainConnections() { - while (!ready_clients_.empty()) { - firstReady().codec_client_->close(); - } - - // We drain busy clients by manually setting remaining requests to 1. Thus, when the next - // response completes the client will be destroyed. 
- for (const auto& client : busy_clients_) { - static_cast(*client).remaining_requests_ = 1; - } -} - -bool ConnPoolImpl::hasActiveConnections() const { - return !pending_requests_.empty() || !busy_clients_.empty(); -} - void ConnPoolImpl::attachRequestToClient(ActiveClient& client, StreamDecoder& response_decoder, ConnectionPool::Callbacks& callbacks) { ASSERT(!client.stream_wrapper_); @@ -63,27 +45,17 @@ void ConnPoolImpl::attachRequestToClient(ActiveClient& client, StreamDecoder& re client.codec_client_->streamInfo()); } -void ConnPoolImpl::checkForDrained() { - if (!drained_callbacks_.empty() && pending_requests_.empty() && busy_clients_.empty()) { - while (!ready_clients_.empty()) { - firstReady().codec_client_->close(); - } - - for (const DrainedCb& cb : drained_callbacks_) { - cb(); - } - } -} - void ConnPoolImpl::createNewConnection() { ENVOY_LOG(debug, "creating a new connection"); auto client = std::make_unique(*this); + client->state_ = ConnPoolImplBase::ActiveClient::State::BUSY; client->moveIntoList(std::move(client), busy_clients_); } ConnectionPool::Cancellable* ConnPoolImpl::newStream(StreamDecoder& response_decoder, ConnectionPool::Callbacks& callbacks) { if (!ready_clients_.empty()) { + firstReady().state_ = ConnPoolImplBase::ActiveClient::State::BUSY; firstReady().moveBetweenLists(ready_clients_, busy_clients_); ENVOY_CONN_LOG(debug, "using existing connection", *firstBusy().codec_client_); attachRequestToClient(firstBusy(), response_decoder, callbacks); @@ -112,75 +84,6 @@ ConnectionPool::Cancellable* ConnPoolImpl::newStream(StreamDecoder& response_dec } } -void ConnPoolImpl::onConnectionEvent(ActiveClient& client, Network::ConnectionEvent event) { - if (event == Network::ConnectionEvent::RemoteClose || - event == Network::ConnectionEvent::LocalClose) { - // The client died. 
- ENVOY_CONN_LOG(debug, "client disconnected, failure reason: {}", *client.codec_client_, - client.codec_client_->connectionFailureReason()); - - Envoy::Upstream::reportUpstreamCxDestroy(host_, event); - ActiveClientPtr removed; - bool check_for_drained = true; - if (client.stream_wrapper_) { - if (!client.stream_wrapper_->decode_complete_) { - Envoy::Upstream::reportUpstreamCxDestroyActiveRequest(host_, event); - } - - // There is an active request attached to this client. The underlying codec client will - // already have "reset" the stream to fire the reset callback. All we do here is just - // destroy the client. - removed = client.removeFromList(busy_clients_); - } else if (!client.connect_timer_) { - // The connect timer is destroyed on connect. The lack of a connect timer means that this - // client is idle and in the ready pool. - removed = client.removeFromList(ready_clients_); - check_for_drained = false; - } else { - // The only time this happens is if we actually saw a connect failure. - host_->cluster().stats().upstream_cx_connect_fail_.inc(); - host_->stats().cx_connect_fail_.inc(); - - removed = client.removeFromList(busy_clients_); - - // Raw connect failures should never happen under normal circumstances. If we have an upstream - // that is behaving badly, requests can get stuck here in the pending state. If we see a - // connect failure, we purge all pending requests so that calling code can determine what to - // do with the request. - ENVOY_CONN_LOG(debug, "purge pending, failure reason: {}", *client.codec_client_, - client.codec_client_->connectionFailureReason()); - purgePendingRequests(client.real_host_description_, - client.codec_client_->connectionFailureReason()); - } - - dispatcher_.deferredDelete(std::move(removed)); - - // If we have pending requests and we just lost a connection we should make a new one. 
- if (pending_requests_.size() > (ready_clients_.size() + busy_clients_.size())) { - createNewConnection(); - } - - if (check_for_drained) { - checkForDrained(); - } - } - - if (client.connect_timer_) { - client.connect_timer_->disableTimer(); - client.connect_timer_.reset(); - } - - // Note that the order in this function is important. Concretely, we must destroy the connect - // timer before we process a connected idle client, because if this results in an immediate - // drain/destruction event, we key off of the existence of the connect timer above to determine - // whether the client is in the ready list (connected) or the busy list (failed to connect). - if (event == Network::ConnectionEvent::Connected) { - client.conn_connect_ms_->complete(); - client.conn_connect_ms_.reset(); - processIdleClient(client, false); - } -} - void ConnPoolImpl::onDownstreamReset(ActiveClient& client) { // If we get a downstream reset to an attached client, we just blow it away. client.codec_client_->close(); @@ -236,6 +139,7 @@ void ConnPoolImpl::processIdleClient(ActiveClient& client, bool delay) { pending_requests_.pop_back(); } + // if (delay && !pending_requests_.empty() && !upstream_ready_enabled_) { upstream_ready_enabled_ = true; upstream_ready_timer_->enableTimer(std::chrono::milliseconds(0)); @@ -287,12 +191,9 @@ void ConnPoolImpl::StreamWrapper::onDecodeComplete() { } ConnPoolImpl::ActiveClient::ActiveClient(ConnPoolImpl& parent) - : parent_(parent), - connect_timer_(parent_.dispatcher_.createTimer([this]() -> void { onConnectTimeout(); })), + : ConnPoolImplBase::ActiveClient(parent), parent_(parent), remaining_requests_(parent_.host_->cluster().maxRequestsPerConnection()) { - conn_connect_ms_ = std::make_unique( - parent_.host_->cluster().stats().upstream_cx_connect_ms_, parent_.dispatcher_.timeSource()); Upstream::Host::CreateConnectionData data = parent_.host_->createConnection( parent_.dispatcher_, parent_.socket_options_, parent_.transport_socket_options_); 
real_host_description_ = data.host_description_; @@ -304,8 +205,6 @@ ConnPoolImpl::ActiveClient::ActiveClient(ConnPoolImpl& parent) parent_.host_->cluster().stats().upstream_cx_http1_total_.inc(); parent_.host_->stats().cx_total_.inc(); parent_.host_->stats().cx_active_.inc(); - conn_length_ = std::make_unique( - parent_.host_->cluster().stats().upstream_cx_length_ms_, parent_.dispatcher_.timeSource()); connect_timer_->enableTimer(parent_.host_->cluster().connectTimeout()); parent_.host_->cluster().resourceManager(parent_.priority_).connections().inc(); @@ -324,14 +223,6 @@ ConnPoolImpl::ActiveClient::~ActiveClient() { parent_.host_->cluster().resourceManager(parent_.priority_).connections().dec(); } -void ConnPoolImpl::ActiveClient::onConnectTimeout() { - // We just close the client at this point. This will result in both a timeout and a connect - // failure and will fold into all the normal connect failure logic. - ENVOY_CONN_LOG(debug, "connect timeout", *codec_client_); - parent_.host_->cluster().stats().upstream_cx_connect_timeout_.inc(); - codec_client_->close(); -} - CodecClientPtr ProdConnPoolImpl::createCodecClient(Upstream::Host::CreateConnectionData& data) { CodecClientPtr codec{new CodecClientProd(CodecClient::Type::HTTP1, std::move(data.connection_), data.host_description_, dispatcher_)}; diff --git a/source/common/http/http1/conn_pool.h b/source/common/http/http1/conn_pool.h index cb7f7fd283acd..20f35bec33bdd 100644 --- a/source/common/http/http1/conn_pool.h +++ b/source/common/http/http1/conn_pool.h @@ -9,11 +9,9 @@ #include "envoy/http/codec.h" #include "envoy/http/conn_pool.h" #include "envoy/network/connection.h" -#include "envoy/stats/timespan.h" #include "envoy/upstream/upstream.h" #include "common/common/linked_object.h" -#include "common/http/codec_client.h" #include "common/http/codec_wrappers.h" #include "common/http/conn_pool_base.h" @@ -41,15 +39,10 @@ class ConnPoolImpl : public ConnPoolImplBase { // ConnectionPool::Instance 
Http::Protocol protocol() const override { return Http::Protocol::Http11; } - void drainConnections() override; - bool hasActiveConnections() const override; ConnectionPool::Cancellable* newStream(StreamDecoder& response_decoder, ConnectionPool::Callbacks& callbacks) override; Upstream::HostDescriptionConstSharedPtr host() const override { return host_; }; - // ConnPoolImplBase - void checkForDrained() override; - protected: struct ActiveClient; @@ -82,30 +75,18 @@ class ConnPoolImpl : public ConnPoolImplBase { using StreamWrapperPtr = std::unique_ptr; - struct ActiveClient : public Network::ConnectionCallbacks, public ConnPoolImplBase::ActiveClient { + struct ActiveClient : public ConnPoolImplBase::ActiveClient { ActiveClient(ConnPoolImpl& parent); ~ActiveClient() override; // ConnPoolImplBase::ActiveClient - void close() override { codec_client_->close(); } - uint64_t connectionId() const override { return codec_client_->id(); } - - void onConnectTimeout(); - - // Network::ConnectionCallbacks - void onEvent(Network::ConnectionEvent event) override { - parent_.onConnectionEvent(*this, event); + bool hasActiveRequests() const override { return stream_wrapper_ != nullptr; } + bool closingWithIncompleteRequest() const override { + return (stream_wrapper_ != nullptr) && (!stream_wrapper_->decode_complete_); } - void onAboveWriteBufferHighWatermark() override {} - void onBelowWriteBufferLowWatermark() override {} ConnPoolImpl& parent_; - CodecClientPtr codec_client_; - Upstream::HostDescriptionConstSharedPtr real_host_description_; StreamWrapperPtr stream_wrapper_; - Event::TimerPtr connect_timer_; - Stats::TimespanPtr conn_connect_ms_; - Stats::TimespanPtr conn_length_; uint64_t remaining_requests_; }; @@ -113,7 +94,6 @@ class ConnPoolImpl : public ConnPoolImplBase { ConnectionPool::Callbacks& callbacks); virtual CodecClientPtr createCodecClient(Upstream::Host::CreateConnectionData& data) PURE; void createNewConnection(); - void onConnectionEvent(ActiveClient& client, 
Network::ConnectionEvent event); void onDownstreamReset(ActiveClient& client); void onResponseComplete(ActiveClient& client); void onUpstreamReady(); diff --git a/source/common/http/http2/BUILD b/source/common/http/http2/BUILD index ba48dc67903fb..cb80a566c5aaf 100644 --- a/source/common/http/http2/BUILD +++ b/source/common/http/http2/BUILD @@ -70,7 +70,6 @@ envoy_cc_library( "//source/common/http:codec_client_lib", "//source/common/http:conn_pool_base_lib", "//source/common/network:utility_lib", - "//source/common/stats:timespan_lib", "//source/common/upstream:upstream_lib", ], ) diff --git a/source/common/http/http2/conn_pool.cc b/source/common/http/http2/conn_pool.cc index 6052dbe886156..7c6a5228f0f9c 100644 --- a/source/common/http/http2/conn_pool.cc +++ b/source/common/http/http2/conn_pool.cc @@ -9,8 +9,6 @@ #include "common/http/http2/codec_impl.h" #include "common/network/utility.h" -#include "common/stats/timespan_impl.h" -#include "common/upstream/upstream_impl.h" namespace Envoy { namespace Http { @@ -23,67 +21,7 @@ ConnPoolImpl::ConnPoolImpl(Event::Dispatcher& dispatcher, Upstream::HostConstSha : ConnPoolImplBase(std::move(host), std::move(priority), dispatcher), socket_options_(options), transport_socket_options_(transport_socket_options) {} -ConnPoolImpl::~ConnPoolImpl() { - // TODO(snowp): Change the HTTP/2 conn pool to use the ready_clients_ from the base - // implementation. We probably need a third list for HTTP/2 (draining_clients_) in anticipation - // for there to be a distinction between a client that is being shut down (draining) and one that - // has reached its capacity (busy). 
- if (primary_client_) { - primary_client_->client_->close(); - } - - if (draining_client_) { - draining_client_->client_->close(); - } -} - -void ConnPoolImpl::ConnPoolImpl::drainConnections() { - if (primary_client_ != nullptr) { - movePrimaryClientToDraining(); - } -} - -bool ConnPoolImpl::hasActiveConnections() const { - if (primary_client_ && primary_client_->client_->numActiveRequests() > 0) { - return true; - } - - if (draining_client_ && draining_client_->client_->numActiveRequests() > 0) { - return true; - } - - return !pending_requests_.empty(); -} - -void ConnPoolImpl::checkForDrained() { - if (drained_callbacks_.empty()) { - return; - } - - bool drained = true; - if (primary_client_) { - if (primary_client_->client_->numActiveRequests() == 0) { - primary_client_->client_->close(); - ASSERT(!primary_client_); - } else { - drained = false; - } - } - - ASSERT(!draining_client_ || (draining_client_->client_->numActiveRequests() > 0)); - if (draining_client_ && draining_client_->client_->numActiveRequests() > 0) { - drained = false; - } - - if (drained) { - ENVOY_LOG(debug, "invoking drained callbacks"); - for (const DrainedCb& cb : drained_callbacks_) { - cb(); - } - } -} - -void ConnPoolImpl::newClientStream(Http::StreamDecoder& response_decoder, +void ConnPoolImpl::newClientStream(ActiveClient& client, Http::StreamDecoder& response_decoder, ConnectionPool::Callbacks& callbacks) { if (!host_->cluster().resourceManager(priority_).requests().canCreate()) { ENVOY_LOG(debug, "max requests overflow"); @@ -91,16 +29,15 @@ void ConnPoolImpl::newClientStream(Http::StreamDecoder& response_decoder, nullptr); host_->cluster().stats().upstream_rq_pending_overflow_.inc(); } else { - ENVOY_CONN_LOG(debug, "creating stream", *primary_client_->client_); - primary_client_->total_streams_++; + ENVOY_CONN_LOG(debug, "creating stream", *client.codec_client_); + client.total_streams_++; host_->stats().rq_total_.inc(); host_->stats().rq_active_.inc(); 
host_->cluster().stats().upstream_rq_total_.inc(); host_->cluster().stats().upstream_rq_active_.inc(); host_->cluster().resourceManager(priority_).requests().inc(); - callbacks.onPoolReady(primary_client_->client_->newStream(response_decoder), - primary_client_->real_host_description_, - primary_client_->client_->streamInfo()); + callbacks.onPoolReady(client.codec_client_->newStream(response_decoder), + client.real_host_description_, client.codec_client_->streamInfo()); } } @@ -114,16 +51,17 @@ ConnectionPool::Cancellable* ConnPoolImpl::newStream(Http::StreamDecoder& respon max_streams = maxTotalStreams(); } - if (primary_client_ && primary_client_->total_streams_ >= max_streams) { + if (primaryClient() && primaryClient()->total_streams_ >= max_streams) { movePrimaryClientToDraining(); } - if (!primary_client_) { - primary_client_ = std::make_unique(*this); + if (!primaryClient()) { + auto client = std::make_unique(*this); + client->moveIntoList(std::move(client), busy_clients_); } - // If the primary client is not connected yet, queue up the request. - if (!primary_client_->upstream_ready_) { + // If there are no ready clients yet, queue up the request. + if (ready_clients_.empty()) { // If we're not allowed to enqueue more requests, fail fast. if (!host_->cluster().resourceManager(priority_).pendingRequests().canCreate()) { ENVOY_LOG(debug, "max pending requests overflow"); @@ -138,98 +76,43 @@ ConnectionPool::Cancellable* ConnPoolImpl::newStream(Http::StreamDecoder& respon // We already have an active client that's connected to upstream, so attempt to establish a // new stream. 
- newClientStream(response_decoder, callbacks); + newClientStream(*primaryClient(), response_decoder, callbacks); return nullptr; } -void ConnPoolImpl::onConnectionEvent(ActiveClient& client, Network::ConnectionEvent event) { - if (event == Network::ConnectionEvent::RemoteClose || - event == Network::ConnectionEvent::LocalClose) { - ENVOY_CONN_LOG(debug, "client disconnected", *client.client_); - - Envoy::Upstream::reportUpstreamCxDestroy(host_, event); - if (client.closed_with_active_rq_) { - Envoy::Upstream::reportUpstreamCxDestroyActiveRequest(host_, event); - } - - if (client.connect_timer_) { - host_->cluster().stats().upstream_cx_connect_fail_.inc(); - host_->stats().cx_connect_fail_.inc(); - - // Raw connect failures should never happen under normal circumstances. If we have an upstream - // that is behaving badly, requests can get stuck here in the pending state. If we see a - // connect failure, we purge all pending requests so that calling code can determine what to - // do with the request. - // NOTE: We move the existing pending requests to a temporary list. This is done so that - // if retry logic submits a new request to the pool, we don't fail it inline. 
- purgePendingRequests(client.real_host_description_, - client.client_->connectionFailureReason()); - } - - if (&client == primary_client_.get()) { - ENVOY_CONN_LOG(debug, "destroying primary client", *client.client_); - dispatcher_.deferredDelete(std::move(primary_client_)); - } else { - ENVOY_CONN_LOG(debug, "destroying draining client", *client.client_); - dispatcher_.deferredDelete(std::move(draining_client_)); - } - - if (client.closed_with_active_rq_) { - checkForDrained(); - } - } - - if (event == Network::ConnectionEvent::Connected) { - conn_connect_ms_->complete(); - - client.upstream_ready_ = true; - onUpstreamReady(); - } - - if (client.connect_timer_) { - client.connect_timer_->disableTimer(); - client.connect_timer_.reset(); - } -} - void ConnPoolImpl::movePrimaryClientToDraining() { - ENVOY_CONN_LOG(debug, "moving primary to draining", *primary_client_->client_); - if (draining_client_) { - // This should pretty much never happen, but is possible if we start draining and then get - // a goaway for example. In this case just kill the current draining connection. It's not - // worth keeping a list. - draining_client_->client_->close(); - } - - ASSERT(!draining_client_); - if (primary_client_->client_->numActiveRequests() == 0) { + ENVOY_CONN_LOG(debug, "moving primary to draining", *primaryClient()->codec_client_); + if (primaryClient()->codec_client_->numActiveRequests() == 0) { // If we are making a new connection and the primary does not have any active requests just // close it now. 
- primary_client_->client_->close(); + primaryClient()->codec_client_->close(); } else { - draining_client_ = std::move(primary_client_); + // TODO(ggreenway): get correct source list + primaryClient()->moveBetweenLists(busy_clients_, draining_clients_); } - ASSERT(!primary_client_); + ASSERT(!primaryClient()); } void ConnPoolImpl::onGoAway(ActiveClient& client) { - ENVOY_CONN_LOG(debug, "remote goaway", *client.client_); + ENVOY_CONN_LOG(debug, "remote goaway", *client.codec_client_); host_->cluster().stats().upstream_cx_close_notify_.inc(); - if (&client == primary_client_.get()) { + if (client.state_ == ConnPoolImplBase::ActiveClient::State::READY || + client.state_ == ConnPoolImplBase::ActiveClient::State::BUSY) { movePrimaryClientToDraining(); } } void ConnPoolImpl::onStreamDestroy(ActiveClient& client) { - ENVOY_CONN_LOG(debug, "destroying stream: {} remaining", *client.client_, - client.client_->numActiveRequests()); + ENVOY_CONN_LOG(debug, "destroying stream: {} remaining", *client.codec_client_, + client.codec_client_->numActiveRequests()); host_->stats().rq_active_.dec(); host_->cluster().stats().upstream_rq_active_.dec(); host_->cluster().resourceManager(priority_).requests().dec(); - if (&client == draining_client_.get() && client.client_->numActiveRequests() == 0) { + if (client.state_ == ActiveClient::State::DRAINING && + client.codec_client_->numActiveRequests() == 0) { // Close out the draining client if we no long have active requests. - client.client_->close(); + client.codec_client_->close(); } // If we are destroying this stream because of a disconnect, do not check for drain here. We will @@ -254,24 +137,22 @@ void ConnPoolImpl::onStreamReset(ActiveClient& client, Http::StreamResetReason r void ConnPoolImpl::onUpstreamReady() { // Establishes new codec streams for each pending request. 
- while (!pending_requests_.empty()) { - newClientStream(pending_requests_.back()->decoder_, pending_requests_.back()->callbacks_); + while (!pending_requests_.empty() && primaryClient() != nullptr) { + newClientStream(*primaryClient(), pending_requests_.back()->decoder_, + pending_requests_.back()->callbacks_); pending_requests_.pop_back(); } } ConnPoolImpl::ActiveClient::ActiveClient(ConnPoolImpl& parent) - : parent_(parent), - connect_timer_(parent_.dispatcher_.createTimer([this]() -> void { onConnectTimeout(); })) { - parent_.conn_connect_ms_ = std::make_unique( - parent_.host_->cluster().stats().upstream_cx_connect_ms_, parent_.dispatcher_.timeSource()); + : ConnPoolImplBase::ActiveClient(parent), parent_(parent) { Upstream::Host::CreateConnectionData data = parent_.host_->createConnection( parent_.dispatcher_, parent_.socket_options_, parent_.transport_socket_options_); real_host_description_ = data.host_description_; - client_ = parent_.createCodecClient(data); - client_->addConnectionCallbacks(*this); - client_->setCodecClientCallbacks(*this); - client_->setCodecConnectionCallbacks(*this); + codec_client_ = parent_.createCodecClient(data); + codec_client_->addConnectionCallbacks(*this); + codec_client_->setCodecClientCallbacks(*this); + codec_client_->setCodecConnectionCallbacks(*this); connect_timer_->enableTimer(parent_.host_->cluster().connectTimeout()); parent_.host_->stats().cx_total_.inc(); @@ -279,14 +160,13 @@ ConnPoolImpl::ActiveClient::ActiveClient(ConnPoolImpl& parent) parent_.host_->cluster().stats().upstream_cx_total_.inc(); parent_.host_->cluster().stats().upstream_cx_active_.inc(); parent_.host_->cluster().stats().upstream_cx_http2_total_.inc(); - conn_length_ = std::make_unique( - parent_.host_->cluster().stats().upstream_cx_length_ms_, parent_.dispatcher_.timeSource()); - client_->setConnectionStats({parent_.host_->cluster().stats().upstream_cx_rx_bytes_total_, - parent_.host_->cluster().stats().upstream_cx_rx_bytes_buffered_, - 
parent_.host_->cluster().stats().upstream_cx_tx_bytes_total_, - parent_.host_->cluster().stats().upstream_cx_tx_bytes_buffered_, - &parent_.host_->cluster().stats().bind_errors_, nullptr}); + codec_client_->setConnectionStats( + {parent_.host_->cluster().stats().upstream_cx_rx_bytes_total_, + parent_.host_->cluster().stats().upstream_cx_rx_bytes_buffered_, + parent_.host_->cluster().stats().upstream_cx_tx_bytes_total_, + parent_.host_->cluster().stats().upstream_cx_tx_bytes_buffered_, + &parent_.host_->cluster().stats().bind_errors_, nullptr}); } ConnPoolImpl::ActiveClient::~ActiveClient() { diff --git a/source/common/http/http2/conn_pool.h b/source/common/http/http2/conn_pool.h index 18bf6b20e8d24..60625df82bd34 100644 --- a/source/common/http/http2/conn_pool.h +++ b/source/common/http/http2/conn_pool.h @@ -10,7 +10,6 @@ #include "envoy/stats/timespan.h" #include "envoy/upstream/upstream.h" -#include "common/http/codec_client.h" #include "common/http/conn_pool_base.h" namespace Envoy { @@ -28,36 +27,23 @@ class ConnPoolImpl : public ConnPoolImplBase { Upstream::ResourcePriority priority, const Network::ConnectionSocket::OptionsSharedPtr& options, const Network::TransportSocketOptionsSharedPtr& transport_socket_options); - ~ConnPoolImpl() override; // Http::ConnectionPool::Instance Http::Protocol protocol() const override { return Http::Protocol::Http2; } - void drainConnections() override; - bool hasActiveConnections() const override; ConnectionPool::Cancellable* newStream(Http::StreamDecoder& response_decoder, ConnectionPool::Callbacks& callbacks) override; Upstream::HostDescriptionConstSharedPtr host() const override { return host_; }; protected: - struct ActiveClient : public Network::ConnectionCallbacks, - public CodecClientCallbacks, + struct ActiveClient : public CodecClientCallbacks, public Http::ConnectionCallbacks, public ConnPoolImplBase::ActiveClient { ActiveClient(ConnPoolImpl& parent); ~ActiveClient() override; - void onConnectTimeout() { 
parent_.onConnectTimeout(*this); } - // ConnPoolImpl::ActiveClient - void close() override { client_->close(); } - uint64_t connectionId() const override { return client_->id(); } - - // Network::ConnectionCallbacks - void onEvent(Network::ConnectionEvent event) override { - parent_.onConnectionEvent(*this, event); - } - void onAboveWriteBufferHighWatermark() override {} - void onBelowWriteBufferLowWatermark() override {} + bool hasActiveRequests() const override { return codec_client_->numActiveRequests() > 0; } + bool closingWithIncompleteRequest() const override { return closed_with_active_rq_; } // CodecClientCallbacks void onStreamDestroy() override { parent_.onStreamDestroy(*this); } @@ -69,33 +55,35 @@ class ConnPoolImpl : public ConnPoolImplBase { void onGoAway() override { parent_.onGoAway(*this); } ConnPoolImpl& parent_; - CodecClientPtr client_; - Upstream::HostDescriptionConstSharedPtr real_host_description_; uint64_t total_streams_{}; - Event::TimerPtr connect_timer_; - bool upstream_ready_{}; - Stats::TimespanPtr conn_length_; bool closed_with_active_rq_{}; }; using ActiveClientPtr = std::unique_ptr; - // Http::ConnPoolImplBase - void checkForDrained() override; + // TODO: delete me when no longer needed + ActiveClient* primaryClient() { + if (!ready_clients_.empty()) { + return static_cast(ready_clients_.front().get()); + } + + if (!busy_clients_.empty()) { + return static_cast(busy_clients_.front().get()); + } + + return nullptr; + } virtual CodecClientPtr createCodecClient(Upstream::Host::CreateConnectionData& data) PURE; virtual uint32_t maxTotalStreams() PURE; void movePrimaryClientToDraining(); - void onConnectionEvent(ActiveClient& client, Network::ConnectionEvent event); void onGoAway(ActiveClient& client); void onStreamDestroy(ActiveClient& client); void onStreamReset(ActiveClient& client, Http::StreamResetReason reason); - void newClientStream(Http::StreamDecoder& response_decoder, ConnectionPool::Callbacks& callbacks); + void 
newClientStream(ActiveClient& client, Http::StreamDecoder& response_decoder, + ConnectionPool::Callbacks& callbacks); void onUpstreamReady(); - Stats::TimespanPtr conn_connect_ms_; - ActiveClientPtr primary_client_; - ActiveClientPtr draining_client_; const Network::ConnectionSocket::OptionsSharedPtr socket_options_; const Network::TransportSocketOptionsSharedPtr transport_socket_options_; }; From 3a35fa62cfd5215ea84bf4015195e316f7ffc2f1 Mon Sep 17 00:00:00 2001 From: Greg Greenway Date: Thu, 9 Jan 2020 11:09:51 -0800 Subject: [PATCH 07/41] Working version; tests pass Signed-off-by: Greg Greenway --- source/common/http/conn_pool_base.cc | 313 +++++++++++++++++------ source/common/http/conn_pool_base.h | 53 +++- source/common/http/http1/conn_pool.cc | 165 +++--------- source/common/http/http1/conn_pool.h | 31 +-- source/common/http/http2/conn_pool.cc | 144 +++-------- source/common/http/http2/conn_pool.h | 46 +--- test/common/http/http1/conn_pool_test.cc | 16 +- test/common/http/http2/conn_pool_test.cc | 33 ++- test/test_common/utility.cc | 14 +- test/test_common/utility.h | 5 + 10 files changed, 407 insertions(+), 413 deletions(-) diff --git a/source/common/http/conn_pool_base.cc b/source/common/http/conn_pool_base.cc index c714267ccf773..cad8aeaac6dca 100644 --- a/source/common/http/conn_pool_base.cc +++ b/source/common/http/conn_pool_base.cc @@ -6,26 +6,15 @@ namespace Envoy { namespace Http { -ConnPoolImplBase::ActiveClient::ActiveClient(ConnPoolImplBase& parent) - : base_parent_(parent), connect_timer_(base_parent_.dispatcher_.createTimer( - [this]() -> void { onConnectTimeout(); })) { - conn_connect_ms_ = std::make_unique( - base_parent_.host_->cluster().stats().upstream_cx_connect_ms_, - base_parent_.dispatcher_.timeSource()); - conn_length_ = std::make_unique( - base_parent_.host_->cluster().stats().upstream_cx_length_ms_, - base_parent_.dispatcher_.timeSource()); - connect_timer_->enableTimer(base_parent_.host_->cluster().connectTimeout()); -} - -void 
ConnPoolImplBase::ActiveClient::onConnectTimeout() { - ENVOY_CONN_ID_LOG(debug, "connect timeout", connectionId()); - base_parent_.host_->cluster().stats().upstream_cx_connect_timeout_.inc(); - close(); -} +ConnPoolImplBase::ConnPoolImplBase( + Upstream::HostConstSharedPtr host, Upstream::ResourcePriority priority, + Event::Dispatcher& dispatcher, const Network::ConnectionSocket::OptionsSharedPtr& options, + const Network::TransportSocketOptionsSharedPtr& transport_socket_options) + : host_(host), priority_(priority), dispatcher_(dispatcher), socket_options_(options), + transport_socket_options_(transport_socket_options) {} ConnPoolImplBase::~ConnPoolImplBase() { - for (auto* list : {&ready_clients_, &busy_clients_, &draining_clients_}) { + for (auto* list : {&ready_clients_, &busy_clients_}) { while (!list->empty()) { list->front()->close(); } @@ -35,13 +24,123 @@ ConnPoolImplBase::~ConnPoolImplBase() { dispatcher_.clearDeferredDeleteList(); } +void ConnPoolImplBase::createNewConnection() { + ENVOY_LOG(debug, "creating a new connection"); + ActiveClientPtr client = instantiateActiveClient(); + client->state_ = ActiveClient::State::CONNECTING; + client->moveIntoList(std::move(client), owningList(client->state_)); +} + +void ConnPoolImplBase::attachRequestToClient(ActiveClient& client, StreamDecoder& response_decoder, + ConnectionPool::Callbacks& callbacks) { + ASSERT(client.state_ == ActiveClient::State::READY); + + if (!host_->cluster().resourceManager(priority_).requests().canCreate()) { + ENVOY_LOG(debug, "max requests overflow"); + callbacks.onPoolFailure(ConnectionPool::PoolFailureReason::Overflow, absl::string_view(), + nullptr); + host_->cluster().stats().upstream_rq_pending_overflow_.inc(); + } else { + ENVOY_CONN_LOG(debug, "creating stream", *client.codec_client_); + StreamEncoder& new_encoder = client.newStreamEncoder(response_decoder); + + bool drain = false; + // remaining_requests_ == 0 means no limit, so handle 0 specially. 
+ if (client.remaining_requests_ > 0) { + client.remaining_requests_--; + if (client.remaining_requests_ == 0) { + drain = true; + } + } + + if (drain) { + ENVOY_CONN_LOG(debug, "maximum requests per connection, DRAINING", *client.codec_client_); + host_->cluster().stats().upstream_cx_max_requests_.inc(); + setActiveClientState(client, ActiveClient::State::DRAINING); + } else if (client.codec_client_->numActiveRequests() >= client.concurrent_request_limit_) { + setActiveClientState(client, ActiveClient::State::BUSY); + } + + host_->stats().rq_total_.inc(); + host_->stats().rq_active_.inc(); + host_->cluster().stats().upstream_rq_total_.inc(); + host_->cluster().stats().upstream_rq_active_.inc(); + host_->cluster().resourceManager(priority_).requests().inc(); + callbacks.onPoolReady(new_encoder, client.real_host_description_, + client.codec_client_->streamInfo()); + } +} + +void ConnPoolImplBase::onRequestClosed(ActiveClient& client, bool delay_attaching_request) { + ENVOY_CONN_LOG(debug, "destroying stream: {} remaining", *client.codec_client_, + client.codec_client_->numActiveRequests()); + host_->stats().rq_active_.dec(); + host_->cluster().stats().upstream_rq_active_.dec(); + host_->cluster().resourceManager(priority_).requests().dec(); + if (client.state_ == ActiveClient::State::DRAINING && + client.codec_client_->numActiveRequests() == 0) { + // Close out the draining client if we no longer have active requests. + client.codec_client_->close(); + } else if (client.state_ == ActiveClient::State::BUSY) { + // A request was just ended, so we should be below the limit now. 
+ ASSERT(client.codec_client_->numActiveRequests() < client.concurrent_request_limit_); + + setActiveClientState(client, ActiveClient::State::READY); + if (!delay_attaching_request) { + onUpstreamReady(); + } + } +} + +ConnectionPool::Cancellable* ConnPoolImplBase::newStream(Http::StreamDecoder& response_decoder, + ConnectionPool::Callbacks& callbacks) { + if (!ready_clients_.empty()) { + ActiveClient& client = *ready_clients_.front(); + ENVOY_CONN_LOG(debug, "using existing connection", *client.codec_client_); + attachRequestToClient(client, response_decoder, callbacks); + return nullptr; + } + + if (host_->cluster().resourceManager(priority_).pendingRequests().canCreate()) { + bool can_create_connection = + host_->cluster().resourceManager(priority_).connections().canCreate(); + if (!can_create_connection) { + host_->cluster().stats().upstream_cx_overflow_.inc(); + } + + // If we have no connections at all, make one no matter what so we don't starve. + if ((ready_clients_.empty() && busy_clients_.empty()) || can_create_connection) { + createNewConnection(); + } + + return newPendingRequest(response_decoder, callbacks); + } else { + ENVOY_LOG(debug, "max pending requests overflow"); + callbacks.onPoolFailure(ConnectionPool::PoolFailureReason::Overflow, absl::string_view(), + nullptr); + host_->cluster().stats().upstream_rq_pending_overflow_.inc(); + return nullptr; + } +} + +void ConnPoolImplBase::onUpstreamReady() { + while (!pending_requests_.empty() && !ready_clients_.empty()) { + ActiveClientPtr& client = ready_clients_.front(); + ENVOY_CONN_LOG(debug, "attaching to next request", *client->codec_client_); + // Pending requests are pushed onto the front, so pull from the back. 
+ attachRequestToClient(*client, pending_requests_.back()->decoder_, + pending_requests_.back()->callbacks_); + pending_requests_.pop_back(); + } +} + void ConnPoolImplBase::addDrainedCallback(DrainedCb cb) { drained_callbacks_.push_back(cb); checkForDrained(); } bool ConnPoolImplBase::hasActiveConnections() const { - if (!pending_requests_.empty() || !busy_clients_.empty() || !draining_clients_.empty()) { + if (!pending_requests_.empty() || !busy_clients_.empty()) { return true; } @@ -54,45 +153,63 @@ bool ConnPoolImplBase::hasActiveConnections() const { return false; } +std::list& +ConnPoolImplBase::owningList(ActiveClient::State state) { + switch (state) { + case ActiveClient::State::CONNECTING: + return busy_clients_; + case ActiveClient::State::READY: + return ready_clients_; + case ActiveClient::State::BUSY: + return busy_clients_; + case ActiveClient::State::DRAINING: + return busy_clients_; + case ActiveClient::State::CLOSED: + NOT_REACHED_GCOVR_EXCL_LINE; + } + NOT_REACHED_GCOVR_EXCL_LINE; +} + +void ConnPoolImplBase::setActiveClientState(ActiveClient& client, ActiveClient::State new_state) { + auto& old_list = owningList(client.state_); + auto& new_list = owningList(new_state); + client.state_ = new_state; + if (&old_list != &new_list) { + client.moveBetweenLists(old_list, new_list); + } +} + void ConnPoolImplBase::closeIdleConnections() { // Create a separate list of elements to close to avoid mutate-while-iterating problems. 
- std::list toClose; + std::list toClose; // Possibly-idle connections are always in the ready_clients_ list for (auto& client : ready_clients_) { - if (client->hasActiveRequests()) { - toClose.push_back(&client); + if (!client->hasActiveRequests()) { + toClose.push_back(client.get()); } } - while (!toClose.empty()) { - (*toClose.front())->close(); - } -} - -std::list* ConnPoolImplBase::owningList(ActiveClient& client) { - switch (client.state_) { - case ActiveClient::State::CONNECTING: - return &busy_clients_; - case ActiveClient::State::READY: - return &ready_clients_; - case ActiveClient::State::BUSY: - return &busy_clients_; - case ActiveClient::State::DRAINING: - return &draining_clients_; - case ActiveClient::State::CLOSED: - return nullptr; + for (auto& entry : toClose) { + entry->close(); } - NOT_REACHED_GCOVR_EXCL_LINE; } void ConnPoolImplBase::drainConnections() { closeIdleConnections(); - while (!busy_clients_.empty()) { - auto& client = busy_clients_.front(); - client->state_ = ConnPoolImplBase::ActiveClient::State::DRAINING; - client->moveBetweenLists(busy_clients_, draining_clients_); + // closeIdleConnections() closes all connections in ready_clients_ with no active requests, + // so all remaining entries in ready_clients_ are serving requests. Move them and all entries + // in busy_clients_ to draining. + while (!ready_clients_.empty()) { + setActiveClientState(*ready_clients_.front(), ActiveClient::State::DRAINING); + } + + // Changing busy_clients_ to DRAINING does not move them between lists, + // so use a for-loop since the list is not mutated. 
+ ASSERT(&owningList(ActiveClient::State::DRAINING) == &busy_clients_); + for (auto& busy_client : busy_clients_) { + setActiveClientState(*busy_client, ActiveClient::State::DRAINING); } } @@ -103,7 +220,7 @@ void ConnPoolImplBase::checkForDrained() { closeIdleConnections(); - if (pending_requests_.empty() && busy_clients_.empty() && draining_clients_.empty()) { + if (pending_requests_.empty() && ready_clients_.empty() && busy_clients_.empty()) { ENVOY_LOG(debug, "invoking drained callbacks"); for (const DrainedCb& cb : drained_callbacks_) { cb(); @@ -124,9 +241,9 @@ void ConnPoolImplBase::onConnectionEvent(ConnPoolImplBase::ActiveClient& client, Envoy::Upstream::reportUpstreamCxDestroyActiveRequest(host_, event); } - ActiveClientPtr removed; - switch (client.state_) { - case ActiveClient::State::CONNECTING: + const bool check_for_drained = client.closingWithIncompleteRequest(); + + if (client.state_ == ActiveClient::State::CONNECTING) { host_->cluster().stats().upstream_cx_connect_fail_.inc(); host_->stats().cx_connect_fail_.inc(); @@ -138,50 +255,36 @@ void ConnPoolImplBase::onConnectionEvent(ConnPoolImplBase::ActiveClient& client, // if retry logic submits a new request to the pool, we don't fail it inline. purgePendingRequests(client.real_host_description_, client.codec_client_->connectionFailureReason()); - break; - - case ConnPoolImplBase::ActiveClient::State::READY: - dispatcher_.deferredDelete(client.removeFromList(ready_clients_)); - if (client.closingWithIncompleteRequest()) { - checkForDrained(); - } - break; + } - case ConnPoolImplBase::ActiveClient::State::BUSY: - dispatcher_.deferredDelete(client.removeFromList(busy_clients_)); - checkForDrained(); - break; + // We need to release our resourceManager() resources before checking below for + // whether we can create a new connection. Normally this would happen when + // client's destructor runs, but this object needs to be deferredDelete'd(), so + // this forces part of its cleanup to happen now. 
+ client.releaseResources(); - case ConnPoolImplBase::ActiveClient::State::DRAINING: - dispatcher_.deferredDelete(client.removeFromList(draining_clients_)); + dispatcher_.deferredDelete(client.removeFromList(owningList(client.state_))); + if (check_for_drained) { checkForDrained(); - break; - - case ConnPoolImplBase::ActiveClient::State::CLOSED: - PANIC("Connection closed event received while already in CLOSED state."); - break; } - // bool was_draining = (client.state_ == ConnPoolImplBase::ActiveClient::State::DRAINING); - client.state_ = ConnPoolImplBase::ActiveClient::State::CLOSED; + + client.state_ = ActiveClient::State::CLOSED; // If we have pending requests and we just lost a connection we should make a new one. - // TODO(ggreenway): This math only works for http1; figure out how many request slots there - // are in ready_clients_, and find the CONNECTING clients in busy_clients_ and count them as - // available. - /* - if (!was_draining && pending_requests_.size() > (ready_clients_.size() + busy_clients_.size())) - { createNewConnection(); + if (!pending_requests_.empty() && + host_->cluster().resourceManager(priority_).requests().canCreate() && + host_->cluster().resourceManager(priority_).connections().canCreate()) { + createNewConnection(); } - */ } else if (event == Network::ConnectionEvent::Connected) { client.conn_connect_ms_->complete(); client.conn_connect_ms_.reset(); - ASSERT(client.state_ == ConnPoolImplBase::ActiveClient::State::CONNECTING); - client.state_ = ConnPoolImplBase::ActiveClient::State::READY; - client.moveBetweenLists(busy_clients_, ready_clients_); + ASSERT(client.state_ == ActiveClient::State::CONNECTING); + setActiveClientState(client, ActiveClient::State::READY); onUpstreamReady(); + checkForDrained(); } if (client.connect_timer_) { @@ -242,5 +345,57 @@ void ConnPoolImplBase::onPendingRequestCancel(PendingRequest& request) { checkForDrained(); } +ConnPoolImplBase::ActiveClient::ActiveClient(ConnPoolImplBase& parent, + uint64_t 
lifetime_request_limit, + uint64_t concurrent_request_limit) + : parent_(parent), remaining_requests_(lifetime_request_limit), + concurrent_request_limit_(concurrent_request_limit), + connect_timer_(parent_.dispatcher_.createTimer([this]() -> void { onConnectTimeout(); })) { + Upstream::Host::CreateConnectionData data = parent_.host_->createConnection( + parent_.dispatcher_, parent_.socket_options_, parent_.transport_socket_options_); + real_host_description_ = data.host_description_; + codec_client_ = parent_.createCodecClient(data); + codec_client_->addConnectionCallbacks(*this); + + conn_connect_ms_ = std::make_unique( + parent_.host_->cluster().stats().upstream_cx_connect_ms_, parent_.dispatcher_.timeSource()); + conn_length_ = std::make_unique( + parent_.host_->cluster().stats().upstream_cx_length_ms_, parent_.dispatcher_.timeSource()); + connect_timer_->enableTimer(parent_.host_->cluster().connectTimeout()); + + parent_.host_->stats().cx_total_.inc(); + parent_.host_->stats().cx_active_.inc(); + parent_.host_->cluster().stats().upstream_cx_total_.inc(); + parent_.host_->cluster().stats().upstream_cx_active_.inc(); + parent_.host_->cluster().resourceManager(parent_.priority_).connections().inc(); + + codec_client_->setConnectionStats( + {parent_.host_->cluster().stats().upstream_cx_rx_bytes_total_, + parent_.host_->cluster().stats().upstream_cx_rx_bytes_buffered_, + parent_.host_->cluster().stats().upstream_cx_tx_bytes_total_, + parent_.host_->cluster().stats().upstream_cx_tx_bytes_buffered_, + &parent_.host_->cluster().stats().bind_errors_, nullptr}); +} + +ConnPoolImplBase::ActiveClient::~ActiveClient() { releaseResources(); } + +void ConnPoolImplBase::ActiveClient::releaseResources() { + if (!resources_released_) { + resources_released_ = true; + + conn_length_->complete(); + + parent_.host_->cluster().stats().upstream_cx_active_.dec(); + parent_.host_->stats().cx_active_.dec(); + 
parent_.host_->cluster().resourceManager(parent_.priority_).connections().dec(); + } +} + +void ConnPoolImplBase::ActiveClient::onConnectTimeout() { + ENVOY_CONN_ID_LOG(debug, "connect timeout", connectionId()); + parent_.host_->cluster().stats().upstream_cx_connect_timeout_.inc(); + close(); +} + } // namespace Http } // namespace Envoy diff --git a/source/common/http/conn_pool_base.h b/source/common/http/conn_pool_base.h index e6b12ba192b9a..730ec43237c0b 100644 --- a/source/common/http/conn_pool_base.h +++ b/source/common/http/conn_pool_base.h @@ -18,14 +18,18 @@ class ConnPoolImplBase : public ConnectionPool::Instance, protected Logger::Loggable { public: // ConnectionPool::Instance + ConnectionPool::Cancellable* newStream(StreamDecoder& response_decoder, + ConnectionPool::Callbacks& callbacks) override; void addDrainedCallback(DrainedCb cb) override; bool hasActiveConnections() const override; void drainConnections() override; + Upstream::HostDescriptionConstSharedPtr host() const override { return host_; }; protected: ConnPoolImplBase(Upstream::HostConstSharedPtr host, Upstream::ResourcePriority priority, - Event::Dispatcher& dispatcher) - : host_(host), priority_(priority), dispatcher_(dispatcher) {} + Event::Dispatcher& dispatcher, + const Network::ConnectionSocket::OptionsSharedPtr& options, + const Network::TransportSocketOptionsSharedPtr& transport_socket_options); virtual ~ConnPoolImplBase(); // ActiveClient provides a base class for connection pool clients that handles connection timings @@ -34,12 +38,15 @@ class ConnPoolImplBase : public ConnectionPool::Instance, public Network::ConnectionCallbacks, public Event::DeferredDeletable { public: - ActiveClient(ConnPoolImplBase& parent); - virtual ~ActiveClient() { conn_length_->complete(); } + ActiveClient(ConnPoolImplBase& parent, uint64_t lifetime_request_limit, + uint64_t concurrent_request_limit); + virtual ~ActiveClient(); + + void releaseResources(); // Network::ConnectionCallbacks void 
onEvent(Network::ConnectionEvent event) override { - base_parent_.onConnectionEvent(*this, event); + parent_.onConnectionEvent(*this, event); } void onAboveWriteBufferHighWatermark() override {} void onBelowWriteBufferLowWatermark() override {} @@ -51,15 +58,20 @@ class ConnPoolImplBase : public ConnectionPool::Instance, virtual bool hasActiveRequests() const PURE; virtual bool closingWithIncompleteRequest() const PURE; + virtual StreamEncoder& newStreamEncoder(StreamDecoder& response_decoder) PURE; enum class State { CONNECTING, READY, BUSY, DRAINING, CLOSED }; + + ConnPoolImplBase& parent_; + uint64_t remaining_requests_; + const uint64_t concurrent_request_limit_; State state_{State::CONNECTING}; - ConnPoolImplBase& base_parent_; // TODO: combine this and child parent_ CodecClientPtr codec_client_; Upstream::HostDescriptionConstSharedPtr real_host_description_; Stats::TimespanPtr conn_connect_ms_; Stats::TimespanPtr conn_length_; Event::TimerPtr connect_timer_; + bool resources_released_{false}; }; using ActiveClientPtr = std::unique_ptr; @@ -79,8 +91,14 @@ class ConnPoolImplBase : public ConnectionPool::Instance, using PendingRequestPtr = std::unique_ptr; + // Create a new CodecClient + virtual CodecClientPtr createCodecClient(Upstream::Host::CreateConnectionData& data) PURE; + + // Returns a new instance of ActiveClient. + virtual ActiveClientPtr instantiateActiveClient() PURE; + // Gets a pointer to the list that currently owns this client. - std::list* owningList(ActiveClient& client); + std::list& owningList(ActiveClient::State state); // Creates a new PendingRequest and enqueues it into the request queue. ConnectionPool::Cancellable* newPendingRequest(StreamDecoder& decoder, @@ -96,15 +114,27 @@ class ConnPoolImplBase : public ConnectionPool::Instance, // Closes any idle connections. 
void closeIdleConnections(); - void onConnectionEvent(ActiveClient& client, Network::ConnectionEvent event); + // Called by derived classes anytime a request is completed or destroyed for any reason. + void onRequestClosed(ActiveClient& client, bool delay_attaching_request); - // Must be implemented by sub class. Attempts to drain inactive clients. + // Changes the state_ of an ActiveClient and moves to the appropriate list. + void setActiveClientState(ActiveClient& client, ActiveClient::State new_state); + + void onConnectionEvent(ActiveClient& client, Network::ConnectionEvent event); void checkForDrained(); - virtual void onUpstreamReady() PURE; + void onUpstreamReady(); + void attachRequestToClient(ActiveClient& client, StreamDecoder& response_decoder, + ConnectionPool::Callbacks& callbacks); + void createNewConnection(); +public: const Upstream::HostConstSharedPtr host_; const Upstream::ResourcePriority priority_; + +protected: Event::Dispatcher& dispatcher_; + const Network::ConnectionSocket::OptionsSharedPtr socket_options_; + const Network::TransportSocketOptionsSharedPtr transport_socket_options_; std::list drained_callbacks_; std::list pending_requests_; @@ -118,9 +148,6 @@ class ConnPoolImplBase : public ConnectionPool::Instance, // Clients that are not ready to handle additional requests. std::list busy_clients_; - - // Clients that are draining but have not completed all outstanding requests yet. 
- std::list draining_clients_; }; } // namespace Http } // namespace Envoy diff --git a/source/common/http/http1/conn_pool.cc b/source/common/http/http1/conn_pool.cc index c153482465914..be34cf2fff18c 100644 --- a/source/common/http/http1/conn_pool.cc +++ b/source/common/http/http1/conn_pool.cc @@ -28,60 +28,16 @@ ConnPoolImpl::ConnPoolImpl(Event::Dispatcher& dispatcher, Upstream::HostConstSha const Network::ConnectionSocket::OptionsSharedPtr& options, const Http::Http1Settings& settings, const Network::TransportSocketOptionsSharedPtr& transport_socket_options) - : ConnPoolImplBase(std::move(host), std::move(priority), dispatcher), socket_options_(options), - transport_socket_options_(transport_socket_options), - upstream_ready_timer_(dispatcher_.createTimer([this]() { onUpstreamReady(); })), + : ConnPoolImplBase(std::move(host), std::move(priority), dispatcher, options, + transport_socket_options), + upstream_ready_timer_(dispatcher_.createTimer([this]() { + upstream_ready_enabled_ = false; + onUpstreamReady(); + })), settings_(settings) {} -ConnPoolImpl::~ConnPoolImpl() {} - -void ConnPoolImpl::attachRequestToClient(ActiveClient& client, StreamDecoder& response_decoder, - ConnectionPool::Callbacks& callbacks) { - ASSERT(!client.stream_wrapper_); - host_->cluster().stats().upstream_rq_total_.inc(); - host_->stats().rq_total_.inc(); - client.stream_wrapper_ = std::make_unique(response_decoder, client); - callbacks.onPoolReady(*client.stream_wrapper_, client.real_host_description_, - client.codec_client_->streamInfo()); -} - -void ConnPoolImpl::createNewConnection() { - ENVOY_LOG(debug, "creating a new connection"); - auto client = std::make_unique(*this); - client->state_ = ConnPoolImplBase::ActiveClient::State::BUSY; - client->moveIntoList(std::move(client), busy_clients_); -} - -ConnectionPool::Cancellable* ConnPoolImpl::newStream(StreamDecoder& response_decoder, - ConnectionPool::Callbacks& callbacks) { - if (!ready_clients_.empty()) { - firstReady().state_ = 
ConnPoolImplBase::ActiveClient::State::BUSY; - firstReady().moveBetweenLists(ready_clients_, busy_clients_); - ENVOY_CONN_LOG(debug, "using existing connection", *firstBusy().codec_client_); - attachRequestToClient(firstBusy(), response_decoder, callbacks); - return nullptr; - } - - if (host_->cluster().resourceManager(priority_).pendingRequests().canCreate()) { - bool can_create_connection = - host_->cluster().resourceManager(priority_).connections().canCreate(); - if (!can_create_connection) { - host_->cluster().stats().upstream_cx_overflow_.inc(); - } - - // If we have no connections at all, make one no matter what so we don't starve. - if ((ready_clients_.empty() && busy_clients_.empty()) || can_create_connection) { - createNewConnection(); - } - - return newPendingRequest(response_decoder, callbacks); - } else { - ENVOY_LOG(debug, "max pending requests overflow"); - callbacks.onPoolFailure(ConnectionPool::PoolFailureReason::Overflow, absl::string_view(), - nullptr); - host_->cluster().stats().upstream_rq_pending_overflow_.inc(); - return nullptr; - } +ConnPoolImplBase::ActiveClientPtr ConnPoolImpl::instantiateActiveClient() { + return std::make_unique(*this); } void ConnPoolImpl::onDownstreamReset(ActiveClient& client) { @@ -91,61 +47,23 @@ void ConnPoolImpl::onDownstreamReset(ActiveClient& client) { void ConnPoolImpl::onResponseComplete(ActiveClient& client) { ENVOY_CONN_LOG(debug, "response complete", *client.codec_client_); + if (!client.stream_wrapper_->encode_complete_) { ENVOY_CONN_LOG(debug, "response before request complete", *client.codec_client_); onDownstreamReset(client); } else if (client.stream_wrapper_->close_connection_ || client.codec_client_->remoteClosed()) { ENVOY_CONN_LOG(debug, "saw upstream close connection", *client.codec_client_); onDownstreamReset(client); - } else if (client.remaining_requests_ > 0 && --client.remaining_requests_ == 0) { - ENVOY_CONN_LOG(debug, "maximum requests per connection", *client.codec_client_); - 
host_->cluster().stats().upstream_cx_max_requests_.inc(); - onDownstreamReset(client); } else { - // Upstream connection might be closed right after response is complete. Setting delay=true - // here to attach pending requests in next dispatcher loop to handle that case. - // https://github.com/envoyproxy/envoy/issues/2715 - processIdleClient(client, true); - } -} + client.stream_wrapper_.reset(); -void ConnPoolImpl::onUpstreamReady() { - upstream_ready_enabled_ = false; - while (!pending_requests_.empty() && !ready_clients_.empty()) { - ActiveClient& client = firstReady(); - ENVOY_CONN_LOG(debug, "attaching to next request", *client.codec_client_); - // There is work to do so bind a request to the client and move it to the busy list. Pending - // requests are pushed onto the front, so pull from the back. - attachRequestToClient(client, pending_requests_.back()->decoder_, - pending_requests_.back()->callbacks_); - pending_requests_.pop_back(); - client.moveBetweenLists(ready_clients_, busy_clients_); - } -} - -void ConnPoolImpl::processIdleClient(ActiveClient& client, bool delay) { - client.stream_wrapper_.reset(); - if (pending_requests_.empty() || delay) { - // There is nothing to service or delayed processing is requested, so just move the connection - // into the ready list. - ENVOY_CONN_LOG(debug, "moving to ready", *client.codec_client_); - client.moveBetweenLists(busy_clients_, ready_clients_); - } else { - // There is work to do immediately so bind a request to the client and move it to the busy list. - // Pending requests are pushed onto the front, so pull from the back. 
- ENVOY_CONN_LOG(debug, "attaching to next request", *client.codec_client_); - attachRequestToClient(client, pending_requests_.back()->decoder_, - pending_requests_.back()->callbacks_); - pending_requests_.pop_back(); - } + if (!pending_requests_.empty() && !upstream_ready_enabled_) { + upstream_ready_enabled_ = true; + upstream_ready_timer_->enableTimer(std::chrono::milliseconds(0)); + } - // - if (delay && !pending_requests_.empty() && !upstream_ready_enabled_) { - upstream_ready_enabled_ = true; - upstream_ready_timer_->enableTimer(std::chrono::milliseconds(0)); + checkForDrained(); } - - checkForDrained(); } ConnPoolImpl::StreamWrapper::StreamWrapper(StreamDecoder& response_decoder, ActiveClient& parent) @@ -153,8 +71,6 @@ ConnPoolImpl::StreamWrapper::StreamWrapper(StreamDecoder& response_decoder, Acti StreamDecoderWrapper(response_decoder), parent_(parent) { StreamEncoderWrapper::inner_.getStream().addCallbacks(*this); - parent_.parent_.host_->cluster().stats().upstream_rq_active_.inc(); - parent_.parent_.host_->stats().rq_active_.inc(); // TODO (tonya11en): At the time of writing, there is no way to mix different versions of HTTP // traffic in the same cluster, so incrementing the request count in the per-cluster resource @@ -162,13 +78,13 @@ ConnPoolImpl::StreamWrapper::StreamWrapper(StreamDecoder& response_decoder, Acti // counts would be tracked the same way in all HTTP versions. // // See: https://github.com/envoyproxy/envoy/issues/9215 - parent_.parent_.host_->cluster().resourceManager(parent_.parent_.priority_).requests().inc(); } ConnPoolImpl::StreamWrapper::~StreamWrapper() { - parent_.parent_.host_->cluster().stats().upstream_rq_active_.dec(); - parent_.parent_.host_->stats().rq_active_.dec(); - parent_.parent_.host_->cluster().resourceManager(parent_.parent_.priority_).requests().dec(); + // Upstream connection might be closed right after response is complete. 
Setting delay=true + // here to attach pending requests in next dispatcher loop to handle that case. + // https://github.com/envoyproxy/envoy/issues/2715 + parent_.parent().onRequestClosed(parent_, true); } void ConnPoolImpl::StreamWrapper::onEncodeComplete() { encode_complete_ = true; } @@ -196,38 +112,27 @@ void ConnPoolImpl::StreamWrapper::decodeHeaders(HeaderMapPtr&& headers, bool end void ConnPoolImpl::StreamWrapper::onDecodeComplete() { decode_complete_ = encode_complete_; - parent_.parent_.onResponseComplete(parent_); + parent_.parent().onResponseComplete(parent_); } ConnPoolImpl::ActiveClient::ActiveClient(ConnPoolImpl& parent) - : ConnPoolImplBase::ActiveClient(parent), parent_(parent), - remaining_requests_(parent_.host_->cluster().maxRequestsPerConnection()) { - - Upstream::Host::CreateConnectionData data = parent_.host_->createConnection( - parent_.dispatcher_, parent_.socket_options_, parent_.transport_socket_options_); - real_host_description_ = data.host_description_; - codec_client_ = parent_.createCodecClient(data); - codec_client_->addConnectionCallbacks(*this); - - parent_.host_->cluster().stats().upstream_cx_total_.inc(); - parent_.host_->cluster().stats().upstream_cx_active_.inc(); - parent_.host_->cluster().stats().upstream_cx_http1_total_.inc(); - parent_.host_->stats().cx_total_.inc(); - parent_.host_->stats().cx_active_.inc(); - parent_.host_->cluster().resourceManager(parent_.priority_).connections().inc(); - - codec_client_->setConnectionStats( - {parent_.host_->cluster().stats().upstream_cx_rx_bytes_total_, - parent_.host_->cluster().stats().upstream_cx_rx_bytes_buffered_, - parent_.host_->cluster().stats().upstream_cx_tx_bytes_total_, - parent_.host_->cluster().stats().upstream_cx_tx_bytes_buffered_, - &parent_.host_->cluster().stats().bind_errors_, nullptr}); + : ConnPoolImplBase::ActiveClient( + parent, parent.host_->cluster().maxRequestsPerConnection(), + 1 // HTTP1 always has a concurrent-request-limit of 1 per connection + ) { + 
parent.host_->cluster().stats().upstream_cx_http1_total_.inc(); +} + +bool ConnPoolImpl::ActiveClient::hasActiveRequests() const { return stream_wrapper_ != nullptr; } + +bool ConnPoolImpl::ActiveClient::closingWithIncompleteRequest() const { + return (stream_wrapper_ != nullptr) && (!stream_wrapper_->decode_complete_); } -ConnPoolImpl::ActiveClient::~ActiveClient() { - parent_.host_->cluster().stats().upstream_cx_active_.dec(); - parent_.host_->stats().cx_active_.dec(); - parent_.host_->cluster().resourceManager(parent_.priority_).connections().dec(); +StreamEncoder& ConnPoolImpl::ActiveClient::newStreamEncoder(StreamDecoder& response_decoder) { + ASSERT(!stream_wrapper_); + stream_wrapper_ = std::make_unique(response_decoder, *this); + return *stream_wrapper_; } CodecClientPtr ProdConnPoolImpl::createCodecClient(Upstream::Host::CreateConnectionData& data) { diff --git a/source/common/http/http1/conn_pool.h b/source/common/http/http1/conn_pool.h index 20f35bec33bdd..c8a76c64f24c0 100644 --- a/source/common/http/http1/conn_pool.h +++ b/source/common/http/http1/conn_pool.h @@ -35,13 +35,13 @@ class ConnPoolImpl : public ConnPoolImplBase { const Http::Http1Settings& settings, const Network::TransportSocketOptionsSharedPtr& transport_socket_options); - ~ConnPoolImpl() override; + ~ConnPoolImpl() override = default; // ConnectionPool::Instance Http::Protocol protocol() const override { return Http::Protocol::Http11; } - ConnectionPool::Cancellable* newStream(StreamDecoder& response_decoder, - ConnectionPool::Callbacks& callbacks) override; - Upstream::HostDescriptionConstSharedPtr host() const override { return host_; }; + + // ConnPoolImplBase + ActiveClientPtr instantiateActiveClient() override; protected: struct ActiveClient; @@ -62,7 +62,7 @@ class ConnPoolImpl : public ConnPoolImplBase { // Http::StreamCallbacks void onResetStream(StreamResetReason, absl::string_view) override { - parent_.parent_.onDownstreamReset(parent_); + 
parent_.parent().onDownstreamReset(parent_); } void onAboveWriteBufferHighWatermark() override {} void onBelowWriteBufferLowWatermark() override {} @@ -77,32 +77,23 @@ class ConnPoolImpl : public ConnPoolImplBase { struct ActiveClient : public ConnPoolImplBase::ActiveClient { ActiveClient(ConnPoolImpl& parent); - ~ActiveClient() override; + ~ActiveClient() override = default; + + ConnPoolImpl& parent() { return static_cast(parent_); } // ConnPoolImplBase::ActiveClient - bool hasActiveRequests() const override { return stream_wrapper_ != nullptr; } - bool closingWithIncompleteRequest() const override { - return (stream_wrapper_ != nullptr) && (!stream_wrapper_->decode_complete_); - } + bool hasActiveRequests() const override; + bool closingWithIncompleteRequest() const override; + StreamEncoder& newStreamEncoder(StreamDecoder& response_decoder) override; - ConnPoolImpl& parent_; StreamWrapperPtr stream_wrapper_; - uint64_t remaining_requests_; }; - void attachRequestToClient(ActiveClient& client, StreamDecoder& response_decoder, - ConnectionPool::Callbacks& callbacks); - virtual CodecClientPtr createCodecClient(Upstream::Host::CreateConnectionData& data) PURE; - void createNewConnection(); void onDownstreamReset(ActiveClient& client); void onResponseComplete(ActiveClient& client); - void onUpstreamReady(); - void processIdleClient(ActiveClient& client, bool delay); ActiveClient& firstReady() const { return static_cast(*ready_clients_.front()); } ActiveClient& firstBusy() const { return static_cast(*busy_clients_.front()); } - const Network::ConnectionSocket::OptionsSharedPtr socket_options_; - const Network::TransportSocketOptionsSharedPtr transport_socket_options_; Event::TimerPtr upstream_ready_timer_; bool upstream_ready_enabled_{false}; const Http1Settings settings_; diff --git a/source/common/http/http2/conn_pool.cc b/source/common/http/http2/conn_pool.cc index d33abefbf2490..b448fbcfc3355 100644 --- a/source/common/http/http2/conn_pool.cc +++ 
b/source/common/http/http2/conn_pool.cc @@ -18,102 +18,26 @@ ConnPoolImpl::ConnPoolImpl(Event::Dispatcher& dispatcher, Upstream::HostConstSha Upstream::ResourcePriority priority, const Network::ConnectionSocket::OptionsSharedPtr& options, const Network::TransportSocketOptionsSharedPtr& transport_socket_options) - : ConnPoolImplBase(std::move(host), std::move(priority), dispatcher), socket_options_(options), - transport_socket_options_(transport_socket_options) {} - -void ConnPoolImpl::newClientStream(ActiveClient& client, Http::StreamDecoder& response_decoder, - ConnectionPool::Callbacks& callbacks) { - if (!host_->cluster().resourceManager(priority_).requests().canCreate()) { - ENVOY_LOG(debug, "max requests overflow"); - callbacks.onPoolFailure(ConnectionPool::PoolFailureReason::Overflow, absl::string_view(), - nullptr); - host_->cluster().stats().upstream_rq_pending_overflow_.inc(); - } else { - ENVOY_CONN_LOG(debug, "creating stream", *client.codec_client_); - client.total_streams_++; - host_->stats().rq_total_.inc(); - host_->stats().rq_active_.inc(); - host_->cluster().stats().upstream_rq_total_.inc(); - host_->cluster().stats().upstream_rq_active_.inc(); - host_->cluster().resourceManager(priority_).requests().inc(); - callbacks.onPoolReady(client.codec_client_->newStream(response_decoder), - client.real_host_description_, client.codec_client_->streamInfo()); - } -} - -ConnectionPool::Cancellable* ConnPoolImpl::newStream(Http::StreamDecoder& response_decoder, - ConnectionPool::Callbacks& callbacks) { - ASSERT(drained_callbacks_.empty()); - - // First see if we need to handle max streams rollover. 
- uint64_t max_streams = host_->cluster().maxRequestsPerConnection(); - if (max_streams == 0) { - max_streams = maxTotalStreams(); - } - - if (primaryClient() && primaryClient()->total_streams_ >= max_streams) { - movePrimaryClientToDraining(); - } - - if (!primaryClient()) { - auto client = std::make_unique(*this); - client->moveIntoList(std::move(client), busy_clients_); - } - - // If there are no ready clients yet, queue up the request. - if (ready_clients_.empty()) { - // If we're not allowed to enqueue more requests, fail fast. - if (!host_->cluster().resourceManager(priority_).pendingRequests().canCreate()) { - ENVOY_LOG(debug, "max pending requests overflow"); - callbacks.onPoolFailure(ConnectionPool::PoolFailureReason::Overflow, absl::string_view(), - nullptr); - host_->cluster().stats().upstream_rq_pending_overflow_.inc(); - return nullptr; - } - - return newPendingRequest(response_decoder, callbacks); - } - - // We already have an active client that's connected to upstream, so attempt to establish a - // new stream. - newClientStream(*primaryClient(), response_decoder, callbacks); - return nullptr; -} - -void ConnPoolImpl::movePrimaryClientToDraining() { - ENVOY_CONN_LOG(debug, "moving primary to draining", *primaryClient()->codec_client_); - if (primaryClient()->codec_client_->numActiveRequests() == 0) { - // If we are making a new connection and the primary does not have any active requests just - // close it now. 
- primaryClient()->codec_client_->close(); - } else { - // TODO(ggreenway): get correct source list - primaryClient()->moveBetweenLists(busy_clients_, draining_clients_); - } + : ConnPoolImplBase(std::move(host), std::move(priority), dispatcher, options, + transport_socket_options) {} - ASSERT(!primaryClient()); +ConnPoolImplBase::ActiveClientPtr ConnPoolImpl::instantiateActiveClient() { + return std::make_unique(*this); } - void ConnPoolImpl::onGoAway(ActiveClient& client) { ENVOY_CONN_LOG(debug, "remote goaway", *client.codec_client_); host_->cluster().stats().upstream_cx_close_notify_.inc(); - if (client.state_ == ConnPoolImplBase::ActiveClient::State::READY || - client.state_ == ConnPoolImplBase::ActiveClient::State::BUSY) { - movePrimaryClientToDraining(); + if (client.state_ != ActiveClient::State::DRAINING) { + if (client.codec_client_->numActiveRequests() == 0) { + client.codec_client_->close(); + } else { + setActiveClientState(client, ActiveClient::State::DRAINING); + } } } void ConnPoolImpl::onStreamDestroy(ActiveClient& client) { - ENVOY_CONN_LOG(debug, "destroying stream: {} remaining", *client.codec_client_, - client.codec_client_->numActiveRequests()); - host_->stats().rq_active_.dec(); - host_->cluster().stats().upstream_rq_active_.dec(); - host_->cluster().resourceManager(priority_).requests().dec(); - if (client.state_ == ActiveClient::State::DRAINING && - client.codec_client_->numActiveRequests() == 0) { - // Close out the draining client if we no long have active requests. - client.codec_client_->close(); - } + onRequestClosed(client, false); // If we are destroying this stream because of a disconnect, do not check for drain here. We will // wait until the connection has been fully drained of streams and then check in the connection @@ -135,42 +59,34 @@ void ConnPoolImpl::onStreamReset(ActiveClient& client, Http::StreamResetReason r } } -void ConnPoolImpl::onUpstreamReady() { - // Establishes new codec streams for each pending request. 
- while (!pending_requests_.empty() && primaryClient() != nullptr) { - newClientStream(*primaryClient(), pending_requests_.back()->decoder_, - pending_requests_.back()->callbacks_); - pending_requests_.pop_back(); +uint64_t ConnPoolImpl::maxRequestsPerConnection() { + uint64_t max_streams = host_->cluster().maxRequestsPerConnection(); + if (max_streams == 0) { + max_streams = maxTotalStreams(); } + return max_streams; } ConnPoolImpl::ActiveClient::ActiveClient(ConnPoolImpl& parent) - : ConnPoolImplBase::ActiveClient(parent), parent_(parent) { - Upstream::Host::CreateConnectionData data = parent_.host_->createConnection( - parent_.dispatcher_, parent_.socket_options_, parent_.transport_socket_options_); - real_host_description_ = data.host_description_; - codec_client_ = parent_.createCodecClient(data); - codec_client_->addConnectionCallbacks(*this); + : ConnPoolImplBase::ActiveClient( + parent, parent.maxRequestsPerConnection(), + std::numeric_limits::max() /* TODO(ggreenway): get limit from config */) { codec_client_->setCodecClientCallbacks(*this); codec_client_->setCodecConnectionCallbacks(*this); - parent_.host_->stats().cx_total_.inc(); - parent_.host_->stats().cx_active_.inc(); - parent_.host_->cluster().stats().upstream_cx_total_.inc(); - parent_.host_->cluster().stats().upstream_cx_active_.inc(); - parent_.host_->cluster().stats().upstream_cx_http2_total_.inc(); - - codec_client_->setConnectionStats( - {parent_.host_->cluster().stats().upstream_cx_rx_bytes_total_, - parent_.host_->cluster().stats().upstream_cx_rx_bytes_buffered_, - parent_.host_->cluster().stats().upstream_cx_tx_bytes_total_, - parent_.host_->cluster().stats().upstream_cx_tx_bytes_buffered_, - &parent_.host_->cluster().stats().bind_errors_, nullptr}); + parent.host_->cluster().stats().upstream_cx_http2_total_.inc(); +} + +bool ConnPoolImpl::ActiveClient::hasActiveRequests() const { + return codec_client_->numActiveRequests() > 0; +} + +bool 
ConnPoolImpl::ActiveClient::closingWithIncompleteRequest() const { + return closed_with_active_rq_; } -ConnPoolImpl::ActiveClient::~ActiveClient() { - parent_.host_->stats().cx_active_.dec(); - parent_.host_->cluster().stats().upstream_cx_active_.dec(); +StreamEncoder& ConnPoolImpl::ActiveClient::newStreamEncoder(StreamDecoder& response_decoder) { + return codec_client_->newStream(response_decoder); } CodecClientPtr ProdConnPoolImpl::createCodecClient(Upstream::Host::CreateConnectionData& data) { diff --git a/source/common/http/http2/conn_pool.h b/source/common/http/http2/conn_pool.h index 60625df82bd34..2c9a8792ab71a 100644 --- a/source/common/http/http2/conn_pool.h +++ b/source/common/http/http2/conn_pool.h @@ -30,62 +30,42 @@ class ConnPoolImpl : public ConnPoolImplBase { // Http::ConnectionPool::Instance Http::Protocol protocol() const override { return Http::Protocol::Http2; } - ConnectionPool::Cancellable* newStream(Http::StreamDecoder& response_decoder, - ConnectionPool::Callbacks& callbacks) override; - Upstream::HostDescriptionConstSharedPtr host() const override { return host_; }; + + // ConnPoolImplBase + ActiveClientPtr instantiateActiveClient() override; protected: struct ActiveClient : public CodecClientCallbacks, public Http::ConnectionCallbacks, public ConnPoolImplBase::ActiveClient { ActiveClient(ConnPoolImpl& parent); - ~ActiveClient() override; + ~ActiveClient() override = default; + + ConnPoolImpl& parent() { return static_cast(parent_); } // ConnPoolImpl::ActiveClient - bool hasActiveRequests() const override { return codec_client_->numActiveRequests() > 0; } - bool closingWithIncompleteRequest() const override { return closed_with_active_rq_; } + bool hasActiveRequests() const override; + bool closingWithIncompleteRequest() const override; + StreamEncoder& newStreamEncoder(StreamDecoder& response_decoder) override; // CodecClientCallbacks - void onStreamDestroy() override { parent_.onStreamDestroy(*this); } + void onStreamDestroy() override { 
parent().onStreamDestroy(*this); } void onStreamReset(Http::StreamResetReason reason) override { - parent_.onStreamReset(*this, reason); + parent().onStreamReset(*this, reason); } // Http::ConnectionCallbacks - void onGoAway() override { parent_.onGoAway(*this); } + void onGoAway() override { parent().onGoAway(*this); } - ConnPoolImpl& parent_; - uint64_t total_streams_{}; bool closed_with_active_rq_{}; }; - using ActiveClientPtr = std::unique_ptr; - - // TODO: delete me when no longer needed - ActiveClient* primaryClient() { - if (!ready_clients_.empty()) { - return static_cast(ready_clients_.front().get()); - } - - if (!busy_clients_.empty()) { - return static_cast(busy_clients_.front().get()); - } - - return nullptr; - } - - virtual CodecClientPtr createCodecClient(Upstream::Host::CreateConnectionData& data) PURE; virtual uint32_t maxTotalStreams() PURE; + uint64_t maxRequestsPerConnection(); void movePrimaryClientToDraining(); void onGoAway(ActiveClient& client); void onStreamDestroy(ActiveClient& client); void onStreamReset(ActiveClient& client, Http::StreamResetReason reason); - void newClientStream(ActiveClient& client, Http::StreamDecoder& response_decoder, - ConnectionPool::Callbacks& callbacks); - void onUpstreamReady(); - - const Network::ConnectionSocket::OptionsSharedPtr socket_options_; - const Network::TransportSocketOptionsSharedPtr transport_socket_options_; }; /** diff --git a/test/common/http/http1/conn_pool_test.cc b/test/common/http/http1/conn_pool_test.cc index b8fe1eec6c22e..8f6026f0d8627 100644 --- a/test/common/http/http1/conn_pool_test.cc +++ b/test/common/http/http1/conn_pool_test.cc @@ -98,10 +98,10 @@ class ConnPoolImplForTest : public ConnPoolImpl { } }, Upstream::makeTestHost(cluster, "tcp://127.0.0.1:9000"), *test_client.client_dispatcher_); - EXPECT_CALL(*test_client.connect_timer_, enableTimer(_, _)); EXPECT_CALL(mock_dispatcher_, createClientConnection_(_, _, _, _)) .WillOnce(Return(test_client.connection_)); EXPECT_CALL(*this, 
createCodecClient_()).WillOnce(Return(test_client.codec_client_)); + EXPECT_CALL(*test_client.connect_timer_, enableTimer(_, _)); ON_CALL(*test_client.codec_, protocol()).WillByDefault(Return(protocol)); } @@ -132,7 +132,7 @@ class Http1ConnPoolImplTest : public testing::Test { conn_pool_(dispatcher_, cluster_, upstream_ready_timer_) {} ~Http1ConnPoolImplTest() override { - EXPECT_TRUE(TestUtility::gaugesZeroed(cluster_->stats_store_.gauges())); + EXPECT_EQ("", TestUtility::nonZeroedGauges(cluster_->stats_store_.gauges())); } NiceMock dispatcher_; @@ -170,8 +170,8 @@ struct ActiveTestRequest { } if (type == Type::CreateConnection) { - EXPECT_CALL(*parent_.conn_pool_.test_clients_[client_index_].connect_timer_, disableTimer()); expectNewStream(); + EXPECT_CALL(*parent_.conn_pool_.test_clients_[client_index_].connect_timer_, disableTimer()); parent.conn_pool_.test_clients_[client_index_].connection_->raiseEvent( Network::ConnectionEvent::Connected); } @@ -417,34 +417,34 @@ TEST_F(Http1ConnPoolImplTest, MeasureConnectTime) { // Move time forward, signal that the first connect completed and verify the time to connect. uint64_t upstream_cx_connect_ms1 = 0; simulated_time.sleep(std::chrono::milliseconds(sleep2_ms)); - EXPECT_CALL(*conn_pool_.test_clients_[0].connect_timer_, disableTimer()); EXPECT_CALL(cluster_->stats_store_, deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_cx_connect_ms"), _)) .WillOnce(SaveArg<1>(&upstream_cx_connect_ms1)); r1.expectNewStream(); + EXPECT_CALL(*conn_pool_.test_clients_[0].connect_timer_, disableTimer()); conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); EXPECT_EQ(sleep1_ms + sleep2_ms, upstream_cx_connect_ms1); // Move time forward, signal that the second connect completed and verify the time to connect. 
uint64_t upstream_cx_connect_ms2 = 0; simulated_time.sleep(std::chrono::milliseconds(sleep3_ms)); - EXPECT_CALL(*conn_pool_.test_clients_[1].connect_timer_, disableTimer()); EXPECT_CALL(cluster_->stats_store_, deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_cx_connect_ms"), _)) .WillOnce(SaveArg<1>(&upstream_cx_connect_ms2)); r2.expectNewStream(); + EXPECT_CALL(*conn_pool_.test_clients_[1].connect_timer_, disableTimer()); conn_pool_.test_clients_[1].connection_->raiseEvent(Network::ConnectionEvent::Connected); EXPECT_EQ(sleep2_ms + sleep3_ms, upstream_cx_connect_ms2); // Cleanup, cause the connections to go away. for (auto& test_client : conn_pool_.test_clients_) { - EXPECT_CALL(conn_pool_, onClientDestroy()); EXPECT_CALL( cluster_->stats_store_, deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_cx_length_ms"), _)); + EXPECT_CALL(conn_pool_, onClientDestroy()); test_client.connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + dispatcher_.clearDeferredDeleteList(); } - dispatcher_.clearDeferredDeleteList(); } /** @@ -874,10 +874,10 @@ TEST_F(Http1ConnPoolImplTest, RemoteCloseToCompleteResponse) { NiceMock request_encoder; Http::StreamDecoder* inner_decoder; - EXPECT_CALL(*conn_pool_.test_clients_[0].connect_timer_, disableTimer()); EXPECT_CALL(*conn_pool_.test_clients_[0].codec_, newStream(_)) .WillOnce(DoAll(SaveArgAddress(&inner_decoder), ReturnRef(request_encoder))); EXPECT_CALL(callbacks.pool_ready_, ready()); + EXPECT_CALL(*conn_pool_.test_clients_[0].connect_timer_, disableTimer()); conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); callbacks.outer_encoder_->encodeHeaders(TestHeaderMapImpl{}, true); diff --git a/test/common/http/http2/conn_pool_test.cc b/test/common/http/http2/conn_pool_test.cc index c7dc93f8ccc74..c777649547cfa 100644 --- a/test/common/http/http2/conn_pool_test.cc +++ b/test/common/http/http2/conn_pool_test.cc @@ -67,7 +67,7 @@ class Http2ConnPoolImplTest : 
public testing::Test { pool_(dispatcher_, host_, Upstream::ResourcePriority::Default, nullptr, nullptr) {} ~Http2ConnPoolImplTest() override { - EXPECT_TRUE(TestUtility::gaugesZeroed(cluster_->stats_store_.gauges())); + EXPECT_EQ("", TestUtility::nonZeroedGauges(cluster_->stats_store_.gauges())); } // Creates a new test client, expecting a new connection to be created and associated @@ -79,7 +79,6 @@ class Http2ConnPoolImplTest : public testing::Test { test_client.codec_ = new NiceMock(); test_client.connect_timer_ = new NiceMock(&dispatcher_); test_client.client_dispatcher_ = api_->allocateDispatcher(); - EXPECT_CALL(*test_client.connect_timer_, enableTimer(_, _)); EXPECT_CALL(dispatcher_, createClientConnection_(_, _, _, _)) .WillOnce(Return(test_client.connection_)); auto cluster = std::make_shared>(); @@ -96,6 +95,7 @@ class Http2ConnPoolImplTest : public testing::Test { .WillOnce(Invoke([this](Upstream::Host::CreateConnectionData&) -> CodecClient* { return test_clients_.back().codec_client_; })); + EXPECT_CALL(*test_client.connect_timer_, enableTimer(_, _)); } // Connects a pending connection for client with the given index, asserting @@ -206,6 +206,8 @@ TEST_F(Http2ConnPoolImplTest, Host) { EXPECT_EQ(host_, pool_.host()); } * Verify that connections are drained when requested. */ TEST_F(Http2ConnPoolImplTest, DrainConnections) { + cluster_->resetResourceManager(2, 1024, 1024, 1, 1); + InSequence s; pool_.max_streams_ = 1; @@ -218,24 +220,25 @@ TEST_F(Http2ConnPoolImplTest, DrainConnections) { EXPECT_CALL(r1.inner_encoder_, encodeHeaders(_, true)); r1.callbacks_.outer_encoder_->encodeHeaders(HeaderMapImpl{}, true); + // With max_streams == 1, the second request moves the first connection + // to draining. 
expectClientCreate(); ActiveTestRequest r2(*this, 1, false); expectClientConnect(1, r2); EXPECT_CALL(r2.inner_encoder_, encodeHeaders(_, true)); r2.callbacks_.outer_encoder_->encodeHeaders(HeaderMapImpl{}, true); - // This will move primary to draining and destroy draining. + // This will move second connection to draining. pool_.drainConnections(); - EXPECT_CALL(*this, onClientDestroy()); - dispatcher_.clearDeferredDeleteList(); - // This will destroy draining. + // This will destroy the 2 draining connections. + test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); test_clients_[1].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); - EXPECT_CALL(*this, onClientDestroy()); + EXPECT_CALL(*this, onClientDestroy()).Times(2); dispatcher_.clearDeferredDeleteList(); EXPECT_EQ(2U, cluster_->stats_.upstream_cx_destroy_.value()); - EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_remote_.value()); + EXPECT_EQ(2U, cluster_->stats_.upstream_cx_destroy_remote_.value()); } // Verifies that requests are queued up in the conn pool until the connection becomes ready. 
@@ -394,10 +397,10 @@ TEST_F(Http2ConnPoolImplTest, VerifyConnectionTimingStats) { EXPECT_CALL(r1.decoder_, decodeHeaders_(_, true)); r1.inner_decoder_->decodeHeaders(HeaderMapPtr{new HeaderMapImpl{}}, true); - test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); - EXPECT_CALL(*this, onClientDestroy()); EXPECT_CALL(cluster_->stats_store_, deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_cx_length_ms"), _)); + test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + EXPECT_CALL(*this, onClientDestroy()); dispatcher_.clearDeferredDeleteList(); EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_.value()); @@ -513,6 +516,8 @@ TEST_F(Http2ConnPoolImplTest, DrainDisconnectWithActiveRequest) { } TEST_F(Http2ConnPoolImplTest, DrainDisconnectDrainingWithActiveRequest) { + cluster_->resetResourceManager(2, 1024, 1024, 1, 1); + InSequence s; pool_.max_streams_ = 1; @@ -548,6 +553,8 @@ TEST_F(Http2ConnPoolImplTest, DrainDisconnectDrainingWithActiveRequest) { } TEST_F(Http2ConnPoolImplTest, DrainPrimary) { + cluster_->resetResourceManager(2, 1024, 1024, 1, 1); + InSequence s; pool_.max_streams_ = 1; @@ -582,6 +589,8 @@ TEST_F(Http2ConnPoolImplTest, DrainPrimary) { } TEST_F(Http2ConnPoolImplTest, DrainPrimaryNoActiveRequest) { + cluster_->resetResourceManager(2, 1024, 1024, 1, 1); + InSequence s; pool_.max_streams_ = 1; @@ -590,10 +599,10 @@ TEST_F(Http2ConnPoolImplTest, DrainPrimaryNoActiveRequest) { expectClientConnect(0, r1); EXPECT_CALL(r1.inner_encoder_, encodeHeaders(_, true)); r1.callbacks_.outer_encoder_->encodeHeaders(HeaderMapImpl{}, true); + EXPECT_CALL(dispatcher_, deferredDelete_(_)); EXPECT_CALL(r1.decoder_, decodeHeaders_(_, true)); r1.inner_decoder_->decodeHeaders(HeaderMapPtr{new HeaderMapImpl{}}, true); - EXPECT_CALL(dispatcher_, deferredDelete_(_)); expectClientCreate(); ActiveTestRequest r2(*this, 1, false); expectClientConnect(1, r2); @@ -601,11 +610,11 @@ TEST_F(Http2ConnPoolImplTest, 
DrainPrimaryNoActiveRequest) { dispatcher_.clearDeferredDeleteList(); EXPECT_CALL(r2.inner_encoder_, encodeHeaders(_, true)); r2.callbacks_.outer_encoder_->encodeHeaders(HeaderMapImpl{}, true); + EXPECT_CALL(dispatcher_, deferredDelete_(_)); EXPECT_CALL(r2.decoder_, decodeHeaders_(_, true)); r2.inner_decoder_->decodeHeaders(HeaderMapPtr{new HeaderMapImpl{}}, true); ReadyWatcher drained; - EXPECT_CALL(dispatcher_, deferredDelete_(_)); EXPECT_CALL(drained, ready()); pool_.addDrainedCallback([&]() -> void { drained.ready(); }); diff --git a/test/test_common/utility.cc b/test/test_common/utility.cc index cc15a6087c615..af50748a89500 100644 --- a/test/test_common/utility.cc +++ b/test/test_common/utility.cc @@ -311,16 +311,22 @@ std::string TestUtility::convertTime(const std::string& input, const std::string } // static -bool TestUtility::gaugesZeroed(const std::vector& gauges) { - // Returns true if all gauges are 0 except the circuit_breaker remaining resource +std::string TestUtility::nonZeroedGauges(const std::vector& gauges) { + // Returns all gauges that are not 0 except the circuit_breaker remaining resource // gauges which default to the resource max. std::regex omitted(".*circuit_breakers\\..*\\.remaining.*"); + std::string non_zero; for (const Stats::GaugeSharedPtr& gauge : gauges) { if (!std::regex_match(gauge->name(), omitted) && gauge->value() != 0) { - return false; + non_zero.append(fmt::format("{}: {}; ", gauge->name(), gauge->value())); } } - return true; + return non_zero; } + +// static +bool TestUtility::gaugesZeroed(const std::vector& gauges) { + return nonZeroedGauges(gauges).empty(); } // static diff --git a/test/test_common/utility.h b/test/test_common/utility.h index b7f2087e85810..b22a040a307e4 100644 --- a/test/test_common/utility.h +++ b/test/test_common/utility.h @@ -509,6 +509,11 @@ class TestUtility { static bool gaugesZeroed( const std::vector>& gauges); + /** + * Returns the members of gauges that are not zero. 
Uses the same regex filter as gaugesZeroed(). + */ + static std::string nonZeroedGauges(const std::vector& gauges); + // Strict variants of Protobuf::MessageUtil static void loadFromJson(const std::string& json, Protobuf::Message& message) { MessageUtil::loadFromJson(json, message, ProtobufMessage::getStrictValidationVisitor()); From 6c5f650a29e66ded704919cecbfff4a348d8b21b Mon Sep 17 00:00:00 2001 From: Greg Greenway Date: Thu, 9 Jan 2020 11:22:35 -0800 Subject: [PATCH 08/41] fix_format Signed-off-by: Greg Greenway --- source/common/http/conn_pool_base.cc | 3 +-- source/common/http/conn_pool_base.h | 2 +- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/source/common/http/conn_pool_base.cc b/source/common/http/conn_pool_base.cc index cad8aeaac6dca..7f9496088bf09 100644 --- a/source/common/http/conn_pool_base.cc +++ b/source/common/http/conn_pool_base.cc @@ -1,8 +1,7 @@ #include "common/http/conn_pool_base.h" -#include "common/stats/timespan_impl.h" -#include "common/upstream/upstream_impl.h" #include "common/stats/timespan_impl.h" +#include "common/upstream/upstream_impl.h" namespace Envoy { namespace Http { diff --git a/source/common/http/conn_pool_base.h b/source/common/http/conn_pool_base.h index 730ec43237c0b..6567332d1c20c 100644 --- a/source/common/http/conn_pool_base.h +++ b/source/common/http/conn_pool_base.h @@ -2,13 +2,13 @@ #include "envoy/event/dispatcher.h" #include "envoy/http/conn_pool.h" +#include "envoy/network/connection.h" #include "envoy/stats/timespan.h" #include "common/common/linked_object.h" #include "common/http/codec_client.h" #include "absl/strings/string_view.h" -#include "envoy/network/connection.h" namespace Envoy { namespace Http { From 206c6f33fd8bed67684deb44e76e7f7099ca3ad5 Mon Sep 17 00:00:00 2001 From: Greg Greenway Date: Thu, 9 Jan 2020 12:22:21 -0800 Subject: [PATCH 09/41] cleanup: remove maxTotalStreams(), used only by tests. This can be achieved by using the existing cluster config mocks. 
Signed-off-by: Greg Greenway --- source/common/http/http2/conn_pool.cc | 9 ++------- source/common/http/http2/conn_pool.h | 10 ++++------ test/common/http/http2/conn_pool_test.cc | 20 ++++++++------------ 3 files changed, 14 insertions(+), 25 deletions(-) diff --git a/source/common/http/http2/conn_pool.cc b/source/common/http/http2/conn_pool.cc index b448fbcfc3355..6dffd065b2d90 100644 --- a/source/common/http/http2/conn_pool.cc +++ b/source/common/http/http2/conn_pool.cc @@ -60,11 +60,8 @@ void ConnPoolImpl::onStreamReset(ActiveClient& client, Http::StreamResetReason r } uint64_t ConnPoolImpl::maxRequestsPerConnection() { - uint64_t max_streams = host_->cluster().maxRequestsPerConnection(); - if (max_streams == 0) { - max_streams = maxTotalStreams(); - } - return max_streams; + uint64_t max_streams_config = host_->cluster().maxRequestsPerConnection(); + return (max_streams_config != 0) ? max_streams_config : DEFAULT_MAX_STREAMS; } ConnPoolImpl::ActiveClient::ActiveClient(ConnPoolImpl& parent) @@ -95,8 +92,6 @@ CodecClientPtr ProdConnPoolImpl::createCodecClient(Upstream::Host::CreateConnect return codec; } -uint32_t ProdConnPoolImpl::maxTotalStreams() { return MAX_STREAMS; } - } // namespace Http2 } // namespace Http } // namespace Envoy diff --git a/source/common/http/http2/conn_pool.h b/source/common/http/http2/conn_pool.h index 2c9a8792ab71a..2c78cc9c22106 100644 --- a/source/common/http/http2/conn_pool.h +++ b/source/common/http/http2/conn_pool.h @@ -60,12 +60,15 @@ class ConnPoolImpl : public ConnPoolImplBase { bool closed_with_active_rq_{}; }; - virtual uint32_t maxTotalStreams() PURE; uint64_t maxRequestsPerConnection(); void movePrimaryClientToDraining(); void onGoAway(ActiveClient& client); void onStreamDestroy(ActiveClient& client); void onStreamReset(ActiveClient& client, Http::StreamResetReason reason); + + // All streams are 2^31. Client streams are half that, minus stream 0. Just to be on the safe + // side we do 2^29. 
+ static const uint64_t DEFAULT_MAX_STREAMS = (1 << 29); }; /** @@ -77,11 +80,6 @@ class ProdConnPoolImpl : public ConnPoolImpl { private: CodecClientPtr createCodecClient(Upstream::Host::CreateConnectionData& data) override; - uint32_t maxTotalStreams() override; - - // All streams are 2^31. Client streams are half that, minus stream 0. Just to be on the safe - // side we do 2^29. - static const uint64_t MAX_STREAMS = (1 << 29); }; } // namespace Http2 diff --git a/test/common/http/http2/conn_pool_test.cc b/test/common/http/http2/conn_pool_test.cc index c777649547cfa..d8b9064e2ea2e 100644 --- a/test/common/http/http2/conn_pool_test.cc +++ b/test/common/http/http2/conn_pool_test.cc @@ -44,10 +44,6 @@ class TestConnPoolImpl : public ConnPoolImpl { } MOCK_METHOD1(createCodecClient_, CodecClient*(Upstream::Host::CreateConnectionData& data)); - - uint32_t maxTotalStreams() override { return max_streams_; } - - uint32_t max_streams_{std::numeric_limits::max()}; }; class ActiveTestRequest; @@ -209,7 +205,7 @@ TEST_F(Http2ConnPoolImplTest, DrainConnections) { cluster_->resetResourceManager(2, 1024, 1024, 1, 1); InSequence s; - pool_.max_streams_ = 1; + cluster_->max_requests_per_connection_ = 1; // Test drain connections call prior to any connections being created. pool_.drainConnections(); @@ -282,7 +278,7 @@ TEST_F(Http2ConnPoolImplTest, PendingRequests) { // fails to be established. TEST_F(Http2ConnPoolImplTest, PendingRequestsFailure) { InSequence s; - pool_.max_streams_ = 10; + cluster_->max_requests_per_connection_ = 10; // Create three requests. These should be queued up. 
expectClientCreate(); @@ -494,7 +490,7 @@ TEST_F(Http2ConnPoolImplTest, RemoteReset) { TEST_F(Http2ConnPoolImplTest, DrainDisconnectWithActiveRequest) { InSequence s; - pool_.max_streams_ = 1; + cluster_->max_requests_per_connection_ = 1; expectClientCreate(); ActiveTestRequest r1(*this, 0, false); @@ -519,7 +515,7 @@ TEST_F(Http2ConnPoolImplTest, DrainDisconnectDrainingWithActiveRequest) { cluster_->resetResourceManager(2, 1024, 1024, 1, 1); InSequence s; - pool_.max_streams_ = 1; + cluster_->max_requests_per_connection_ = 1; expectClientCreate(); ActiveTestRequest r1(*this, 0, false); @@ -556,7 +552,7 @@ TEST_F(Http2ConnPoolImplTest, DrainPrimary) { cluster_->resetResourceManager(2, 1024, 1024, 1, 1); InSequence s; - pool_.max_streams_ = 1; + cluster_->max_requests_per_connection_ = 1; expectClientCreate(); ActiveTestRequest r1(*this, 0, false); @@ -592,7 +588,7 @@ TEST_F(Http2ConnPoolImplTest, DrainPrimaryNoActiveRequest) { cluster_->resetResourceManager(2, 1024, 1024, 1, 1); InSequence s; - pool_.max_streams_ = 1; + cluster_->max_requests_per_connection_ = 1; expectClientCreate(); ActiveTestRequest r1(*this, 0, false); @@ -751,7 +747,7 @@ TEST_F(Http2ConnPoolImplTest, ResponseCompletedConnectionReadyNoActiveConnection // Show that if connections are draining, they're still considered active. TEST_F(Http2ConnPoolImplTest, DrainingConnectionsConsideredActive) { - pool_.max_streams_ = 1; + cluster_->max_requests_per_connection_ = 1; expectClientCreate(); ActiveTestRequest r1(*this, 0, false); expectClientConnect(0, r1); @@ -765,7 +761,7 @@ TEST_F(Http2ConnPoolImplTest, DrainingConnectionsConsideredActive) { // Show that once we've drained all connections, there are no longer any active. 
TEST_F(Http2ConnPoolImplTest, DrainedConnectionsNotActive) { - pool_.max_streams_ = 1; + cluster_->max_requests_per_connection_ = 1; expectClientCreate(); ActiveTestRequest r1(*this, 0, false); expectClientConnect(0, r1); From 30c38e915238e6d5811fcf60abe0c05b68136290 Mon Sep 17 00:00:00 2001 From: Greg Greenway Date: Thu, 9 Jan 2020 12:37:39 -0800 Subject: [PATCH 10/41] cleanup: translate 0 to MAX so that special code for zero isn't needed Signed-off-by: Greg Greenway --- source/common/http/conn_pool_base.cc | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/source/common/http/conn_pool_base.cc b/source/common/http/conn_pool_base.cc index 7f9496088bf09..2005c443def0b 100644 --- a/source/common/http/conn_pool_base.cc +++ b/source/common/http/conn_pool_base.cc @@ -43,16 +43,8 @@ void ConnPoolImplBase::attachRequestToClient(ActiveClient& client, StreamDecoder ENVOY_CONN_LOG(debug, "creating stream", *client.codec_client_); StreamEncoder& new_encoder = client.newStreamEncoder(response_decoder); - bool drain = false; - // remaining_requests_ == 0 means no limit, so handle 0 specially. - if (client.remaining_requests_ > 0) { - client.remaining_requests_--; - if (client.remaining_requests_ == 0) { - drain = true; - } - } - - if (drain) { + client.remaining_requests_--; + if (client.remaining_requests_ == 0) { ENVOY_CONN_LOG(debug, "maximum requests per connection, DRAINING", *client.codec_client_); host_->cluster().stats().upstream_cx_max_requests_.inc(); setActiveClientState(client, ActiveClient::State::DRAINING); @@ -344,11 +336,19 @@ void ConnPoolImplBase::onPendingRequestCancel(PendingRequest& request) { checkForDrained(); } +namespace { +// Translate zero to UINT64_MAX so that the zero/unlimited case doesn't +// have to be handled specially. +uint64_t translateZeroToUnlimited(uint64_t limit) { + return (limit != 0) ? 
limit : std::numeric_limits::max(); +} +} // namespace + ConnPoolImplBase::ActiveClient::ActiveClient(ConnPoolImplBase& parent, uint64_t lifetime_request_limit, uint64_t concurrent_request_limit) - : parent_(parent), remaining_requests_(lifetime_request_limit), - concurrent_request_limit_(concurrent_request_limit), + : parent_(parent), remaining_requests_(translateZeroToUnlimited(lifetime_request_limit)), + concurrent_request_limit_(translateZeroToUnlimited(concurrent_request_limit)), connect_timer_(parent_.dispatcher_.createTimer([this]() -> void { onConnectTimeout(); })) { Upstream::Host::CreateConnectionData data = parent_.host_->createConnection( parent_.dispatcher_, parent_.socket_options_, parent_.transport_socket_options_); From 85b0e6ecf98315c1ca14130aab7e624af9c2b45e Mon Sep 17 00:00:00 2001 From: Greg Greenway Date: Thu, 9 Jan 2020 13:52:49 -0800 Subject: [PATCH 11/41] upstream: Enforce max_concurrent_streams on http2 upstream pools. Create additional connections when the limit is reached. Signed-off-by: Greg Greenway --- api/envoy/api/v2/core/protocol.proto | 4 ++ source/common/http/http2/conn_pool.cc | 2 +- test/common/http/http2/conn_pool_test.cc | 53 ++++++++++++++++++++++++ 3 files changed, 58 insertions(+), 1 deletion(-) diff --git a/api/envoy/api/v2/core/protocol.proto b/api/envoy/api/v2/core/protocol.proto index 6e32ac197ac89..99e26f4d863f6 100644 --- a/api/envoy/api/v2/core/protocol.proto +++ b/api/envoy/api/v2/core/protocol.proto @@ -117,6 +117,10 @@ message Http2ProtocolOptions { // `Maximum concurrent streams `_ // allowed for peer on one HTTP/2 connection. Valid values range from 1 to 2147483647 (2^31 - 1) // and defaults to 2147483647. + // + // For upstream connections, this also limits how many streams Envoy will initiate concurrently + // on a single connection. If the limit is reached, Envoy may queue requests or establish + // additional connections (as allowed per circuit breaker limits). 
google.protobuf.UInt32Value max_concurrent_streams = 2 [(validate.rules).uint32 = {lte: 2147483647 gte: 1}]; diff --git a/source/common/http/http2/conn_pool.cc b/source/common/http/http2/conn_pool.cc index 6dffd065b2d90..b689bb1ca05ed 100644 --- a/source/common/http/http2/conn_pool.cc +++ b/source/common/http/http2/conn_pool.cc @@ -67,7 +67,7 @@ uint64_t ConnPoolImpl::maxRequestsPerConnection() { ConnPoolImpl::ActiveClient::ActiveClient(ConnPoolImpl& parent) : ConnPoolImplBase::ActiveClient( parent, parent.maxRequestsPerConnection(), - std::numeric_limits::max() /* TODO(ggreenway): get limit from config */) { + parent.host_->cluster().http2Settings().max_concurrent_streams_) { codec_client_->setCodecClientCallbacks(*this); codec_client_->setCodecConnectionCallbacks(*this); diff --git a/test/common/http/http2/conn_pool_test.cc b/test/common/http/http2/conn_pool_test.cc index d8b9064e2ea2e..dab629591be63 100644 --- a/test/common/http/http2/conn_pool_test.cc +++ b/test/common/http/http2/conn_pool_test.cc @@ -237,6 +237,59 @@ TEST_F(Http2ConnPoolImplTest, DrainConnections) { EXPECT_EQ(2U, cluster_->stats_.upstream_cx_destroy_remote_.value()); } +// Test that cluster.http2_protocol_options.max_concurrent_streams limits +// concurrent requests and causes additional connections to be created. 
+TEST_F(Http2ConnPoolImplTest, MaxConcurrentRequestsPerStream) { + cluster_->resetResourceManager(2, 1024, 1024, 1, 1); + cluster_->http2_settings_.max_concurrent_streams_ = 1; + + InSequence s; + + { + // Create request and complete it + expectClientCreate(); + ActiveTestRequest r(*this, 0, false); + expectClientConnect(0, r); + completeRequest(r); + } + + // Previous request completed, so this one will re-use the connection + { + ActiveTestRequest r(*this, 0, true); + completeRequest(r); + } + + // Two concurrent requests causes one additional connection to be created + { + ActiveTestRequest r1(*this, 0, true); + expectClientCreate(); + ActiveTestRequest r2(*this, 1, false); + expectClientConnect(1, r2); + + // Complete one of them, and create another, and it will re-use the connection + completeRequest(r2); + ActiveTestRequest r3(*this, 1, true); + + completeRequest(r1); + completeRequest(r3); + } + + // Create two more requests; both should use existing connections + { + ActiveTestRequest r1(*this, 1, true); + ActiveTestRequest r2(*this, 0, true); + completeRequest(r1); + completeRequest(r2); + } + + test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + test_clients_[1].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + EXPECT_CALL(*this, onClientDestroy()).Times(2); + dispatcher_.clearDeferredDeleteList(); + + EXPECT_EQ(2U, cluster_->stats_.upstream_cx_total_.value()); +} + // Verifies that requests are queued up in the conn pool until the connection becomes ready. TEST_F(Http2ConnPoolImplTest, PendingRequests) { InSequence s; From 8a53fd52181ae3cb7d2effcc5e4182d41909b043 Mon Sep 17 00:00:00 2001 From: Greg Greenway Date: Thu, 9 Jan 2020 14:02:13 -0800 Subject: [PATCH 12/41] Remove now-unneeded ENVOY_CONN_ID_LOG. It was used in an intermediate version of the change. 
Signed-off-by: Greg Greenway --- source/common/common/logger.h | 3 --- source/common/http/conn_pool_base.cc | 2 +- source/common/http/conn_pool_base.h | 2 -- 3 files changed, 1 insertion(+), 6 deletions(-) diff --git a/source/common/common/logger.h b/source/common/common/logger.h index 29787f3e4b935..1b92b38135a77 100644 --- a/source/common/common/logger.h +++ b/source/common/common/logger.h @@ -371,9 +371,6 @@ template class Loggable { #define ENVOY_CONN_LOG(LEVEL, FORMAT, CONNECTION, ...) \ ENVOY_CONN_LOG_TO_LOGGER(ENVOY_LOGGER(), LEVEL, FORMAT, CONNECTION, ##__VA_ARGS__) -#define ENVOY_CONN_ID_LOG(LEVEL, FORMAT, CONNECTION_ID, ...) \ - ENVOY_LOG_TO_LOGGER(ENVOY_LOGGER(), LEVEL, "[C{}] " FORMAT, (CONNECTION_ID), ##__VA_ARGS__) - /** * Convenience macros for logging with a stream ID and a connection ID. */ diff --git a/source/common/http/conn_pool_base.cc b/source/common/http/conn_pool_base.cc index 2005c443def0b..e8ceedabb77b7 100644 --- a/source/common/http/conn_pool_base.cc +++ b/source/common/http/conn_pool_base.cc @@ -391,7 +391,7 @@ void ConnPoolImplBase::ActiveClient::releaseResources() { } void ConnPoolImplBase::ActiveClient::onConnectTimeout() { - ENVOY_CONN_ID_LOG(debug, "connect timeout", connectionId()); + ENVOY_CONN_LOG(debug, "connect timeout", *codec_client_); parent_.host_->cluster().stats().upstream_cx_connect_timeout_.inc(); close(); } diff --git a/source/common/http/conn_pool_base.h b/source/common/http/conn_pool_base.h index 6567332d1c20c..d68be4739a0de 100644 --- a/source/common/http/conn_pool_base.h +++ b/source/common/http/conn_pool_base.h @@ -54,8 +54,6 @@ class ConnPoolImplBase : public ConnectionPool::Instance, void onConnectTimeout(); void close() { codec_client_->close(); } - uint64_t connectionId() const { return codec_client_->id(); } - virtual bool hasActiveRequests() const PURE; virtual bool closingWithIncompleteRequest() const PURE; virtual StreamEncoder& newStreamEncoder(StreamDecoder& response_decoder) PURE; From 
107abcbf125950bfd0c5a25490b4ff258bc74b8e Mon Sep 17 00:00:00 2001 From: Greg Greenway Date: Thu, 9 Jan 2020 14:08:39 -0800 Subject: [PATCH 13/41] grammar/whitespace Signed-off-by: Greg Greenway --- test/common/http/http2/conn_pool_test.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/common/http/http2/conn_pool_test.cc b/test/common/http/http2/conn_pool_test.cc index dab629591be63..2d18b53f718df 100644 --- a/test/common/http/http2/conn_pool_test.cc +++ b/test/common/http/http2/conn_pool_test.cc @@ -224,10 +224,10 @@ TEST_F(Http2ConnPoolImplTest, DrainConnections) { EXPECT_CALL(r2.inner_encoder_, encodeHeaders(_, true)); r2.callbacks_.outer_encoder_->encodeHeaders(HeaderMapImpl{}, true); - // This will move second connection to draining. + // This will move the second connection to draining. pool_.drainConnections(); - // This will destroy the 2 draining connections. + // This will destroy the 2 draining connections. test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); test_clients_[1].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); EXPECT_CALL(*this, onClientDestroy()).Times(2); From 6fb21fdcd6811f22a71f00cc1a47c93299117ab9 Mon Sep 17 00:00:00 2001 From: Greg Greenway Date: Thu, 9 Jan 2020 17:14:07 -0800 Subject: [PATCH 14/41] Fix ASAN error when destructing a pool with connections still in it. 
Signed-off-by: Greg Greenway --- source/common/http/conn_pool_base.cc | 5 +++++ source/common/http/conn_pool_base.h | 6 ++++++ source/common/http/http1/conn_pool.cc | 2 ++ source/common/http/http1/conn_pool.h | 2 +- source/common/http/http2/conn_pool.cc | 2 ++ source/common/http/http2/conn_pool.h | 2 ++ 6 files changed, 18 insertions(+), 1 deletion(-) diff --git a/source/common/http/conn_pool_base.cc b/source/common/http/conn_pool_base.cc index e8ceedabb77b7..202db68df5149 100644 --- a/source/common/http/conn_pool_base.cc +++ b/source/common/http/conn_pool_base.cc @@ -13,6 +13,11 @@ ConnPoolImplBase::ConnPoolImplBase( transport_socket_options_(transport_socket_options) {} ConnPoolImplBase::~ConnPoolImplBase() { + ASSERT(ready_clients_.empty()); + ASSERT(busy_clients_.empty()); +} + +void ConnPoolImplBase::destructAllConnections() { for (auto* list : {&ready_clients_, &busy_clients_}) { while (!list->empty()) { list->front()->close(); diff --git a/source/common/http/conn_pool_base.h b/source/common/http/conn_pool_base.h index d68be4739a0de..5062e3336ce60 100644 --- a/source/common/http/conn_pool_base.h +++ b/source/common/http/conn_pool_base.h @@ -32,6 +32,12 @@ class ConnPoolImplBase : public ConnectionPool::Instance, const Network::TransportSocketOptionsSharedPtr& transport_socket_options); virtual ~ConnPoolImplBase(); + // Closes and destroys all connections. This must be called in the destructor of + // derived classes because the derived ActiveClient will downcast parent_ to a more + // specific type of ConnPoolImplBase, but if the more specific part is already destructed + // (due to bottom-up destructor ordering in c++) that access will be invalid. + void destructAllConnections(); + // ActiveClient provides a base class for connection pool clients that handles connection timings // as well as managing the connection timeout. 
class ActiveClient : public LinkedObject, diff --git a/source/common/http/http1/conn_pool.cc b/source/common/http/http1/conn_pool.cc index be34cf2fff18c..0799351082dd1 100644 --- a/source/common/http/http1/conn_pool.cc +++ b/source/common/http/http1/conn_pool.cc @@ -36,6 +36,8 @@ ConnPoolImpl::ConnPoolImpl(Event::Dispatcher& dispatcher, Upstream::HostConstSha })), settings_(settings) {} +ConnPoolImpl::~ConnPoolImpl() { destructAllConnections(); } + ConnPoolImplBase::ActiveClientPtr ConnPoolImpl::instantiateActiveClient() { return std::make_unique(*this); } diff --git a/source/common/http/http1/conn_pool.h b/source/common/http/http1/conn_pool.h index c8a76c64f24c0..a86088bb828c9 100644 --- a/source/common/http/http1/conn_pool.h +++ b/source/common/http/http1/conn_pool.h @@ -35,7 +35,7 @@ class ConnPoolImpl : public ConnPoolImplBase { const Http::Http1Settings& settings, const Network::TransportSocketOptionsSharedPtr& transport_socket_options); - ~ConnPoolImpl() override = default; + ~ConnPoolImpl() override; // ConnectionPool::Instance Http::Protocol protocol() const override { return Http::Protocol::Http11; } diff --git a/source/common/http/http2/conn_pool.cc b/source/common/http/http2/conn_pool.cc index b689bb1ca05ed..06c60e38005b9 100644 --- a/source/common/http/http2/conn_pool.cc +++ b/source/common/http/http2/conn_pool.cc @@ -21,6 +21,8 @@ ConnPoolImpl::ConnPoolImpl(Event::Dispatcher& dispatcher, Upstream::HostConstSha : ConnPoolImplBase(std::move(host), std::move(priority), dispatcher, options, transport_socket_options) {} +ConnPoolImpl::~ConnPoolImpl() { destructAllConnections(); } + ConnPoolImplBase::ActiveClientPtr ConnPoolImpl::instantiateActiveClient() { return std::make_unique(*this); } diff --git a/source/common/http/http2/conn_pool.h b/source/common/http/http2/conn_pool.h index 2c78cc9c22106..541e7b259d287 100644 --- a/source/common/http/http2/conn_pool.h +++ b/source/common/http/http2/conn_pool.h @@ -28,6 +28,8 @@ class ConnPoolImpl : public 
ConnPoolImplBase { const Network::ConnectionSocket::OptionsSharedPtr& options, const Network::TransportSocketOptionsSharedPtr& transport_socket_options); + ~ConnPoolImpl() override; + // Http::ConnectionPool::Instance Http::Protocol protocol() const override { return Http::Protocol::Http2; } From f145d104b3d698c7a68b334508fd3337b62cf5d5 Mon Sep 17 00:00:00 2001 From: Greg Greenway Date: Fri, 10 Jan 2020 07:44:34 -0800 Subject: [PATCH 15/41] Fix ASAN failure in test code (mutate vector while iterating) Signed-off-by: Greg Greenway --- test/common/http/http1/conn_pool_test.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/common/http/http1/conn_pool_test.cc b/test/common/http/http1/conn_pool_test.cc index 8f6026f0d8627..96d7579d2eb86 100644 --- a/test/common/http/http1/conn_pool_test.cc +++ b/test/common/http/http1/conn_pool_test.cc @@ -437,12 +437,12 @@ TEST_F(Http1ConnPoolImplTest, MeasureConnectTime) { EXPECT_EQ(sleep2_ms + sleep3_ms, upstream_cx_connect_ms2); // Cleanup, cause the connections to go away. 
- for (auto& test_client : conn_pool_.test_clients_) { + while (!conn_pool_.test_clients_.empty()) { EXPECT_CALL( cluster_->stats_store_, deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_cx_length_ms"), _)); EXPECT_CALL(conn_pool_, onClientDestroy()); - test_client.connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + conn_pool_.test_clients_.front().connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); dispatcher_.clearDeferredDeleteList(); } } From 32252a63df7576f876dd5f17fb15282521c1d140 Mon Sep 17 00:00:00 2001 From: Greg Greenway Date: Mon, 13 Jan 2020 14:30:45 -0800 Subject: [PATCH 16/41] apply api change to v3alpha to fix CI Signed-off-by: Greg Greenway --- api/envoy/config/core/v3alpha/protocol.proto | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/api/envoy/config/core/v3alpha/protocol.proto b/api/envoy/config/core/v3alpha/protocol.proto index b8df2357a20e9..132fdcb5b38ae 100644 --- a/api/envoy/config/core/v3alpha/protocol.proto +++ b/api/envoy/config/core/v3alpha/protocol.proto @@ -136,6 +136,10 @@ message Http2ProtocolOptions { // `Maximum concurrent streams `_ // allowed for peer on one HTTP/2 connection. Valid values range from 1 to 2147483647 (2^31 - 1) // and defaults to 2147483647. + // + // For upstream connections, this also limits how many streams Envoy will initiate concurrently + // on a single connection. If the limit is reached, Envoy may queue requests or establish + // additional connections (as allowed per circuit breaker limits). 
google.protobuf.UInt32Value max_concurrent_streams = 2 [(validate.rules).uint32 = {lte: 2147483647 gte: 1}]; From 7f0656b9d3b9012b5e73e4457f83d42c273c3a3c Mon Sep 17 00:00:00 2001 From: Greg Greenway Date: Tue, 14 Jan 2020 08:22:08 -0800 Subject: [PATCH 17/41] code review fixes Signed-off-by: Greg Greenway --- source/common/http/conn_pool_base.cc | 30 +++++++++++++-------------- source/common/http/conn_pool_base.h | 2 +- source/common/http/http2/conn_pool.cc | 2 +- 3 files changed, 17 insertions(+), 17 deletions(-) diff --git a/source/common/http/conn_pool_base.cc b/source/common/http/conn_pool_base.cc index 202db68df5149..c57760229e85b 100644 --- a/source/common/http/conn_pool_base.cc +++ b/source/common/http/conn_pool_base.cc @@ -31,7 +31,7 @@ void ConnPoolImplBase::destructAllConnections() { void ConnPoolImplBase::createNewConnection() { ENVOY_LOG(debug, "creating a new connection"); ActiveClientPtr client = instantiateActiveClient(); - client->state_ = ActiveClient::State::CONNECTING; + ASSERT(client->state_ == ActiveClient::State::CONNECTING); client->moveIntoList(std::move(client), owningList(client->state_)); } @@ -52,9 +52,9 @@ void ConnPoolImplBase::attachRequestToClient(ActiveClient& client, StreamDecoder if (client.remaining_requests_ == 0) { ENVOY_CONN_LOG(debug, "maximum requests per connection, DRAINING", *client.codec_client_); host_->cluster().stats().upstream_cx_max_requests_.inc(); - setActiveClientState(client, ActiveClient::State::DRAINING); + transitionActiveClientState(client, ActiveClient::State::DRAINING); } else if (client.codec_client_->numActiveRequests() >= client.concurrent_request_limit_) { - setActiveClientState(client, ActiveClient::State::BUSY); + transitionActiveClientState(client, ActiveClient::State::BUSY); } host_->stats().rq_total_.inc(); @@ -75,13 +75,13 @@ void ConnPoolImplBase::onRequestClosed(ActiveClient& client, bool delay_attachin host_->cluster().resourceManager(priority_).requests().dec(); if (client.state_ == 
ActiveClient::State::DRAINING && client.codec_client_->numActiveRequests() == 0) { - // Close out the draining client if we no long have active requests. + // Close out the draining client if we no longer have active requests. client.codec_client_->close(); } else if (client.state_ == ActiveClient::State::BUSY) { // A request was just ended, so we should be below the limit now. ASSERT(client.codec_client_->numActiveRequests() < client.concurrent_request_limit_); - setActiveClientState(client, ActiveClient::State::READY); + transitionActiveClientState(client, ActiveClient::State::READY); if (!delay_attaching_request) { onUpstreamReady(); } @@ -98,7 +98,7 @@ ConnectionPool::Cancellable* ConnPoolImplBase::newStream(Http::StreamDecoder& re } if (host_->cluster().resourceManager(priority_).pendingRequests().canCreate()) { - bool can_create_connection = + const bool can_create_connection = host_->cluster().resourceManager(priority_).connections().canCreate(); if (!can_create_connection) { host_->cluster().stats().upstream_cx_overflow_.inc(); @@ -166,7 +166,8 @@ ConnPoolImplBase::owningList(ActiveClient::State state) { NOT_REACHED_GCOVR_EXCL_LINE; } -void ConnPoolImplBase::setActiveClientState(ActiveClient& client, ActiveClient::State new_state) { +void ConnPoolImplBase::transitionActiveClientState(ActiveClient& client, + ActiveClient::State new_state) { auto& old_list = owningList(client.state_); auto& new_list = owningList(new_state); client.state_ = new_state; @@ -198,14 +199,14 @@ void ConnPoolImplBase::drainConnections() { // so all remaining entries in ready_clients_ are serving requests. Move them and all entries // in busy_clients_ to draining. 
while (!ready_clients_.empty()) { - setActiveClientState(*ready_clients_.front(), ActiveClient::State::DRAINING); + transitionActiveClientState(*ready_clients_.front(), ActiveClient::State::DRAINING); } // Changing busy_clients_ to DRAINING does not move them between lists, // so use a for-loop since the list is not mutated. ASSERT(&owningList(ActiveClient::State::DRAINING) == &busy_clients_); for (auto& busy_client : busy_clients_) { - setActiveClientState(*busy_client, ActiveClient::State::DRAINING); + transitionActiveClientState(*busy_client, ActiveClient::State::DRAINING); } } @@ -233,12 +234,11 @@ void ConnPoolImplBase::onConnectionEvent(ConnPoolImplBase::ActiveClient& client, client.codec_client_->connectionFailureReason()); Envoy::Upstream::reportUpstreamCxDestroy(host_, event); - if (client.closingWithIncompleteRequest()) { + const bool incomplete_request = client.closingWithIncompleteRequest(); + if (incomplete_request) { Envoy::Upstream::reportUpstreamCxDestroyActiveRequest(host_, event); } - const bool check_for_drained = client.closingWithIncompleteRequest(); - if (client.state_ == ActiveClient::State::CONNECTING) { host_->cluster().stats().upstream_cx_connect_fail_.inc(); host_->stats().cx_connect_fail_.inc(); @@ -256,11 +256,11 @@ void ConnPoolImplBase::onConnectionEvent(ConnPoolImplBase::ActiveClient& client, // We need to release our resourceManager() resources before checking below for // whether we can create a new connection. Normally this would happen when // client's destructor runs, but this object needs to be deferredDelete'd(), so - // this forces part of it's cleanup to happen now. + // this forces part of its cleanup to happen now. 
client.releaseResources(); dispatcher_.deferredDelete(client.removeFromList(owningList(client.state_))); - if (check_for_drained) { + if (incomplete_request) { checkForDrained(); } @@ -277,7 +277,7 @@ void ConnPoolImplBase::onConnectionEvent(ConnPoolImplBase::ActiveClient& client, client.conn_connect_ms_.reset(); ASSERT(client.state_ == ActiveClient::State::CONNECTING); - setActiveClientState(client, ActiveClient::State::READY); + transitionActiveClientState(client, ActiveClient::State::READY); onUpstreamReady(); checkForDrained(); diff --git a/source/common/http/conn_pool_base.h b/source/common/http/conn_pool_base.h index 5062e3336ce60..450f1557025b7 100644 --- a/source/common/http/conn_pool_base.h +++ b/source/common/http/conn_pool_base.h @@ -122,7 +122,7 @@ class ConnPoolImplBase : public ConnectionPool::Instance, void onRequestClosed(ActiveClient& client, bool delay_attaching_request); // Changes the state_ of an ActiveClient and moves to the appropriate list. - void setActiveClientState(ActiveClient& client, ActiveClient::State new_state); + void transitionActiveClientState(ActiveClient& client, ActiveClient::State new_state); void onConnectionEvent(ActiveClient& client, Network::ConnectionEvent event); void checkForDrained(); diff --git a/source/common/http/http2/conn_pool.cc b/source/common/http/http2/conn_pool.cc index 06c60e38005b9..e9c0ba63e3cf8 100644 --- a/source/common/http/http2/conn_pool.cc +++ b/source/common/http/http2/conn_pool.cc @@ -33,7 +33,7 @@ void ConnPoolImpl::onGoAway(ActiveClient& client) { if (client.codec_client_->numActiveRequests() == 0) { client.codec_client_->close(); } else { - setActiveClientState(client, ActiveClient::State::DRAINING); + transitionActiveClientState(client, ActiveClient::State::DRAINING); } } } From 4f95f95a5c1c9e152982d427c540ddd9edd0d38a Mon Sep 17 00:00:00 2001 From: Greg Greenway Date: Fri, 17 Jan 2020 12:18:49 -0800 Subject: [PATCH 18/41] Add more test coverage for h2 draining Signed-off-by: Greg Greenway --- 
test/common/http/http2/conn_pool_test.cc | 56 ++++++++++++++++++++++++ 1 file changed, 56 insertions(+) diff --git a/test/common/http/http2/conn_pool_test.cc b/test/common/http/http2/conn_pool_test.cc index 2d18b53f718df..762dd2e165069 100644 --- a/test/common/http/http2/conn_pool_test.cc +++ b/test/common/http/http2/conn_pool_test.cc @@ -198,6 +198,62 @@ void Http2ConnPoolImplTest::completeRequestCloseUpstream(size_t index, ActiveTes */ TEST_F(Http2ConnPoolImplTest, Host) { EXPECT_EQ(host_, pool_.host()); } +/** + * Verify that idle connections are closed immediately when draining. + */ +TEST_F(Http2ConnPoolImplTest, DrainConnectionIdle) { + InSequence s; + + expectClientCreate(); + ActiveTestRequest r(*this, 0, false); + expectClientConnect(0, r); + completeRequest(r); + + EXPECT_CALL(*this, onClientDestroy()); + pool_.drainConnections(); +} + +/** + * Verify that a ready connection with a request in progress is moved to + * draining and closes when the request completes. + */ +TEST_F(Http2ConnPoolImplTest, DrainConnectionReadyWithRequest) { + InSequence s; + + expectClientCreate(); + ActiveTestRequest r(*this, 0, false); + expectClientConnect(0, r); + EXPECT_CALL(r.inner_encoder_, encodeHeaders(_, true)); + r.callbacks_.outer_encoder_->encodeHeaders(HeaderMapImpl{}, true); + + pool_.drainConnections(); + + EXPECT_CALL(r.decoder_, decodeHeaders_(_, true)); + EXPECT_CALL(*this, onClientDestroy()); + r.inner_decoder_->decodeHeaders(HeaderMapPtr{new HeaderMapImpl{}}, true); +} + +/** + * Verify that a busy connection is moved to draining and closes when all requests + * complete. 
+ */ +TEST_F(Http2ConnPoolImplTest, DrainConnectionBusy) { + cluster_->http2_settings_.max_concurrent_streams_ = 1; + InSequence s; + + expectClientCreate(); + ActiveTestRequest r(*this, 0, false); + expectClientConnect(0, r); + EXPECT_CALL(r.inner_encoder_, encodeHeaders(_, true)); + r.callbacks_.outer_encoder_->encodeHeaders(HeaderMapImpl{}, true); + + pool_.drainConnections(); + + EXPECT_CALL(r.decoder_, decodeHeaders_(_, true)); + EXPECT_CALL(*this, onClientDestroy()); + r.inner_decoder_->decodeHeaders(HeaderMapPtr{new HeaderMapImpl{}}, true); +} + /** * Verify that connections are drained when requested. */ From 97086dc01d955c7a042d62720869346d0246f275 Mon Sep 17 00:00:00 2001 From: Greg Greenway Date: Fri, 17 Jan 2020 12:23:34 -0800 Subject: [PATCH 19/41] Comment about list movement Signed-off-by: Greg Greenway --- source/common/http/conn_pool_base.cc | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/source/common/http/conn_pool_base.cc b/source/common/http/conn_pool_base.cc index c57760229e85b..f08e13ea996e6 100644 --- a/source/common/http/conn_pool_base.cc +++ b/source/common/http/conn_pool_base.cc @@ -171,6 +171,12 @@ void ConnPoolImplBase::transitionActiveClientState(ActiveClient& client, auto& old_list = owningList(client.state_); auto& new_list = owningList(new_state); client.state_ = new_state; + + // old_list and new_list can be equal when transitioning from BUSY to DRAINING. + // + // The documentation for list.splice() (which is what moveBetweenLists() calls) is + // unclear whether it is allowed for src and dst to be the same, so check here + // since it is a no-op anyways. 
if (&old_list != &new_list) { client.moveBetweenLists(old_list, new_list); } From 3370f1958d0c6936e6d6b883966bd84f75590c59 Mon Sep 17 00:00:00 2001 From: Greg Greenway Date: Fri, 17 Jan 2020 13:06:54 -0800 Subject: [PATCH 20/41] Make hasActiveConnections() O(1) Signed-off-by: Greg Greenway --- source/common/http/conn_pool_base.cc | 15 ++++----------- source/common/http/conn_pool_base.h | 3 +++ 2 files changed, 7 insertions(+), 11 deletions(-) diff --git a/source/common/http/conn_pool_base.cc b/source/common/http/conn_pool_base.cc index f08e13ea996e6..a5e7ea6193319 100644 --- a/source/common/http/conn_pool_base.cc +++ b/source/common/http/conn_pool_base.cc @@ -57,6 +57,7 @@ void ConnPoolImplBase::attachRequestToClient(ActiveClient& client, StreamDecoder transitionActiveClientState(client, ActiveClient::State::BUSY); } + num_active_requests_++; host_->stats().rq_total_.inc(); host_->stats().rq_active_.inc(); host_->cluster().stats().upstream_rq_total_.inc(); @@ -70,6 +71,8 @@ void ConnPoolImplBase::attachRequestToClient(ActiveClient& client, StreamDecoder void ConnPoolImplBase::onRequestClosed(ActiveClient& client, bool delay_attaching_request) { ENVOY_CONN_LOG(debug, "destroying stream: {} remaining", *client.codec_client_, client.codec_client_->numActiveRequests()); + ASSERT(num_active_requests_ > 0); + num_active_requests_--; host_->stats().rq_active_.dec(); host_->cluster().stats().upstream_rq_active_.dec(); host_->cluster().resourceManager(priority_).requests().dec(); @@ -136,17 +139,7 @@ void ConnPoolImplBase::addDrainedCallback(DrainedCb cb) { } bool ConnPoolImplBase::hasActiveConnections() const { - if (!pending_requests_.empty() || !busy_clients_.empty()) { - return true; - } - - for (const auto& ready_client : ready_clients_) { - if (ready_client->hasActiveRequests()) { - return true; - } - } - - return false; + return (!pending_requests_.empty() || (num_active_requests_ > 0)); } std::list& diff --git a/source/common/http/conn_pool_base.h 
b/source/common/http/conn_pool_base.h index 450f1557025b7..3be330dc89695 100644 --- a/source/common/http/conn_pool_base.h +++ b/source/common/http/conn_pool_base.h @@ -152,6 +152,9 @@ class ConnPoolImplBase : public ConnectionPool::Instance, // Clients that are not ready to handle additional requests. std::list busy_clients_; + + // The number of requests currently attached to clients. + uint64_t num_active_requests_{0}; }; } // namespace Http } // namespace Envoy From 602e198bacc8925759469f4c164a728144707dbc Mon Sep 17 00:00:00 2001 From: Greg Greenway Date: Fri, 17 Jan 2020 13:07:49 -0800 Subject: [PATCH 21/41] anytime -> any time Signed-off-by: Greg Greenway --- source/common/http/conn_pool_base.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/common/http/conn_pool_base.h b/source/common/http/conn_pool_base.h index 3be330dc89695..a85376616c4d9 100644 --- a/source/common/http/conn_pool_base.h +++ b/source/common/http/conn_pool_base.h @@ -118,7 +118,7 @@ class ConnPoolImplBase : public ConnectionPool::Instance, // Closes any idle connections. void closeIdleConnections(); - // Called by derived classes anytime a request is completed or destroyed for any reason. + // Called by derived classes any time a request is completed or destroyed for any reason. void onRequestClosed(ActiveClient& client, bool delay_attaching_request); // Changes the state_ of an ActiveClient and moves to the appropriate list. 
From b017f14406696f8cb58511c5ec94da3ff4f1d572 Mon Sep 17 00:00:00 2001 From: Greg Greenway Date: Fri, 17 Jan 2020 13:19:55 -0800 Subject: [PATCH 22/41] Add coverage for stat upstream_cx_active Signed-off-by: Greg Greenway --- test/common/http/http2/conn_pool_test.cc | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/test/common/http/http2/conn_pool_test.cc b/test/common/http/http2/conn_pool_test.cc index 762dd2e165069..3d10dc9e75e3b 100644 --- a/test/common/http/http2/conn_pool_test.cc +++ b/test/common/http/http2/conn_pool_test.cc @@ -542,6 +542,7 @@ TEST_F(Http2ConnPoolImplTest, RequestAndResponse) { expectClientConnect(0, r1); EXPECT_CALL(r1.inner_encoder_, encodeHeaders(_, true)); r1.callbacks_.outer_encoder_->encodeHeaders(HeaderMapImpl{}, true); + EXPECT_EQ(1U, cluster_->stats_.upstream_cx_active_.value()); EXPECT_CALL(r1.decoder_, decodeHeaders_(_, true)); r1.inner_decoder_->decodeHeaders(HeaderMapPtr{new HeaderMapImpl{}}, true); @@ -555,6 +556,7 @@ TEST_F(Http2ConnPoolImplTest, RequestAndResponse) { EXPECT_CALL(*this, onClientDestroy()); dispatcher_.clearDeferredDeleteList(); + EXPECT_EQ(0U, cluster_->stats_.upstream_cx_active_.value()); EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_.value()); EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_remote_.value()); } @@ -576,6 +578,7 @@ TEST_F(Http2ConnPoolImplTest, LocalReset) { EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_remote_.value()); EXPECT_EQ(1U, cluster_->stats_.upstream_rq_tx_reset_.value()); EXPECT_EQ(0U, cluster_->circuit_breakers_stats_.rq_open_.value()); + EXPECT_EQ(0U, cluster_->stats_.upstream_cx_active_.value()); } TEST_F(Http2ConnPoolImplTest, RemoteReset) { @@ -595,6 +598,7 @@ TEST_F(Http2ConnPoolImplTest, RemoteReset) { EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_remote_.value()); EXPECT_EQ(1U, cluster_->stats_.upstream_rq_rx_reset_.value()); EXPECT_EQ(0U, cluster_->circuit_breakers_stats_.rq_open_.value()); + EXPECT_EQ(0U, cluster_->stats_.upstream_cx_active_.value()); 
} TEST_F(Http2ConnPoolImplTest, DrainDisconnectWithActiveRequest) { From 898b4e668708d9c4ad683d637fd3c3becff596cb Mon Sep 17 00:00:00 2001 From: Greg Greenway Date: Fri, 17 Jan 2020 13:20:31 -0800 Subject: [PATCH 23/41] naming: toClose -> to_close Signed-off-by: Greg Greenway --- source/common/http/conn_pool_base.cc | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/source/common/http/conn_pool_base.cc b/source/common/http/conn_pool_base.cc index a5e7ea6193319..b1aa649ebf9c7 100644 --- a/source/common/http/conn_pool_base.cc +++ b/source/common/http/conn_pool_base.cc @@ -177,16 +177,16 @@ void ConnPoolImplBase::transitionActiveClientState(ActiveClient& client, void ConnPoolImplBase::closeIdleConnections() { // Create a separate list of elements to close to avoid mutate-while-iterating problems. - std::list toClose; + std::list to_close; // Possibly-idle connections are always in the ready_clients_ list for (auto& client : ready_clients_) { if (!client->hasActiveRequests()) { - toClose.push_back(client.get()); + to_close.push_back(client.get()); } } - for (auto& entry : toClose) { + for (auto& entry : to_close) { entry->close(); } } From c921dd2a808c08cca530eb335cb43212fdaf7ab8 Mon Sep 17 00:00:00 2001 From: Greg Greenway Date: Fri, 17 Jan 2020 13:29:06 -0800 Subject: [PATCH 24/41] Better comment on special case Signed-off-by: Greg Greenway --- source/common/http/conn_pool_base.cc | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/source/common/http/conn_pool_base.cc b/source/common/http/conn_pool_base.cc index b1aa649ebf9c7..5efaea8346d87 100644 --- a/source/common/http/conn_pool_base.cc +++ b/source/common/http/conn_pool_base.cc @@ -107,7 +107,9 @@ ConnectionPool::Cancellable* ConnPoolImplBase::newStream(Http::StreamDecoder& re host_->cluster().stats().upstream_cx_overflow_.inc(); } - // If we have no connections at all, make one no matter what so we don't starve. 
+ // If we are at the connection circuit-breaker limit due to other upstreams having + // too many open connections, and this upstream has no connections, always create one, to + // prevent pending requests being queued to this upstream with no way to be processed. if ((ready_clients_.empty() && busy_clients_.empty()) || can_create_connection) { createNewConnection(); } From bff92b5da9e89d1062238a3a0b84fa5ac69e9351 Mon Sep 17 00:00:00 2001 From: Greg Greenway Date: Fri, 17 Jan 2020 13:49:17 -0800 Subject: [PATCH 25/41] fix_format again (did a merge eat this diff?) Signed-off-by: Greg Greenway --- api/envoy/config/core/v3/protocol.proto | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/api/envoy/config/core/v3/protocol.proto b/api/envoy/config/core/v3/protocol.proto index dbbb4318e0cfe..77e7359fca000 100644 --- a/api/envoy/config/core/v3/protocol.proto +++ b/api/envoy/config/core/v3/protocol.proto @@ -136,6 +136,10 @@ message Http2ProtocolOptions { // `Maximum concurrent streams `_ // allowed for peer on one HTTP/2 connection. Valid values range from 1 to 2147483647 (2^31 - 1) // and defaults to 2147483647. + // + // For upstream connections, this also limits how many streams Envoy will initiate concurrently + // on a single connection. If the limit is reached, Envoy may queue requests or establish + // additional connections (as allowed per circuit breaker limits). 
google.protobuf.UInt32Value max_concurrent_streams = 2 [(validate.rules).uint32 = {lte: 2147483647 gte: 1}]; From 32fbddd1399ae54980b0ba4d7ae8a94b75f01198 Mon Sep 17 00:00:00 2001 From: Greg Greenway Date: Fri, 17 Jan 2020 14:05:59 -0800 Subject: [PATCH 26/41] Remove unused http1settings param Signed-off-by: Greg Greenway --- source/common/http/http1/conn_pool.cc | 4 +--- source/common/http/http1/conn_pool.h | 5 +---- source/common/upstream/cluster_manager_impl.cc | 1 - test/common/http/http1/conn_pool_test.cc | 2 +- 4 files changed, 3 insertions(+), 9 deletions(-) diff --git a/source/common/http/http1/conn_pool.cc b/source/common/http/http1/conn_pool.cc index 0799351082dd1..78edc05f688c3 100644 --- a/source/common/http/http1/conn_pool.cc +++ b/source/common/http/http1/conn_pool.cc @@ -26,15 +26,13 @@ namespace Http1 { ConnPoolImpl::ConnPoolImpl(Event::Dispatcher& dispatcher, Upstream::HostConstSharedPtr host, Upstream::ResourcePriority priority, const Network::ConnectionSocket::OptionsSharedPtr& options, - const Http::Http1Settings& settings, const Network::TransportSocketOptionsSharedPtr& transport_socket_options) : ConnPoolImplBase(std::move(host), std::move(priority), dispatcher, options, transport_socket_options), upstream_ready_timer_(dispatcher_.createTimer([this]() { upstream_ready_enabled_ = false; onUpstreamReady(); - })), - settings_(settings) {} + })) {} ConnPoolImpl::~ConnPoolImpl() { destructAllConnections(); } diff --git a/source/common/http/http1/conn_pool.h b/source/common/http/http1/conn_pool.h index a86088bb828c9..59908e84cf173 100644 --- a/source/common/http/http1/conn_pool.h +++ b/source/common/http/http1/conn_pool.h @@ -32,7 +32,6 @@ class ConnPoolImpl : public ConnPoolImplBase { ConnPoolImpl(Event::Dispatcher& dispatcher, Upstream::HostConstSharedPtr host, Upstream::ResourcePriority priority, const Network::ConnectionSocket::OptionsSharedPtr& options, - const Http::Http1Settings& settings, const Network::TransportSocketOptionsSharedPtr& 
transport_socket_options); ~ConnPoolImpl() override; @@ -96,7 +95,6 @@ class ConnPoolImpl : public ConnPoolImplBase { Event::TimerPtr upstream_ready_timer_; bool upstream_ready_enabled_{false}; - const Http1Settings settings_; }; /** @@ -107,9 +105,8 @@ class ProdConnPoolImpl : public ConnPoolImpl { ProdConnPoolImpl(Event::Dispatcher& dispatcher, Upstream::HostConstSharedPtr host, Upstream::ResourcePriority priority, const Network::ConnectionSocket::OptionsSharedPtr& options, - const Http::Http1Settings& settings, const Network::TransportSocketOptionsSharedPtr& transport_socket_options) - : ConnPoolImpl(dispatcher, host, priority, options, settings, transport_socket_options) {} + : ConnPoolImpl(dispatcher, host, priority, options, transport_socket_options) {} // ConnPoolImpl CodecClientPtr createCodecClient(Upstream::Host::CreateConnectionData& data) override; diff --git a/source/common/upstream/cluster_manager_impl.cc b/source/common/upstream/cluster_manager_impl.cc index 507de51b6a4c8..dbfa84cf6d590 100644 --- a/source/common/upstream/cluster_manager_impl.cc +++ b/source/common/upstream/cluster_manager_impl.cc @@ -1340,7 +1340,6 @@ Http::ConnectionPool::InstancePtr ProdClusterManagerFactory::allocateConnPool( NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } else { return std::make_unique(dispatcher, host, priority, options, - host->cluster().http1Settings(), transport_socket_options); } } diff --git a/test/common/http/http1/conn_pool_test.cc b/test/common/http/http1/conn_pool_test.cc index 96d7579d2eb86..85a193f15d91d 100644 --- a/test/common/http/http1/conn_pool_test.cc +++ b/test/common/http/http1/conn_pool_test.cc @@ -49,7 +49,7 @@ class ConnPoolImplForTest : public ConnPoolImpl { Upstream::ClusterInfoConstSharedPtr cluster, NiceMock* upstream_ready_timer) : ConnPoolImpl(dispatcher, Upstream::makeTestHost(cluster, "tcp://127.0.0.1:9000"), - Upstream::ResourcePriority::Default, nullptr, Http1Settings(), nullptr), + Upstream::ResourcePriority::Default, nullptr, nullptr), 
api_(Api::createApiForTest()), mock_dispatcher_(dispatcher), mock_upstream_ready_timer_(upstream_ready_timer) {} From 1d8ce78bd0cf045928194b93ac8ec2eb071bfa1b Mon Sep 17 00:00:00 2001 From: Greg Greenway Date: Fri, 17 Jan 2020 14:36:41 -0800 Subject: [PATCH 27/41] fix generated_api_shadow Signed-off-by: Greg Greenway --- generated_api_shadow/envoy/api/v2/core/protocol.proto | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/generated_api_shadow/envoy/api/v2/core/protocol.proto b/generated_api_shadow/envoy/api/v2/core/protocol.proto index c8b8cdc6dd354..322add56edf17 100644 --- a/generated_api_shadow/envoy/api/v2/core/protocol.proto +++ b/generated_api_shadow/envoy/api/v2/core/protocol.proto @@ -117,6 +117,10 @@ message Http2ProtocolOptions { // `Maximum concurrent streams `_ // allowed for peer on one HTTP/2 connection. Valid values range from 1 to 2147483647 (2^31 - 1) // and defaults to 2147483647. + // + // For upstream connections, this also limits how many streams Envoy will initiate concurrently + // on a single connection. If the limit is reached, Envoy may queue requests or establish + // additional connections (as allowed per circuit breaker limits). google.protobuf.UInt32Value max_concurrent_streams = 2 [(validate.rules).uint32 = {lte: 2147483647 gte: 1}]; From eb09181c4f7671d4cb46b8481aef824d2f51fdc3 Mon Sep 17 00:00:00 2001 From: Greg Greenway Date: Fri, 17 Jan 2020 15:29:53 -0800 Subject: [PATCH 28/41] Add the original conn_pool code back as _legacy; add a runtime-feature to select the old or the new code. 
Signed-off-by: Greg Greenway --- source/common/http/BUILD | 12 + source/common/http/conn_pool_base_legacy.cc | 95 ++ source/common/http/conn_pool_base_legacy.h | 80 ++ source/common/http/http1/BUILD | 30 + source/common/http/http1/conn_pool.cc | 16 + source/common/http/http1/conn_pool.h | 6 + source/common/http/http1/conn_pool_legacy.cc | 356 +++++++ source/common/http/http1/conn_pool_legacy.h | 149 +++ source/common/http/http2/BUILD | 20 + source/common/http/http2/conn_pool.cc | 16 + source/common/http/http2/conn_pool.h | 6 + source/common/http/http2/conn_pool_legacy.cc | 309 ++++++ source/common/http/http2/conn_pool_legacy.h | 121 +++ source/common/runtime/runtime_features.cc | 2 + .../common/upstream/cluster_manager_impl.cc | 8 +- test/common/http/http1/BUILD | 24 + .../http/http1/conn_pool_legacy_test.cc | 957 ++++++++++++++++++ test/common/http/http2/BUILD | 19 + .../http/http2/conn_pool_legacy_test.cc | 774 ++++++++++++++ 19 files changed, 2996 insertions(+), 4 deletions(-) create mode 100644 source/common/http/conn_pool_base_legacy.cc create mode 100644 source/common/http/conn_pool_base_legacy.h create mode 100644 source/common/http/http1/conn_pool_legacy.cc create mode 100644 source/common/http/http1/conn_pool_legacy.h create mode 100644 source/common/http/http2/conn_pool_legacy.cc create mode 100644 source/common/http/http2/conn_pool_legacy.h create mode 100644 test/common/http/http1/conn_pool_legacy_test.cc create mode 100644 test/common/http/http2/conn_pool_legacy_test.cc diff --git a/source/common/http/BUILD b/source/common/http/BUILD index 4af84291ebc5a..a4cebd7d799ec 100644 --- a/source/common/http/BUILD +++ b/source/common/http/BUILD @@ -124,6 +124,18 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "conn_pool_base_legacy_lib", + srcs = ["conn_pool_base_legacy.cc"], + hdrs = ["conn_pool_base_legacy.h"], + deps = [ + "//include/envoy/http:conn_pool_interface", + "//include/envoy/stats:timespan_interface", + "//source/common/common:linked_object", 
+ "//source/common/stats:timespan_lib", + ], +) + envoy_cc_library( name = "conn_manager_config_interface", hdrs = ["conn_manager_config.h"], diff --git a/source/common/http/conn_pool_base_legacy.cc b/source/common/http/conn_pool_base_legacy.cc new file mode 100644 index 0000000000000..f41e6ea67b120 --- /dev/null +++ b/source/common/http/conn_pool_base_legacy.cc @@ -0,0 +1,95 @@ +#include "common/http/conn_pool_base_legacy.h" + +#include "common/stats/timespan_impl.h" + +namespace Envoy { +namespace Http { +namespace Legacy { +ConnPoolImplBase::ActiveClient::ActiveClient(Event::Dispatcher& dispatcher, + const Upstream::ClusterInfo& cluster) + : connect_timer_(dispatcher.createTimer([this]() -> void { onConnectTimeout(); })) { + + conn_connect_ms_ = std::make_unique( + cluster.stats().upstream_cx_connect_ms_, dispatcher.timeSource()); + conn_length_ = std::make_unique( + cluster.stats().upstream_cx_length_ms_, dispatcher.timeSource()); + connect_timer_->enableTimer(cluster.connectTimeout()); +} + +void ConnPoolImplBase::ActiveClient::recordConnectionSetup() { + conn_connect_ms_->complete(); + conn_connect_ms_.reset(); +} + +void ConnPoolImplBase::ActiveClient::disarmConnectTimeout() { + if (connect_timer_) { + connect_timer_->disableTimer(); + connect_timer_.reset(); + } +} + +ConnPoolImplBase::ActiveClient::ConnectionState ConnPoolImplBase::ActiveClient::connectionState() { + // We don't track any failure state, as the client should be deferred destroyed once a failure + // event is handled. 
+ if (connect_timer_) { + return Connecting; + } + + return Connected; +} + +ConnPoolImplBase::PendingRequest::PendingRequest(ConnPoolImplBase& parent, StreamDecoder& decoder, + ConnectionPool::Callbacks& callbacks) + : parent_(parent), decoder_(decoder), callbacks_(callbacks) { + parent_.host_->cluster().stats().upstream_rq_pending_total_.inc(); + parent_.host_->cluster().stats().upstream_rq_pending_active_.inc(); + parent_.host_->cluster().resourceManager(parent_.priority_).pendingRequests().inc(); +} + +ConnPoolImplBase::PendingRequest::~PendingRequest() { + parent_.host_->cluster().stats().upstream_rq_pending_active_.dec(); + parent_.host_->cluster().resourceManager(parent_.priority_).pendingRequests().dec(); +} + +ConnectionPool::Cancellable* +ConnPoolImplBase::newPendingRequest(StreamDecoder& decoder, ConnectionPool::Callbacks& callbacks) { + ENVOY_LOG(debug, "queueing request due to no available connections"); + PendingRequestPtr pending_request(new PendingRequest(*this, decoder, callbacks)); + pending_request->moveIntoList(std::move(pending_request), pending_requests_); + return pending_requests_.front().get(); +} + +void ConnPoolImplBase::purgePendingRequests( + const Upstream::HostDescriptionConstSharedPtr& host_description, + absl::string_view failure_reason) { + // NOTE: We move the existing pending requests to a temporary list. This is done so that + // if retry logic submits a new request to the pool, we don't fail it inline. 
+ pending_requests_to_purge_ = std::move(pending_requests_); + while (!pending_requests_to_purge_.empty()) { + PendingRequestPtr request = + pending_requests_to_purge_.front()->removeFromList(pending_requests_to_purge_); + host_->cluster().stats().upstream_rq_pending_failure_eject_.inc(); + request->callbacks_.onPoolFailure(ConnectionPool::PoolFailureReason::ConnectionFailure, + failure_reason, host_description); + } +} + +void ConnPoolImplBase::onPendingRequestCancel(PendingRequest& request) { + ENVOY_LOG(debug, "cancelling pending request"); + if (!pending_requests_to_purge_.empty()) { + // If pending_requests_to_purge_ is not empty, it means that we are called from + // with-in a onPoolFailure callback invoked in purgePendingRequests (i.e. purgePendingRequests + // is down in the call stack). Remove this request from the list as it is cancelled, + // and there is no need to call its onPoolFailure callback. + request.removeFromList(pending_requests_to_purge_); + } else { + request.removeFromList(pending_requests_); + } + + host_->cluster().stats().upstream_rq_cancelled_.inc(); + checkForDrained(); +} + +} // namespace Legacy +} // namespace Http +} // namespace Envoy diff --git a/source/common/http/conn_pool_base_legacy.h b/source/common/http/conn_pool_base_legacy.h new file mode 100644 index 0000000000000..5a2cba34708db --- /dev/null +++ b/source/common/http/conn_pool_base_legacy.h @@ -0,0 +1,80 @@ +#pragma once + +#include "envoy/http/conn_pool.h" +#include "envoy/stats/timespan.h" + +#include "common/common/linked_object.h" + +#include "absl/strings/string_view.h" + +namespace Envoy { +namespace Http { +namespace Legacy { + +// Base class that handles request queueing logic shared between connection pool implementations. 
+class ConnPoolImplBase : protected Logger::Loggable { +protected: + ConnPoolImplBase(Upstream::HostConstSharedPtr host, Upstream::ResourcePriority priority) + : host_(host), priority_(priority) {} + virtual ~ConnPoolImplBase() = default; + + // ActiveClient provides a base class for connection pool clients that handles connection timings + // as well as managing the connection timeout. + class ActiveClient { + public: + ActiveClient(Event::Dispatcher& dispatcher, const Upstream::ClusterInfo& cluster); + virtual ~ActiveClient() { conn_length_->complete(); } + + virtual void onConnectTimeout() PURE; + + void recordConnectionSetup(); + void disarmConnectTimeout(); + + enum ConnectionState { Connecting, Connected }; + ConnectionState connectionState(); + + private: + Event::TimerPtr connect_timer_; + Stats::TimespanPtr conn_connect_ms_; + Stats::TimespanPtr conn_length_; + }; + + struct PendingRequest : LinkedObject, public ConnectionPool::Cancellable { + PendingRequest(ConnPoolImplBase& parent, StreamDecoder& decoder, + ConnectionPool::Callbacks& callbacks); + ~PendingRequest() override; + + // ConnectionPool::Cancellable + void cancel() override { parent_.onPendingRequestCancel(*this); } + + ConnPoolImplBase& parent_; + StreamDecoder& decoder_; + ConnectionPool::Callbacks& callbacks_; + }; + + using PendingRequestPtr = std::unique_ptr; + + // Creates a new PendingRequest and enqueues it into the request queue. + ConnectionPool::Cancellable* newPendingRequest(StreamDecoder& decoder, + ConnectionPool::Callbacks& callbacks); + // Removes the PendingRequest from the list of requests. Called when the PendingRequest is + // cancelled, e.g. when the stream is reset before a connection has been established. + void onPendingRequestCancel(PendingRequest& request); + + // Fails all pending requests, calling onPoolFailure on the associated callbacks. 
+ void purgePendingRequests(const Upstream::HostDescriptionConstSharedPtr& host_description, + absl::string_view failure_reason); + + // Must be implemented by sub class. Attempts to drain inactive clients. + virtual void checkForDrained() PURE; + + const Upstream::HostConstSharedPtr host_; + const Upstream::ResourcePriority priority_; + std::list pending_requests_; + // When calling purgePendingRequests, this list will be used to hold the requests we are about + // to purge. We need this if one cancelled requests cancels a different pending request + std::list pending_requests_to_purge_; +}; +} // namespace Legacy +} // namespace Http +} // namespace Envoy diff --git a/source/common/http/http1/BUILD b/source/common/http/http1/BUILD index aa56ba332bbc6..07d661269ac99 100644 --- a/source/common/http/http1/BUILD +++ b/source/common/http/http1/BUILD @@ -43,12 +43,42 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "conn_pool_legacy_lib", + srcs = ["conn_pool_legacy.cc"], + hdrs = ["conn_pool_legacy.h"], + external_deps = ["abseil_optional"], + deps = [ + "//include/envoy/event:deferred_deletable", + "//include/envoy/event:dispatcher_interface", + "//include/envoy/event:timer_interface", + "//include/envoy/http:conn_pool_interface", + "//include/envoy/http:header_map_interface", + "//include/envoy/network:connection_interface", + "//include/envoy/stats:stats_interface", + "//include/envoy/stats:timespan_interface", + "//include/envoy/upstream:upstream_interface", + "//source/common/common:linked_object", + "//source/common/common:utility_lib", + "//source/common/http:codec_client_lib", + "//source/common/http:codec_wrappers_lib", + "//source/common/http:codes_lib", + "//source/common/http:conn_pool_base_legacy_lib", + "//source/common/http:headers_lib", + "//source/common/network:utility_lib", + "//source/common/runtime:runtime_lib", + "//source/common/stats:timespan_lib", + "//source/common/upstream:upstream_lib", + ], +) + envoy_cc_library( name = 
"conn_pool_lib", srcs = ["conn_pool.cc"], hdrs = ["conn_pool.h"], external_deps = ["abseil_optional"], deps = [ + ":conn_pool_legacy_lib", "//include/envoy/event:deferred_deletable", "//include/envoy/event:dispatcher_interface", "//include/envoy/event:timer_interface", diff --git a/source/common/http/http1/conn_pool.cc b/source/common/http/http1/conn_pool.cc index 78edc05f688c3..5983f4bf2a014 100644 --- a/source/common/http/http1/conn_pool.cc +++ b/source/common/http/http1/conn_pool.cc @@ -14,6 +14,7 @@ #include "common/http/codec_client.h" #include "common/http/codes.h" #include "common/http/headers.h" +#include "common/http/http1/conn_pool_legacy.h" #include "common/network/utility.h" #include "common/runtime/runtime_impl.h" @@ -141,6 +142,21 @@ CodecClientPtr ProdConnPoolImpl::createCodecClient(Upstream::Host::CreateConnect return codec; } +ConnectionPool::InstancePtr +allocateConnPool(Event::Dispatcher& dispatcher, Upstream::HostConstSharedPtr host, + Upstream::ResourcePriority priority, + const Network::ConnectionSocket::OptionsSharedPtr& options, + const Network::TransportSocketOptionsSharedPtr& transport_socket_options) { + if (Runtime::runtimeFeatureEnabled( + "envoy.reloadable_features.new_http1_connection_pool_behavior")) { + return std::make_unique(dispatcher, host, priority, options, + transport_socket_options); + } else { + return std::make_unique( + dispatcher, host, priority, options, transport_socket_options); + } +} + } // namespace Http1 } // namespace Http } // namespace Envoy diff --git a/source/common/http/http1/conn_pool.h b/source/common/http/http1/conn_pool.h index 59908e84cf173..54d9bee6a3ec6 100644 --- a/source/common/http/http1/conn_pool.h +++ b/source/common/http/http1/conn_pool.h @@ -112,6 +112,12 @@ class ProdConnPoolImpl : public ConnPoolImpl { CodecClientPtr createCodecClient(Upstream::Host::CreateConnectionData& data) override; }; +ConnectionPool::InstancePtr +allocateConnPool(Event::Dispatcher& dispatcher, 
Upstream::HostConstSharedPtr host, + Upstream::ResourcePriority priority, + const Network::ConnectionSocket::OptionsSharedPtr& options, + const Network::TransportSocketOptionsSharedPtr& transport_socket_options); + } // namespace Http1 } // namespace Http } // namespace Envoy diff --git a/source/common/http/http1/conn_pool_legacy.cc b/source/common/http/http1/conn_pool_legacy.cc new file mode 100644 index 0000000000000..6f85335298434 --- /dev/null +++ b/source/common/http/http1/conn_pool_legacy.cc @@ -0,0 +1,356 @@ +#include "common/http/http1/conn_pool_legacy.h" + +#include +#include +#include + +#include "envoy/event/dispatcher.h" +#include "envoy/event/timer.h" +#include "envoy/http/codec.h" +#include "envoy/http/header_map.h" +#include "envoy/upstream/upstream.h" + +#include "common/common/utility.h" +#include "common/http/codec_client.h" +#include "common/http/codes.h" +#include "common/http/headers.h" +#include "common/network/utility.h" +#include "common/runtime/runtime_impl.h" +#include "common/stats/timespan_impl.h" +#include "common/upstream/upstream_impl.h" + +#include "absl/strings/match.h" + +namespace Envoy { +namespace Http { +namespace Legacy { +namespace Http1 { + +ConnPoolImpl::ConnPoolImpl(Event::Dispatcher& dispatcher, Upstream::HostConstSharedPtr host, + Upstream::ResourcePriority priority, + const Network::ConnectionSocket::OptionsSharedPtr& options, + const Network::TransportSocketOptionsSharedPtr& transport_socket_options) + : ConnPoolImplBase(std::move(host), std::move(priority)), dispatcher_(dispatcher), + socket_options_(options), transport_socket_options_(transport_socket_options), + upstream_ready_timer_(dispatcher_.createTimer([this]() { onUpstreamReady(); })) {} + +ConnPoolImpl::~ConnPoolImpl() { + while (!ready_clients_.empty()) { + ready_clients_.front()->codec_client_->close(); + } + + while (!busy_clients_.empty()) { + busy_clients_.front()->codec_client_->close(); + } + + // Make sure all clients are destroyed before we are 
destroyed. + dispatcher_.clearDeferredDeleteList(); +} + +void ConnPoolImpl::drainConnections() { + while (!ready_clients_.empty()) { + ready_clients_.front()->codec_client_->close(); + } + + // We drain busy clients by manually setting remaining requests to 1. Thus, when the next + // response completes the client will be destroyed. + for (const auto& client : busy_clients_) { + client->remaining_requests_ = 1; + } +} + +void ConnPoolImpl::addDrainedCallback(DrainedCb cb) { + drained_callbacks_.push_back(cb); + checkForDrained(); +} + +bool ConnPoolImpl::hasActiveConnections() const { + return !pending_requests_.empty() || !busy_clients_.empty(); +} + +void ConnPoolImpl::attachRequestToClient(ActiveClient& client, StreamDecoder& response_decoder, + ConnectionPool::Callbacks& callbacks) { + ASSERT(!client.stream_wrapper_); + host_->cluster().stats().upstream_rq_total_.inc(); + host_->stats().rq_total_.inc(); + client.stream_wrapper_ = std::make_unique(response_decoder, client); + callbacks.onPoolReady(*client.stream_wrapper_, client.real_host_description_, + client.codec_client_->streamInfo()); +} + +void ConnPoolImpl::checkForDrained() { + if (!drained_callbacks_.empty() && pending_requests_.empty() && busy_clients_.empty()) { + while (!ready_clients_.empty()) { + ready_clients_.front()->codec_client_->close(); + } + + for (const DrainedCb& cb : drained_callbacks_) { + cb(); + } + } +} + +void ConnPoolImpl::createNewConnection() { + ENVOY_LOG(debug, "creating a new connection"); + ActiveClientPtr client(new ActiveClient(*this)); + client->moveIntoList(std::move(client), busy_clients_); +} + +ConnectionPool::Cancellable* ConnPoolImpl::newStream(StreamDecoder& response_decoder, + ConnectionPool::Callbacks& callbacks) { + if (!ready_clients_.empty()) { + ready_clients_.front()->moveBetweenLists(ready_clients_, busy_clients_); + ENVOY_CONN_LOG(debug, "using existing connection", *busy_clients_.front()->codec_client_); + attachRequestToClient(*busy_clients_.front(), 
response_decoder, callbacks); + return nullptr; + } + + if (host_->cluster().resourceManager(priority_).pendingRequests().canCreate()) { + bool can_create_connection = + host_->cluster().resourceManager(priority_).connections().canCreate(); + if (!can_create_connection) { + host_->cluster().stats().upstream_cx_overflow_.inc(); + } + + // If we have no connections at all, make one no matter what so we don't starve. + if ((ready_clients_.empty() && busy_clients_.empty()) || can_create_connection) { + createNewConnection(); + } + + return newPendingRequest(response_decoder, callbacks); + } else { + ENVOY_LOG(debug, "max pending requests overflow"); + callbacks.onPoolFailure(ConnectionPool::PoolFailureReason::Overflow, absl::string_view(), + nullptr); + host_->cluster().stats().upstream_rq_pending_overflow_.inc(); + return nullptr; + } +} + +void ConnPoolImpl::onConnectionEvent(ActiveClient& client, Network::ConnectionEvent event) { + if (event == Network::ConnectionEvent::RemoteClose || + event == Network::ConnectionEvent::LocalClose) { + // The client died. + ENVOY_CONN_LOG(debug, "client disconnected, failure reason: {}", *client.codec_client_, + client.codec_client_->connectionFailureReason()); + + Envoy::Upstream::reportUpstreamCxDestroy(host_, event); + ActiveClientPtr removed; + bool check_for_drained = true; + if (client.stream_wrapper_) { + if (!client.stream_wrapper_->decode_complete_) { + Envoy::Upstream::reportUpstreamCxDestroyActiveRequest(host_, event); + } + + // There is an active request attached to this client. The underlying codec client will + // already have "reset" the stream to fire the reset callback. All we do here is just + // destroy the client. 
+ removed = client.removeFromList(busy_clients_); + } else if (client.connectionState() == + ConnPoolImplBase::ActiveClient::ConnectionState::Connected) { + removed = client.removeFromList(ready_clients_); + check_for_drained = false; + } else { + // The only time this happens is if we actually saw a connect failure. + host_->cluster().stats().upstream_cx_connect_fail_.inc(); + host_->stats().cx_connect_fail_.inc(); + + removed = client.removeFromList(busy_clients_); + + // Raw connect failures should never happen under normal circumstances. If we have an upstream + // that is behaving badly, requests can get stuck here in the pending state. If we see a + // connect failure, we purge all pending requests so that calling code can determine what to + // do with the request. + ENVOY_CONN_LOG(debug, "purge pending, failure reason: {}", *client.codec_client_, + client.codec_client_->connectionFailureReason()); + purgePendingRequests(client.real_host_description_, + client.codec_client_->connectionFailureReason()); + } + + dispatcher_.deferredDelete(std::move(removed)); + + // If we have pending requests and we just lost a connection we should make a new one. + if (pending_requests_.size() > (ready_clients_.size() + busy_clients_.size())) { + createNewConnection(); + } + + if (check_for_drained) { + checkForDrained(); + } + } + + client.disarmConnectTimeout(); + + // Note that the order in this function is important. Concretely, we must destroy the connect + // timer before we process a connected idle client, because if this results in an immediate + // drain/destruction event, we key off of the existence of the connect timer above to determine + // whether the client is in the ready list (connected) or the busy list (failed to connect). 
+ if (event == Network::ConnectionEvent::Connected) { + client.recordConnectionSetup(); + processIdleClient(client, false); + } +} + +void ConnPoolImpl::onDownstreamReset(ActiveClient& client) { + // If we get a downstream reset to an attached client, we just blow it away. + client.codec_client_->close(); +} + +void ConnPoolImpl::onResponseComplete(ActiveClient& client) { + ENVOY_CONN_LOG(debug, "response complete", *client.codec_client_); + if (!client.stream_wrapper_->encode_complete_) { + ENVOY_CONN_LOG(debug, "response before request complete", *client.codec_client_); + onDownstreamReset(client); + } else if (client.stream_wrapper_->close_connection_ || client.codec_client_->remoteClosed()) { + ENVOY_CONN_LOG(debug, "saw upstream close connection", *client.codec_client_); + onDownstreamReset(client); + } else if (client.remaining_requests_ > 0 && --client.remaining_requests_ == 0) { + ENVOY_CONN_LOG(debug, "maximum requests per connection", *client.codec_client_); + host_->cluster().stats().upstream_cx_max_requests_.inc(); + onDownstreamReset(client); + } else { + // Upstream connection might be closed right after response is complete. Setting delay=true + // here to attach pending requests in next dispatcher loop to handle that case. + // https://github.com/envoyproxy/envoy/issues/2715 + processIdleClient(client, true); + } +} + +void ConnPoolImpl::onUpstreamReady() { + upstream_ready_enabled_ = false; + while (!pending_requests_.empty() && !ready_clients_.empty()) { + ActiveClient& client = *ready_clients_.front(); + ENVOY_CONN_LOG(debug, "attaching to next request", *client.codec_client_); + // There is work to do so bind a request to the client and move it to the busy list. Pending + // requests are pushed onto the front, so pull from the back. 
+ attachRequestToClient(client, pending_requests_.back()->decoder_, + pending_requests_.back()->callbacks_); + pending_requests_.pop_back(); + client.moveBetweenLists(ready_clients_, busy_clients_); + } +} + +void ConnPoolImpl::processIdleClient(ActiveClient& client, bool delay) { + client.stream_wrapper_.reset(); + if (pending_requests_.empty() || delay) { + // There is nothing to service or delayed processing is requested, so just move the connection + // into the ready list. + ENVOY_CONN_LOG(debug, "moving to ready", *client.codec_client_); + client.moveBetweenLists(busy_clients_, ready_clients_); + } else { + // There is work to do immediately so bind a request to the client and move it to the busy list. + // Pending requests are pushed onto the front, so pull from the back. + ENVOY_CONN_LOG(debug, "attaching to next request", *client.codec_client_); + attachRequestToClient(client, pending_requests_.back()->decoder_, + pending_requests_.back()->callbacks_); + pending_requests_.pop_back(); + } + + if (delay && !pending_requests_.empty() && !upstream_ready_enabled_) { + upstream_ready_enabled_ = true; + upstream_ready_timer_->enableTimer(std::chrono::milliseconds(0)); + } + + checkForDrained(); +} + +ConnPoolImpl::StreamWrapper::StreamWrapper(StreamDecoder& response_decoder, ActiveClient& parent) + : StreamEncoderWrapper(parent.codec_client_->newStream(*this)), + StreamDecoderWrapper(response_decoder), parent_(parent) { + + StreamEncoderWrapper::inner_.getStream().addCallbacks(*this); + parent_.parent_.host_->cluster().stats().upstream_rq_active_.inc(); + parent_.parent_.host_->stats().rq_active_.inc(); + + // TODO (tonya11en): At the time of writing, there is no way to mix different versions of HTTP + // traffic in the same cluster, so incrementing the request count in the per-cluster resource + // manager will not affect circuit breaking in any unexpected ways. Ideally, outstanding requests + // counts would be tracked the same way in all HTTP versions. 
+ // + // See: https://github.com/envoyproxy/envoy/issues/9215 + parent_.parent_.host_->cluster().resourceManager(parent_.parent_.priority_).requests().inc(); +} + +ConnPoolImpl::StreamWrapper::~StreamWrapper() { + parent_.parent_.host_->cluster().stats().upstream_rq_active_.dec(); + parent_.parent_.host_->stats().rq_active_.dec(); + parent_.parent_.host_->cluster().resourceManager(parent_.parent_.priority_).requests().dec(); +} + +void ConnPoolImpl::StreamWrapper::onEncodeComplete() { encode_complete_ = true; } + +void ConnPoolImpl::StreamWrapper::decodeHeaders(HeaderMapPtr&& headers, bool end_stream) { + // If Connection: close OR + // Http/1.0 and not Connection: keep-alive OR + // Proxy-Connection: close + if ((headers->Connection() && + (absl::EqualsIgnoreCase(headers->Connection()->value().getStringView(), + Headers::get().ConnectionValues.Close))) || + (parent_.codec_client_->protocol() == Protocol::Http10 && + (!headers->Connection() || + !absl::EqualsIgnoreCase(headers->Connection()->value().getStringView(), + Headers::get().ConnectionValues.KeepAlive))) || + (headers->ProxyConnection() && + (absl::EqualsIgnoreCase(headers->ProxyConnection()->value().getStringView(), + Headers::get().ConnectionValues.Close)))) { + parent_.parent_.host_->cluster().stats().upstream_cx_close_notify_.inc(); + close_connection_ = true; + } + + StreamDecoderWrapper::decodeHeaders(std::move(headers), end_stream); +} + +void ConnPoolImpl::StreamWrapper::onDecodeComplete() { + decode_complete_ = encode_complete_; + parent_.parent_.onResponseComplete(parent_); +} + +ConnPoolImpl::ActiveClient::ActiveClient(ConnPoolImpl& parent) + : ConnPoolImplBase::ActiveClient(parent.dispatcher_, parent.host_->cluster()), parent_(parent), + remaining_requests_(parent_.host_->cluster().maxRequestsPerConnection()) { + + Upstream::Host::CreateConnectionData data = parent_.host_->createConnection( + parent_.dispatcher_, parent_.socket_options_, parent_.transport_socket_options_); + 
real_host_description_ = data.host_description_; + codec_client_ = parent_.createCodecClient(data); + codec_client_->addConnectionCallbacks(*this); + + parent_.host_->cluster().stats().upstream_cx_total_.inc(); + parent_.host_->cluster().stats().upstream_cx_active_.inc(); + parent_.host_->cluster().stats().upstream_cx_http1_total_.inc(); + parent_.host_->stats().cx_total_.inc(); + parent_.host_->stats().cx_active_.inc(); + parent_.host_->cluster().resourceManager(parent_.priority_).connections().inc(); + + codec_client_->setConnectionStats( + {parent_.host_->cluster().stats().upstream_cx_rx_bytes_total_, + parent_.host_->cluster().stats().upstream_cx_rx_bytes_buffered_, + parent_.host_->cluster().stats().upstream_cx_tx_bytes_total_, + parent_.host_->cluster().stats().upstream_cx_tx_bytes_buffered_, + &parent_.host_->cluster().stats().bind_errors_, nullptr}); +} + +ConnPoolImpl::ActiveClient::~ActiveClient() { + parent_.host_->cluster().stats().upstream_cx_active_.dec(); + parent_.host_->stats().cx_active_.dec(); + parent_.host_->cluster().resourceManager(parent_.priority_).connections().dec(); +} + +void ConnPoolImpl::ActiveClient::onConnectTimeout() { + // We just close the client at this point. This will result in both a timeout and a connect + // failure and will fold into all the normal connect failure logic. 
+ ENVOY_CONN_LOG(debug, "connect timeout", *codec_client_); + parent_.host_->cluster().stats().upstream_cx_connect_timeout_.inc(); + codec_client_->close(); +} + +CodecClientPtr ProdConnPoolImpl::createCodecClient(Upstream::Host::CreateConnectionData& data) { + CodecClientPtr codec{new CodecClientProd(CodecClient::Type::HTTP1, std::move(data.connection_), + data.host_description_, dispatcher_)}; + return codec; +} + +} // namespace Http1 +} // namespace Legacy +} // namespace Http +} // namespace Envoy diff --git a/source/common/http/http1/conn_pool_legacy.h b/source/common/http/http1/conn_pool_legacy.h new file mode 100644 index 0000000000000..e2f9aa0309888 --- /dev/null +++ b/source/common/http/http1/conn_pool_legacy.h @@ -0,0 +1,149 @@ +#pragma once + +#include +#include +#include + +#include "envoy/event/deferred_deletable.h" +#include "envoy/event/timer.h" +#include "envoy/http/codec.h" +#include "envoy/http/conn_pool.h" +#include "envoy/network/connection.h" +#include "envoy/stats/timespan.h" +#include "envoy/upstream/upstream.h" + +#include "common/common/linked_object.h" +#include "common/http/codec_client.h" +#include "common/http/codec_wrappers.h" +#include "common/http/conn_pool_base_legacy.h" + +#include "absl/types/optional.h" + +namespace Envoy { +namespace Http { +namespace Legacy { +namespace Http1 { + +/** + * A connection pool implementation for HTTP/1.1 connections. + * NOTE: The connection pool does NOT do DNS resolution. It assumes it is being given a numeric IP + * address. Higher layer code should handle resolving DNS on error and creating a new pool + * bound to a different IP address. 
+ */ +class ConnPoolImpl : public ConnectionPool::Instance, public Legacy::ConnPoolImplBase { +public: + ConnPoolImpl(Event::Dispatcher& dispatcher, Upstream::HostConstSharedPtr host, + Upstream::ResourcePriority priority, + const Network::ConnectionSocket::OptionsSharedPtr& options, + const Network::TransportSocketOptionsSharedPtr& transport_socket_options); + + ~ConnPoolImpl() override; + + // ConnectionPool::Instance + Http::Protocol protocol() const override { return Http::Protocol::Http11; } + void addDrainedCallback(DrainedCb cb) override; + void drainConnections() override; + bool hasActiveConnections() const override; + ConnectionPool::Cancellable* newStream(StreamDecoder& response_decoder, + ConnectionPool::Callbacks& callbacks) override; + Upstream::HostDescriptionConstSharedPtr host() const override { return host_; }; + + // ConnPoolImplBase + void checkForDrained() override; + +protected: + struct ActiveClient; + + struct StreamWrapper : public StreamEncoderWrapper, + public StreamDecoderWrapper, + public StreamCallbacks { + StreamWrapper(StreamDecoder& response_decoder, ActiveClient& parent); + ~StreamWrapper() override; + + // StreamEncoderWrapper + void onEncodeComplete() override; + + // StreamDecoderWrapper + void decodeHeaders(HeaderMapPtr&& headers, bool end_stream) override; + void onPreDecodeComplete() override {} + void onDecodeComplete() override; + + // Http::StreamCallbacks + void onResetStream(StreamResetReason, absl::string_view) override { + parent_.parent_.onDownstreamReset(parent_); + } + void onAboveWriteBufferHighWatermark() override {} + void onBelowWriteBufferLowWatermark() override {} + + ActiveClient& parent_; + bool encode_complete_{}; + bool close_connection_{}; + bool decode_complete_{}; + }; + + using StreamWrapperPtr = std::unique_ptr; + + struct ActiveClient : ConnPoolImplBase::ActiveClient, + LinkedObject, + public Network::ConnectionCallbacks, + public Event::DeferredDeletable { + ActiveClient(ConnPoolImpl& parent); + 
~ActiveClient() override; + + void onConnectTimeout() override; + + // Network::ConnectionCallbacks + void onEvent(Network::ConnectionEvent event) override { + parent_.onConnectionEvent(*this, event); + } + void onAboveWriteBufferHighWatermark() override {} + void onBelowWriteBufferLowWatermark() override {} + + ConnPoolImpl& parent_; + CodecClientPtr codec_client_; + Upstream::HostDescriptionConstSharedPtr real_host_description_; + StreamWrapperPtr stream_wrapper_; + uint64_t remaining_requests_; + }; + + using ActiveClientPtr = std::unique_ptr; + + void attachRequestToClient(ActiveClient& client, StreamDecoder& response_decoder, + ConnectionPool::Callbacks& callbacks); + virtual CodecClientPtr createCodecClient(Upstream::Host::CreateConnectionData& data) PURE; + void createNewConnection(); + void onConnectionEvent(ActiveClient& client, Network::ConnectionEvent event); + void onDownstreamReset(ActiveClient& client); + void onResponseComplete(ActiveClient& client); + void onUpstreamReady(); + void processIdleClient(ActiveClient& client, bool delay); + + Event::Dispatcher& dispatcher_; + std::list ready_clients_; + std::list busy_clients_; + std::list drained_callbacks_; + const Network::ConnectionSocket::OptionsSharedPtr socket_options_; + const Network::TransportSocketOptionsSharedPtr transport_socket_options_; + Event::TimerPtr upstream_ready_timer_; + bool upstream_ready_enabled_{false}; +}; + +/** + * Production implementation of the ConnPoolImpl. 
+ */ +class ProdConnPoolImpl : public ConnPoolImpl { +public: + ProdConnPoolImpl(Event::Dispatcher& dispatcher, Upstream::HostConstSharedPtr host, + Upstream::ResourcePriority priority, + const Network::ConnectionSocket::OptionsSharedPtr& options, + const Network::TransportSocketOptionsSharedPtr& transport_socket_options) + : ConnPoolImpl(dispatcher, host, priority, options, transport_socket_options) {} + + // ConnPoolImpl + CodecClientPtr createCodecClient(Upstream::Host::CreateConnectionData& data) override; +}; + +} // namespace Http1 +} // namespace Legacy +} // namespace Http +} // namespace Envoy diff --git a/source/common/http/http2/BUILD b/source/common/http/http2/BUILD index cb80a566c5aaf..23bc4134e7775 100644 --- a/source/common/http/http2/BUILD +++ b/source/common/http/http2/BUILD @@ -56,11 +56,31 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "conn_pool_legacy_lib", + srcs = ["conn_pool_legacy.cc"], + hdrs = ["conn_pool_legacy.h"], + deps = [ + "//include/envoy/event:dispatcher_interface", + "//include/envoy/event:timer_interface", + "//include/envoy/http:conn_pool_interface", + "//include/envoy/network:connection_interface", + "//include/envoy/stats:timespan_interface", + "//include/envoy/upstream:upstream_interface", + "//source/common/http:codec_client_lib", + "//source/common/http:conn_pool_base_legacy_lib", + "//source/common/network:utility_lib", + "//source/common/stats:timespan_lib", + "//source/common/upstream:upstream_lib", + ], +) + envoy_cc_library( name = "conn_pool_lib", srcs = ["conn_pool.cc"], hdrs = ["conn_pool.h"], deps = [ + ":conn_pool_legacy_lib", "//include/envoy/event:dispatcher_interface", "//include/envoy/event:timer_interface", "//include/envoy/http:conn_pool_interface", diff --git a/source/common/http/http2/conn_pool.cc b/source/common/http/http2/conn_pool.cc index e9c0ba63e3cf8..52fc5e2bb8b37 100644 --- a/source/common/http/http2/conn_pool.cc +++ b/source/common/http/http2/conn_pool.cc @@ -8,6 +8,7 @@ #include 
"envoy/upstream/upstream.h" #include "common/http/http2/codec_impl.h" +#include "common/http/http2/conn_pool_legacy.h" #include "common/network/utility.h" namespace Envoy { @@ -94,6 +95,21 @@ CodecClientPtr ProdConnPoolImpl::createCodecClient(Upstream::Host::CreateConnect return codec; } +ConnectionPool::InstancePtr +allocateConnPool(Event::Dispatcher& dispatcher, Upstream::HostConstSharedPtr host, + Upstream::ResourcePriority priority, + const Network::ConnectionSocket::OptionsSharedPtr& options, + const Network::TransportSocketOptionsSharedPtr& transport_socket_options) { + if (Runtime::runtimeFeatureEnabled( + "envoy.reloadable_features.new_http2_connection_pool_behavior")) { + return std::make_unique(dispatcher, host, priority, options, + transport_socket_options); + } else { + return std::make_unique( + dispatcher, host, priority, options, transport_socket_options); + } +} + } // namespace Http2 } // namespace Http } // namespace Envoy diff --git a/source/common/http/http2/conn_pool.h b/source/common/http/http2/conn_pool.h index 541e7b259d287..ee103a4e7f0fd 100644 --- a/source/common/http/http2/conn_pool.h +++ b/source/common/http/http2/conn_pool.h @@ -84,6 +84,12 @@ class ProdConnPoolImpl : public ConnPoolImpl { CodecClientPtr createCodecClient(Upstream::Host::CreateConnectionData& data) override; }; +ConnectionPool::InstancePtr +allocateConnPool(Event::Dispatcher& dispatcher, Upstream::HostConstSharedPtr host, + Upstream::ResourcePriority priority, + const Network::ConnectionSocket::OptionsSharedPtr& options, + const Network::TransportSocketOptionsSharedPtr& transport_socket_options); + } // namespace Http2 } // namespace Http } // namespace Envoy diff --git a/source/common/http/http2/conn_pool_legacy.cc b/source/common/http/http2/conn_pool_legacy.cc new file mode 100644 index 0000000000000..9e4c271fd6a5b --- /dev/null +++ b/source/common/http/http2/conn_pool_legacy.cc @@ -0,0 +1,309 @@ +#include "common/http/http2/conn_pool_legacy.h" + +#include +#include + 
+#include "envoy/event/dispatcher.h" +#include "envoy/event/timer.h" +#include "envoy/upstream/upstream.h" + +#include "common/http/http2/codec_impl.h" +#include "common/network/utility.h" +#include "common/upstream/upstream_impl.h" + +namespace Envoy { +namespace Http { +namespace Legacy { +namespace Http2 { + +ConnPoolImpl::ConnPoolImpl(Event::Dispatcher& dispatcher, Upstream::HostConstSharedPtr host, + Upstream::ResourcePriority priority, + const Network::ConnectionSocket::OptionsSharedPtr& options, + const Network::TransportSocketOptionsSharedPtr& transport_socket_options) + : ConnPoolImplBase(std::move(host), std::move(priority)), dispatcher_(dispatcher), + socket_options_(options), transport_socket_options_(transport_socket_options) {} + +ConnPoolImpl::~ConnPoolImpl() { + if (primary_client_) { + primary_client_->client_->close(); + } + + if (draining_client_) { + draining_client_->client_->close(); + } + + // Make sure all clients are destroyed before we are destroyed. + dispatcher_.clearDeferredDeleteList(); +} + +void ConnPoolImpl::ConnPoolImpl::drainConnections() { + if (primary_client_ != nullptr) { + movePrimaryClientToDraining(); + } +} + +void ConnPoolImpl::addDrainedCallback(DrainedCb cb) { + drained_callbacks_.push_back(cb); + checkForDrained(); +} + +bool ConnPoolImpl::hasActiveConnections() const { + if (primary_client_ && primary_client_->client_->numActiveRequests() > 0) { + return true; + } + + if (draining_client_ && draining_client_->client_->numActiveRequests() > 0) { + return true; + } + + return !pending_requests_.empty(); +} + +void ConnPoolImpl::checkForDrained() { + if (drained_callbacks_.empty()) { + return; + } + + bool drained = true; + if (primary_client_) { + if (primary_client_->client_->numActiveRequests() == 0) { + primary_client_->client_->close(); + ASSERT(!primary_client_); + } else { + drained = false; + } + } + + ASSERT(!draining_client_ || (draining_client_->client_->numActiveRequests() > 0)); + if (draining_client_ && 
draining_client_->client_->numActiveRequests() > 0) { + drained = false; + } + + if (drained) { + ENVOY_LOG(debug, "invoking drained callbacks"); + for (const DrainedCb& cb : drained_callbacks_) { + cb(); + } + } +} + +void ConnPoolImpl::newClientStream(Http::StreamDecoder& response_decoder, + ConnectionPool::Callbacks& callbacks) { + if (!host_->cluster().resourceManager(priority_).requests().canCreate()) { + ENVOY_LOG(debug, "max requests overflow"); + callbacks.onPoolFailure(ConnectionPool::PoolFailureReason::Overflow, absl::string_view(), + nullptr); + host_->cluster().stats().upstream_rq_pending_overflow_.inc(); + } else { + ENVOY_CONN_LOG(debug, "creating stream", *primary_client_->client_); + primary_client_->total_streams_++; + host_->stats().rq_total_.inc(); + host_->stats().rq_active_.inc(); + host_->cluster().stats().upstream_rq_total_.inc(); + host_->cluster().stats().upstream_rq_active_.inc(); + host_->cluster().resourceManager(priority_).requests().inc(); + callbacks.onPoolReady(primary_client_->client_->newStream(response_decoder), + primary_client_->real_host_description_, + primary_client_->client_->streamInfo()); + } +} + +ConnectionPool::Cancellable* ConnPoolImpl::newStream(Http::StreamDecoder& response_decoder, + ConnectionPool::Callbacks& callbacks) { + ASSERT(drained_callbacks_.empty()); + + // First see if we need to handle max streams rollover. + uint64_t max_streams = host_->cluster().maxRequestsPerConnection(); + if (max_streams == 0) { + max_streams = maxTotalStreams(); + } + + if (primary_client_ && primary_client_->total_streams_ >= max_streams) { + movePrimaryClientToDraining(); + } + + if (!primary_client_) { + primary_client_ = std::make_unique(*this); + } + + // If the primary client is not connected yet, queue up the request. + if (!primary_client_->upstream_ready_) { + // If we're not allowed to enqueue more requests, fail fast. 
+ if (!host_->cluster().resourceManager(priority_).pendingRequests().canCreate()) { + ENVOY_LOG(debug, "max pending requests overflow"); + callbacks.onPoolFailure(ConnectionPool::PoolFailureReason::Overflow, absl::string_view(), + nullptr); + host_->cluster().stats().upstream_rq_pending_overflow_.inc(); + return nullptr; + } + + return newPendingRequest(response_decoder, callbacks); + } + + // We already have an active client that's connected to upstream, so attempt to establish a + // new stream. + newClientStream(response_decoder, callbacks); + return nullptr; +} + +void ConnPoolImpl::onConnectionEvent(ActiveClient& client, Network::ConnectionEvent event) { + if (event == Network::ConnectionEvent::RemoteClose || + event == Network::ConnectionEvent::LocalClose) { + ENVOY_CONN_LOG(debug, "client disconnected", *client.client_); + + Envoy::Upstream::reportUpstreamCxDestroy(host_, event); + if (client.closed_with_active_rq_) { + Envoy::Upstream::reportUpstreamCxDestroyActiveRequest(host_, event); + } + + if (client.connectionState() == ConnPoolImplBase::ActiveClient::ConnectionState::Connecting) { + host_->cluster().stats().upstream_cx_connect_fail_.inc(); + host_->stats().cx_connect_fail_.inc(); + + // Raw connect failures should never happen under normal circumstances. If we have an upstream + // that is behaving badly, requests can get stuck here in the pending state. If we see a + // connect failure, we purge all pending requests so that calling code can determine what to + // do with the request. + // NOTE: We move the existing pending requests to a temporary list. This is done so that + // if retry logic submits a new request to the pool, we don't fail it inline. 
+ purgePendingRequests(client.real_host_description_, + client.client_->connectionFailureReason()); + } + + if (&client == primary_client_.get()) { + ENVOY_CONN_LOG(debug, "destroying primary client", *client.client_); + dispatcher_.deferredDelete(std::move(primary_client_)); + } else { + ENVOY_CONN_LOG(debug, "destroying draining client", *client.client_); + dispatcher_.deferredDelete(std::move(draining_client_)); + } + + if (client.closed_with_active_rq_) { + checkForDrained(); + } + } + + if (event == Network::ConnectionEvent::Connected) { + client.recordConnectionSetup(); + + client.upstream_ready_ = true; + onUpstreamReady(); + } + + client.disarmConnectTimeout(); +} + +void ConnPoolImpl::movePrimaryClientToDraining() { + ENVOY_CONN_LOG(debug, "moving primary to draining", *primary_client_->client_); + if (draining_client_) { + // This should pretty much never happen, but is possible if we start draining and then get + // a goaway for example. In this case just kill the current draining connection. It's not + // worth keeping a list. + draining_client_->client_->close(); + } + + ASSERT(!draining_client_); + if (primary_client_->client_->numActiveRequests() == 0) { + // If we are making a new connection and the primary does not have any active requests just + // close it now. 
+ primary_client_->client_->close(); + } else { + draining_client_ = std::move(primary_client_); + } + + ASSERT(!primary_client_); +} + +void ConnPoolImpl::onConnectTimeout(ActiveClient& client) { + ENVOY_CONN_LOG(debug, "connect timeout", *client.client_); + host_->cluster().stats().upstream_cx_connect_timeout_.inc(); + client.client_->close(); +} + +void ConnPoolImpl::onGoAway(ActiveClient& client) { + ENVOY_CONN_LOG(debug, "remote goaway", *client.client_); + host_->cluster().stats().upstream_cx_close_notify_.inc(); + if (&client == primary_client_.get()) { + movePrimaryClientToDraining(); + } +} + +void ConnPoolImpl::onStreamDestroy(ActiveClient& client) { + ENVOY_CONN_LOG(debug, "destroying stream: {} remaining", *client.client_, + client.client_->numActiveRequests()); + host_->stats().rq_active_.dec(); + host_->cluster().stats().upstream_rq_active_.dec(); + host_->cluster().resourceManager(priority_).requests().dec(); + if (&client == draining_client_.get() && client.client_->numActiveRequests() == 0) { + // Close out the draining client if we no longer have active requests. + client.client_->close(); + } + + // If we are destroying this stream because of a disconnect, do not check for drain here. We will + // wait until the connection has been fully drained of streams and then check in the connection + // event callback. 
+ if (!client.closed_with_active_rq_) { + checkForDrained(); + } +} + +void ConnPoolImpl::onStreamReset(ActiveClient& client, Http::StreamResetReason reason) { + if (reason == StreamResetReason::ConnectionTermination || + reason == StreamResetReason::ConnectionFailure) { + host_->cluster().stats().upstream_rq_pending_failure_eject_.inc(); + client.closed_with_active_rq_ = true; + } else if (reason == StreamResetReason::LocalReset) { + host_->cluster().stats().upstream_rq_tx_reset_.inc(); + } else if (reason == StreamResetReason::RemoteReset) { + host_->cluster().stats().upstream_rq_rx_reset_.inc(); + } +} + +void ConnPoolImpl::onUpstreamReady() { + // Establishes new codec streams for each pending request. + while (!pending_requests_.empty()) { + newClientStream(pending_requests_.back()->decoder_, pending_requests_.back()->callbacks_); + pending_requests_.pop_back(); + } +} + +ConnPoolImpl::ActiveClient::ActiveClient(ConnPoolImpl& parent) + : ConnPoolImplBase::ActiveClient(parent.dispatcher_, parent.host_->cluster()), parent_(parent) { + Upstream::Host::CreateConnectionData data = parent_.host_->createConnection( + parent_.dispatcher_, parent_.socket_options_, parent_.transport_socket_options_); + real_host_description_ = data.host_description_; + client_ = parent_.createCodecClient(data); + client_->addConnectionCallbacks(*this); + client_->setCodecClientCallbacks(*this); + client_->setCodecConnectionCallbacks(*this); + + parent_.host_->stats().cx_total_.inc(); + parent_.host_->stats().cx_active_.inc(); + parent_.host_->cluster().stats().upstream_cx_total_.inc(); + parent_.host_->cluster().stats().upstream_cx_active_.inc(); + parent_.host_->cluster().stats().upstream_cx_http2_total_.inc(); + + client_->setConnectionStats({parent_.host_->cluster().stats().upstream_cx_rx_bytes_total_, + parent_.host_->cluster().stats().upstream_cx_rx_bytes_buffered_, + parent_.host_->cluster().stats().upstream_cx_tx_bytes_total_, + 
parent_.host_->cluster().stats().upstream_cx_tx_bytes_buffered_, + &parent_.host_->cluster().stats().bind_errors_, nullptr}); +} + +ConnPoolImpl::ActiveClient::~ActiveClient() { + parent_.host_->stats().cx_active_.dec(); + parent_.host_->cluster().stats().upstream_cx_active_.dec(); +} + +CodecClientPtr ProdConnPoolImpl::createCodecClient(Upstream::Host::CreateConnectionData& data) { + CodecClientPtr codec{new CodecClientProd(CodecClient::Type::HTTP2, std::move(data.connection_), + data.host_description_, dispatcher_)}; + return codec; +} + +uint32_t ProdConnPoolImpl::maxTotalStreams() { return MAX_STREAMS; } + +} // namespace Http2 +} // namespace Legacy +} // namespace Http +} // namespace Envoy diff --git a/source/common/http/http2/conn_pool_legacy.h b/source/common/http/http2/conn_pool_legacy.h new file mode 100644 index 0000000000000..6fef4d989de0b --- /dev/null +++ b/source/common/http/http2/conn_pool_legacy.h @@ -0,0 +1,121 @@ +#pragma once + +#include +#include +#include + +#include "envoy/event/timer.h" +#include "envoy/http/conn_pool.h" +#include "envoy/network/connection.h" +#include "envoy/stats/timespan.h" +#include "envoy/upstream/upstream.h" + +#include "common/http/codec_client.h" +#include "common/http/conn_pool_base_legacy.h" + +namespace Envoy { +namespace Http { +namespace Legacy { +namespace Http2 { + +/** + * Implementation of a "connection pool" for HTTP/2. This mainly handles stats as well as + * shifting to a new connection if we reach max streams on the primary. This is a base class + * used for both the prod implementation as well as the testing one. 
+ */ +class ConnPoolImpl : public ConnectionPool::Instance, public Legacy::ConnPoolImplBase { +public: + ConnPoolImpl(Event::Dispatcher& dispatcher, Upstream::HostConstSharedPtr host, + Upstream::ResourcePriority priority, + const Network::ConnectionSocket::OptionsSharedPtr& options, + const Network::TransportSocketOptionsSharedPtr& transport_socket_options); + ~ConnPoolImpl() override; + + // Http::ConnectionPool::Instance + Http::Protocol protocol() const override { return Http::Protocol::Http2; } + void addDrainedCallback(DrainedCb cb) override; + void drainConnections() override; + bool hasActiveConnections() const override; + ConnectionPool::Cancellable* newStream(Http::StreamDecoder& response_decoder, + ConnectionPool::Callbacks& callbacks) override; + Upstream::HostDescriptionConstSharedPtr host() const override { return host_; }; + +protected: + struct ActiveClient : ConnPoolImplBase::ActiveClient, + public Network::ConnectionCallbacks, + public CodecClientCallbacks, + public Event::DeferredDeletable, + public Http::ConnectionCallbacks { + ActiveClient(ConnPoolImpl& parent); + ~ActiveClient() override; + + void onConnectTimeout() override { parent_.onConnectTimeout(*this); } + + // Network::ConnectionCallbacks + void onEvent(Network::ConnectionEvent event) override { + parent_.onConnectionEvent(*this, event); + } + void onAboveWriteBufferHighWatermark() override {} + void onBelowWriteBufferLowWatermark() override {} + + // CodecClientCallbacks + void onStreamDestroy() override { parent_.onStreamDestroy(*this); } + void onStreamReset(Http::StreamResetReason reason) override { + parent_.onStreamReset(*this, reason); + } + + // Http::ConnectionCallbacks + void onGoAway() override { parent_.onGoAway(*this); } + + ConnPoolImpl& parent_; + CodecClientPtr client_; + Upstream::HostDescriptionConstSharedPtr real_host_description_; + uint64_t total_streams_{}; + bool upstream_ready_{}; + bool closed_with_active_rq_{}; + }; + + using ActiveClientPtr = std::unique_ptr; 
+ + // Http::ConnPoolImplBase + void checkForDrained() override; + + virtual CodecClientPtr createCodecClient(Upstream::Host::CreateConnectionData& data) PURE; + virtual uint32_t maxTotalStreams() PURE; + void movePrimaryClientToDraining(); + void onConnectionEvent(ActiveClient& client, Network::ConnectionEvent event); + void onConnectTimeout(ActiveClient& client); + void onGoAway(ActiveClient& client); + void onStreamDestroy(ActiveClient& client); + void onStreamReset(ActiveClient& client, Http::StreamResetReason reason); + void newClientStream(Http::StreamDecoder& response_decoder, ConnectionPool::Callbacks& callbacks); + void onUpstreamReady(); + + Event::Dispatcher& dispatcher_; + ActiveClientPtr primary_client_; + ActiveClientPtr draining_client_; + std::list drained_callbacks_; + const Network::ConnectionSocket::OptionsSharedPtr socket_options_; + const Network::TransportSocketOptionsSharedPtr transport_socket_options_; +}; + +/** + * Production implementation of the HTTP/2 connection pool. + */ +class ProdConnPoolImpl : public ConnPoolImpl { +public: + using ConnPoolImpl::ConnPoolImpl; + +private: + CodecClientPtr createCodecClient(Upstream::Host::CreateConnectionData& data) override; + uint32_t maxTotalStreams() override; + + // All streams are 2^31. Client streams are half that, minus stream 0. Just to be on the safe + // side we do 2^29. 
+ static const uint64_t MAX_STREAMS = (1 << 29); +}; + +} // namespace Http2 +} // namespace Legacy +} // namespace Http +} // namespace Envoy diff --git a/source/common/runtime/runtime_features.cc b/source/common/runtime/runtime_features.cc index 0f2432524e26d..9d88371b309aa 100644 --- a/source/common/runtime/runtime_features.cc +++ b/source/common/runtime/runtime_features.cc @@ -32,6 +32,8 @@ constexpr const char* runtime_features[] = { "envoy.reloadable_features.strict_authority_validation", "envoy.reloadable_features.reject_unsupported_transfer_encodings", "envoy.reloadable_features.strict_method_validation", + "envoy.reloadable_features.new_http1_connection_pool_behavior", + "envoy.reloadable_features.new_http2_connection_pool_behavior", }; // This is a section for officially sanctioned runtime features which are too diff --git a/source/common/upstream/cluster_manager_impl.cc b/source/common/upstream/cluster_manager_impl.cc index dbfa84cf6d590..f23868422afcb 100644 --- a/source/common/upstream/cluster_manager_impl.cc +++ b/source/common/upstream/cluster_manager_impl.cc @@ -1333,14 +1333,14 @@ Http::ConnectionPool::InstancePtr ProdClusterManagerFactory::allocateConnPool( const Network::TransportSocketOptionsSharedPtr& transport_socket_options) { if (protocol == Http::Protocol::Http2 && runtime_.snapshot().featureEnabled("upstream.use_http2", 100)) { - return std::make_unique(dispatcher, host, priority, options, - transport_socket_options); + return Http::Http2::allocateConnPool(dispatcher, host, priority, options, + transport_socket_options); } else if (protocol == Http::Protocol::Http3) { // Quic connection pool is not implemented. 
NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } else { - return std::make_unique(dispatcher, host, priority, options, - transport_socket_options); + return Http::Http1::allocateConnPool(dispatcher, host, priority, options, + transport_socket_options); } } diff --git a/test/common/http/http1/BUILD b/test/common/http/http1/BUILD index e117fa725623c..64e734e4620d0 100644 --- a/test/common/http/http1/BUILD +++ b/test/common/http/http1/BUILD @@ -63,3 +63,27 @@ envoy_cc_test( "//test/test_common:utility_lib", ], ) + +envoy_cc_test( + name = "conn_pool_legacy_test", + srcs = ["conn_pool_legacy_test.cc"], + deps = [ + "//source/common/buffer:buffer_lib", + "//source/common/event:dispatcher_lib", + "//source/common/http:codec_client_lib", + "//source/common/http/http1:conn_pool_legacy_lib", + "//source/common/network:utility_lib", + "//source/common/upstream:upstream_includes", + "//source/common/upstream:upstream_lib", + "//test/common/http:common_lib", + "//test/common/upstream:utility_lib", + "//test/mocks/buffer:buffer_mocks", + "//test/mocks/event:event_mocks", + "//test/mocks/http:http_mocks", + "//test/mocks/network:network_mocks", + "//test/mocks/runtime:runtime_mocks", + "//test/mocks/upstream:upstream_mocks", + "//test/test_common:simulated_time_system_lib", + "//test/test_common:utility_lib", + ], +) diff --git a/test/common/http/http1/conn_pool_legacy_test.cc b/test/common/http/http1/conn_pool_legacy_test.cc new file mode 100644 index 0000000000000..5c85386d1da17 --- /dev/null +++ b/test/common/http/http1/conn_pool_legacy_test.cc @@ -0,0 +1,957 @@ +#include +#include + +#include "envoy/http/codec.h" + +#include "common/buffer/buffer_impl.h" +#include "common/event/dispatcher_impl.h" +#include "common/http/codec_client.h" +#include "common/http/http1/conn_pool_legacy.h" +#include "common/network/utility.h" +#include "common/upstream/upstream_impl.h" + +#include "test/common/http/common.h" +#include "test/common/upstream/utility.h" +#include "test/mocks/buffer/mocks.h" 
+#include "test/mocks/event/mocks.h" +#include "test/mocks/http/mocks.h" +#include "test/mocks/network/mocks.h" +#include "test/mocks/runtime/mocks.h" +#include "test/mocks/upstream/mocks.h" +#include "test/test_common/printers.h" +#include "test/test_common/simulated_time_system.h" +#include "test/test_common/utility.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +using testing::_; +using testing::DoAll; +using testing::InSequence; +using testing::Invoke; +using testing::NiceMock; +using testing::Property; +using testing::Return; +using testing::ReturnRef; +using testing::SaveArg; + +namespace Envoy { +namespace Http { +namespace Legacy { +namespace Http1 { +namespace { + +/** + * A test version of ConnPoolImpl that allows for mocking beneath the codec clients. + */ +class ConnPoolImplForTest : public ConnPoolImpl { +public: + ConnPoolImplForTest(Event::MockDispatcher& dispatcher, + Upstream::ClusterInfoConstSharedPtr cluster, + NiceMock* upstream_ready_timer) + : ConnPoolImpl(dispatcher, Upstream::makeTestHost(cluster, "tcp://127.0.0.1:9000"), + Upstream::ResourcePriority::Default, nullptr, nullptr), + api_(Api::createApiForTest()), mock_dispatcher_(dispatcher), + mock_upstream_ready_timer_(upstream_ready_timer) {} + + ~ConnPoolImplForTest() override { + EXPECT_EQ(0U, ready_clients_.size()); + EXPECT_EQ(0U, busy_clients_.size()); + EXPECT_EQ(0U, pending_requests_.size()); + } + + struct TestCodecClient { + Http::MockClientConnection* codec_; + Network::MockClientConnection* connection_; + CodecClient* codec_client_; + Event::MockTimer* connect_timer_; + Event::DispatcherPtr client_dispatcher_; + }; + + CodecClientPtr createCodecClient(Upstream::Host::CreateConnectionData& data) override { + // We expect to own the connection, but already have it, so just release it to prevent it from + // getting deleted. 
+ data.connection_.release(); + return CodecClientPtr{createCodecClient_()}; + } + + MOCK_METHOD0(createCodecClient_, CodecClient*()); + MOCK_METHOD0(onClientDestroy, void()); + + void expectClientCreate(Protocol protocol = Protocol::Http11) { + test_clients_.emplace_back(); + TestCodecClient& test_client = test_clients_.back(); + test_client.connection_ = new NiceMock(); + test_client.codec_ = new NiceMock(); + test_client.connect_timer_ = new NiceMock(&mock_dispatcher_); + std::shared_ptr cluster{new NiceMock()}; + test_client.client_dispatcher_ = api_->allocateDispatcher(); + Network::ClientConnectionPtr connection{test_client.connection_}; + test_client.codec_client_ = new CodecClientForTest( + CodecClient::Type::HTTP1, std::move(connection), test_client.codec_, + [this](CodecClient* codec_client) -> void { + for (auto i = test_clients_.begin(); i != test_clients_.end(); i++) { + if (i->codec_client_ == codec_client) { + onClientDestroy(); + test_clients_.erase(i); + return; + } + } + }, + Upstream::makeTestHost(cluster, "tcp://127.0.0.1:9000"), *test_client.client_dispatcher_); + EXPECT_CALL(*test_client.connect_timer_, enableTimer(_, _)); + EXPECT_CALL(mock_dispatcher_, createClientConnection_(_, _, _, _)) + .WillOnce(Return(test_client.connection_)); + EXPECT_CALL(*this, createCodecClient_()).WillOnce(Return(test_client.codec_client_)); + ON_CALL(*test_client.codec_, protocol()).WillByDefault(Return(protocol)); + } + + void expectEnableUpstreamReady() { + EXPECT_FALSE(upstream_ready_enabled_); + EXPECT_CALL(*mock_upstream_ready_timer_, enableTimer(_, _)).Times(1).RetiresOnSaturation(); + } + + void expectAndRunUpstreamReady() { + EXPECT_TRUE(upstream_ready_enabled_); + mock_upstream_ready_timer_->invokeCallback(); + EXPECT_FALSE(upstream_ready_enabled_); + } + + Api::ApiPtr api_; + Event::MockDispatcher& mock_dispatcher_; + NiceMock* mock_upstream_ready_timer_; + std::vector test_clients_; +}; + +/** + * Test fixture for all connection pool tests. 
+ */ +class Http1ConnPoolImplTest : public testing::Test { +public: + Http1ConnPoolImplTest() + : upstream_ready_timer_(new NiceMock(&dispatcher_)), + conn_pool_(dispatcher_, cluster_, upstream_ready_timer_) {} + + ~Http1ConnPoolImplTest() override { + EXPECT_TRUE(TestUtility::gaugesZeroed(cluster_->stats_store_.gauges())); + } + + NiceMock dispatcher_; + std::shared_ptr cluster_{new NiceMock()}; + NiceMock* upstream_ready_timer_; + ConnPoolImplForTest conn_pool_; + NiceMock runtime_; +}; + +/** + * Helper for dealing with an active test request. + */ +struct ActiveTestRequest { + enum class Type { Pending, CreateConnection, Immediate }; + + ActiveTestRequest(Http1ConnPoolImplTest& parent, size_t client_index, Type type) + : parent_(parent), client_index_(client_index) { + uint64_t active_rq_observed = + parent_.cluster_->resourceManager(Upstream::ResourcePriority::Default).requests().count(); + uint64_t current_rq_total = parent_.cluster_->stats_.upstream_rq_total_.value(); + if (type == Type::CreateConnection) { + parent.conn_pool_.expectClientCreate(); + } + + if (type == Type::Immediate) { + expectNewStream(); + } + + handle_ = parent.conn_pool_.newStream(outer_decoder_, callbacks_); + + if (type == Type::Immediate) { + EXPECT_EQ(nullptr, handle_); + } else { + EXPECT_NE(nullptr, handle_); + } + + if (type == Type::CreateConnection) { + EXPECT_CALL(*parent_.conn_pool_.test_clients_[client_index_].connect_timer_, disableTimer()); + expectNewStream(); + parent.conn_pool_.test_clients_[client_index_].connection_->raiseEvent( + Network::ConnectionEvent::Connected); + } + if (type != Type::Pending) { + EXPECT_EQ(current_rq_total + 1, parent_.cluster_->stats_.upstream_rq_total_.value()); + EXPECT_EQ(active_rq_observed + 1, + parent_.cluster_->resourceManager(Upstream::ResourcePriority::Default) + .requests() + .count()); + } + } + + void completeResponse(bool with_body) { + // Test additional metric writes also. 
+ Http::HeaderMapPtr response_headers( + new TestHeaderMapImpl{{":status", "200"}, {"x-envoy-upstream-canary", "true"}}); + + inner_decoder_->decodeHeaders(std::move(response_headers), !with_body); + if (with_body) { + Buffer::OwnedImpl data; + inner_decoder_->decodeData(data, true); + } + } + + void expectNewStream() { + EXPECT_CALL(*parent_.conn_pool_.test_clients_[client_index_].codec_, newStream(_)) + .WillOnce(DoAll(SaveArgAddress(&inner_decoder_), ReturnRef(request_encoder_))); + EXPECT_CALL(callbacks_.pool_ready_, ready()); + } + + void startRequest() { callbacks_.outer_encoder_->encodeHeaders(TestHeaderMapImpl{}, true); } + + Http1ConnPoolImplTest& parent_; + size_t client_index_; + NiceMock outer_decoder_; + Http::ConnectionPool::Cancellable* handle_{}; + NiceMock request_encoder_; + Http::StreamDecoder* inner_decoder_{}; + ConnPoolCallbacks callbacks_; +}; + +/** + * Verify that the pool's host is a member of the cluster the pool was constructed with. + */ +TEST_F(Http1ConnPoolImplTest, Host) { EXPECT_EQ(cluster_.get(), &conn_pool_.host()->cluster()); } + +/** + * Verify that connections are drained when requested. + */ +TEST_F(Http1ConnPoolImplTest, DrainConnections) { + cluster_->resetResourceManager(2, 1024, 1024, 1, 1); + InSequence s; + + ActiveTestRequest r1(*this, 0, ActiveTestRequest::Type::CreateConnection); + r1.startRequest(); + + ActiveTestRequest r2(*this, 1, ActiveTestRequest::Type::CreateConnection); + r2.startRequest(); + + r1.completeResponse(false); + + // This will destroy the ready client and set requests remaining to 1 on the busy client. + conn_pool_.drainConnections(); + EXPECT_CALL(conn_pool_, onClientDestroy()); + dispatcher_.clearDeferredDeleteList(); + + // This will destroy the busy client when the response finishes. + r2.completeResponse(false); + EXPECT_CALL(conn_pool_, onClientDestroy()); + dispatcher_.clearDeferredDeleteList(); +} + +/** + * Test all timing stats are set. 
+ */ +TEST_F(Http1ConnPoolImplTest, VerifyTimingStats) { + EXPECT_CALL(cluster_->stats_store_, + deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_cx_connect_ms"), _)); + EXPECT_CALL(cluster_->stats_store_, + deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_cx_length_ms"), _)); + + ActiveTestRequest r1(*this, 0, ActiveTestRequest::Type::CreateConnection); + r1.startRequest(); + r1.completeResponse(false); + + EXPECT_CALL(conn_pool_, onClientDestroy()); + conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + dispatcher_.clearDeferredDeleteList(); +} + +/** + * Test that buffer limits are set. + */ +TEST_F(Http1ConnPoolImplTest, VerifyBufferLimits) { + NiceMock outer_decoder; + ConnPoolCallbacks callbacks; + conn_pool_.expectClientCreate(); + EXPECT_CALL(*cluster_, perConnectionBufferLimitBytes()).WillOnce(Return(8192)); + EXPECT_CALL(*conn_pool_.test_clients_.back().connection_, setBufferLimits(8192)); + Http::ConnectionPool::Cancellable* handle = conn_pool_.newStream(outer_decoder, callbacks); + EXPECT_NE(nullptr, handle); + + EXPECT_CALL(conn_pool_, onClientDestroy()); + EXPECT_CALL(callbacks.pool_failure_, ready()); + conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + dispatcher_.clearDeferredDeleteList(); +} + +/** + * Verify that canceling pending connections within the callback works. + */ +TEST_F(Http1ConnPoolImplTest, VerifyCancelInCallback) { + Http::ConnectionPool::Cancellable* handle1{}; + // In this scenario, all connections must succeed, so when + // one fails, the others are canceled. + // Note: We rely on the fact that the implementation cancels the second request first, + // to simplify the test. 
+ ConnPoolCallbacks callbacks1; + EXPECT_CALL(callbacks1.pool_failure_, ready()).Times(0); + ConnPoolCallbacks callbacks2; + EXPECT_CALL(callbacks2.pool_failure_, ready()).WillOnce(Invoke([&]() -> void { + handle1->cancel(); + })); + + NiceMock outer_decoder; + // Create the first client. + conn_pool_.expectClientCreate(); + handle1 = conn_pool_.newStream(outer_decoder, callbacks1); + ASSERT_NE(nullptr, handle1); + + // Create the second client. + Http::ConnectionPool::Cancellable* handle2 = conn_pool_.newStream(outer_decoder, callbacks2); + ASSERT_NE(nullptr, handle2); + + // Simulate connection failure. + EXPECT_CALL(conn_pool_, onClientDestroy()); + conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + dispatcher_.clearDeferredDeleteList(); +} + +/** + * Tests a request that generates a new connection, completes, and then a second request that uses + * the same connection. + */ +TEST_F(Http1ConnPoolImplTest, MultipleRequestAndResponse) { + InSequence s; + + // Request 1 should kick off a new connection. + ActiveTestRequest r1(*this, 0, ActiveTestRequest::Type::CreateConnection); + r1.startRequest(); + r1.completeResponse(false); + + // Request 2 should not. + ActiveTestRequest r2(*this, 0, ActiveTestRequest::Type::Immediate); + r2.startRequest(); + r2.completeResponse(true); + + // Cause the connection to go away. + EXPECT_CALL(conn_pool_, onClientDestroy()); + conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + dispatcher_.clearDeferredDeleteList(); +} + +/** + * Test when we overflow max pending requests. 
+ */ +TEST_F(Http1ConnPoolImplTest, MaxPendingRequests) { + cluster_->resetResourceManager(1, 1, 1024, 1, 1); + + EXPECT_EQ(0U, cluster_->circuit_breakers_stats_.rq_pending_open_.value()); + + NiceMock outer_decoder; + ConnPoolCallbacks callbacks; + conn_pool_.expectClientCreate(); + Http::ConnectionPool::Cancellable* handle = conn_pool_.newStream(outer_decoder, callbacks); + EXPECT_NE(nullptr, handle); + + NiceMock outer_decoder2; + ConnPoolCallbacks callbacks2; + EXPECT_CALL(callbacks2.pool_failure_, ready()); + Http::ConnectionPool::Cancellable* handle2 = conn_pool_.newStream(outer_decoder2, callbacks2); + EXPECT_EQ(nullptr, handle2); + + EXPECT_EQ(1U, cluster_->circuit_breakers_stats_.rq_pending_open_.value()); + + handle->cancel(); + + EXPECT_CALL(conn_pool_, onClientDestroy()); + conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + dispatcher_.clearDeferredDeleteList(); + + EXPECT_EQ(1U, cluster_->stats_.upstream_rq_pending_overflow_.value()); +} + +/** + * Tests a connection failure before a request is bound which should result in the pending request + * getting purged. + */ +TEST_F(Http1ConnPoolImplTest, ConnectFailure) { + InSequence s; + + // Request 1 should kick off a new connection. 
+ NiceMock outer_decoder; + ConnPoolCallbacks callbacks; + conn_pool_.expectClientCreate(); + Http::ConnectionPool::Cancellable* handle = conn_pool_.newStream(outer_decoder, callbacks); + EXPECT_NE(nullptr, handle); + + EXPECT_CALL(callbacks.pool_failure_, ready()); + EXPECT_CALL(*conn_pool_.test_clients_[0].connect_timer_, disableTimer()); + conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + EXPECT_CALL(conn_pool_, onClientDestroy()); + dispatcher_.clearDeferredDeleteList(); + + EXPECT_EQ(1U, cluster_->stats_.upstream_cx_connect_fail_.value()); + EXPECT_EQ(1U, cluster_->stats_.upstream_rq_pending_failure_eject_.value()); +} + +/** + * Tests that connection creation time is recorded correctly even in cases where + * there are multiple pending connection creation attempts to the same upstream. + */ +TEST_F(Http1ConnPoolImplTest, MeasureConnectTime) { + constexpr uint64_t sleep1_ms = 20; + constexpr uint64_t sleep2_ms = 10; + constexpr uint64_t sleep3_ms = 5; + Event::SimulatedTimeSystem simulated_time; + + // Allow concurrent creation of 2 upstream connections. + cluster_->resetResourceManager(2, 1024, 1024, 1, 1); + + InSequence s; + + // Start the first connect attempt. + conn_pool_.expectClientCreate(); + ActiveTestRequest r1(*this, 0, ActiveTestRequest::Type::Pending); + + // Move time forward and start the second connect attempt. + simulated_time.sleep(std::chrono::milliseconds(sleep1_ms)); + conn_pool_.expectClientCreate(); + ActiveTestRequest r2(*this, 1, ActiveTestRequest::Type::Pending); + + // Move time forward, signal that the first connect completed and verify the time to connect. 
+ uint64_t upstream_cx_connect_ms1 = 0; + simulated_time.sleep(std::chrono::milliseconds(sleep2_ms)); + EXPECT_CALL(*conn_pool_.test_clients_[0].connect_timer_, disableTimer()); + EXPECT_CALL(cluster_->stats_store_, + deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_cx_connect_ms"), _)) + .WillOnce(SaveArg<1>(&upstream_cx_connect_ms1)); + r1.expectNewStream(); + conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); + EXPECT_EQ(sleep1_ms + sleep2_ms, upstream_cx_connect_ms1); + + // Move time forward, signal that the second connect completed and verify the time to connect. + uint64_t upstream_cx_connect_ms2 = 0; + simulated_time.sleep(std::chrono::milliseconds(sleep3_ms)); + EXPECT_CALL(*conn_pool_.test_clients_[1].connect_timer_, disableTimer()); + EXPECT_CALL(cluster_->stats_store_, + deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_cx_connect_ms"), _)) + .WillOnce(SaveArg<1>(&upstream_cx_connect_ms2)); + r2.expectNewStream(); + conn_pool_.test_clients_[1].connection_->raiseEvent(Network::ConnectionEvent::Connected); + EXPECT_EQ(sleep2_ms + sleep3_ms, upstream_cx_connect_ms2); + + // Cleanup, cause the connections to go away. + for (auto& test_client : conn_pool_.test_clients_) { + EXPECT_CALL(conn_pool_, onClientDestroy()); + EXPECT_CALL( + cluster_->stats_store_, + deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_cx_length_ms"), _)); + test_client.connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + } + dispatcher_.clearDeferredDeleteList(); +} + +/** + * Tests a connect timeout. Also test that we can add a new request during ejection processing. + */ +TEST_F(Http1ConnPoolImplTest, ConnectTimeout) { + InSequence s; + + // Request 1 should kick off a new connection. 
+ NiceMock outer_decoder1; + ConnPoolCallbacks callbacks1; + conn_pool_.expectClientCreate(); + EXPECT_NE(nullptr, conn_pool_.newStream(outer_decoder1, callbacks1)); + + NiceMock outer_decoder2; + ConnPoolCallbacks callbacks2; + EXPECT_CALL(callbacks1.pool_failure_, ready()).WillOnce(Invoke([&]() -> void { + conn_pool_.expectClientCreate(); + EXPECT_NE(nullptr, conn_pool_.newStream(outer_decoder2, callbacks2)); + })); + + conn_pool_.test_clients_[0].connect_timer_->invokeCallback(); + + EXPECT_CALL(callbacks2.pool_failure_, ready()); + conn_pool_.test_clients_[1].connect_timer_->invokeCallback(); + + EXPECT_CALL(conn_pool_, onClientDestroy()).Times(2); + dispatcher_.clearDeferredDeleteList(); + + EXPECT_EQ(0U, cluster_->stats_.upstream_rq_total_.value()); + EXPECT_EQ(2U, cluster_->stats_.upstream_cx_connect_fail_.value()); + EXPECT_EQ(2U, cluster_->stats_.upstream_cx_connect_timeout_.value()); +} + +/** + * Test cancelling before the request is bound to a connection. + */ +TEST_F(Http1ConnPoolImplTest, CancelBeforeBound) { + InSequence s; + + // Request 1 should kick off a new connection. + NiceMock outer_decoder; + ConnPoolCallbacks callbacks; + conn_pool_.expectClientCreate(); + Http::ConnectionPool::Cancellable* handle = conn_pool_.newStream(outer_decoder, callbacks); + EXPECT_NE(nullptr, handle); + + handle->cancel(); + conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); + + // Cause the connection to go away. + EXPECT_CALL(conn_pool_, onClientDestroy()); + conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + dispatcher_.clearDeferredDeleteList(); +} + +/** + * Test an upstream disconnection while there is a bound request. + */ +TEST_F(Http1ConnPoolImplTest, DisconnectWhileBound) { + InSequence s; + + // Request 1 should kick off a new connection. 
+ NiceMock outer_decoder; + ConnPoolCallbacks callbacks; + conn_pool_.expectClientCreate(); + Http::ConnectionPool::Cancellable* handle = conn_pool_.newStream(outer_decoder, callbacks); + EXPECT_NE(nullptr, handle); + + NiceMock request_encoder; + Http::StreamDecoder* inner_decoder; + EXPECT_CALL(*conn_pool_.test_clients_[0].codec_, newStream(_)) + .WillOnce(DoAll(SaveArgAddress(&inner_decoder), ReturnRef(request_encoder))); + EXPECT_CALL(callbacks.pool_ready_, ready()); + + conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); + + // We should get a reset callback when the connection disconnects. + Http::MockStreamCallbacks stream_callbacks; + EXPECT_CALL(stream_callbacks, onResetStream(StreamResetReason::ConnectionTermination, _)); + request_encoder.getStream().addCallbacks(stream_callbacks); + + // Kill the connection while it has an active request. + EXPECT_CALL(conn_pool_, onClientDestroy()); + conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + dispatcher_.clearDeferredDeleteList(); +} + +/** + * Test that we correctly handle reaching max connections. + */ +TEST_F(Http1ConnPoolImplTest, MaxConnections) { + InSequence s; + + EXPECT_EQ(0U, cluster_->circuit_breakers_stats_.cx_open_.value()); + + // Request 1 should kick off a new connection. + NiceMock outer_decoder1; + ConnPoolCallbacks callbacks; + conn_pool_.expectClientCreate(); + Http::ConnectionPool::Cancellable* handle = conn_pool_.newStream(outer_decoder1, callbacks); + + EXPECT_NE(nullptr, handle); + + // Request 2 should not kick off a new connection. + NiceMock outer_decoder2; + ConnPoolCallbacks callbacks2; + handle = conn_pool_.newStream(outer_decoder2, callbacks2); + EXPECT_EQ(1U, cluster_->stats_.upstream_cx_overflow_.value()); + EXPECT_EQ(1U, cluster_->circuit_breakers_stats_.cx_open_.value()); + + EXPECT_NE(nullptr, handle); + + // Connect event will bind to request 1. 
+ NiceMock request_encoder; + Http::StreamDecoder* inner_decoder; + EXPECT_CALL(*conn_pool_.test_clients_[0].codec_, newStream(_)) + .WillOnce(DoAll(SaveArgAddress(&inner_decoder), ReturnRef(request_encoder))); + EXPECT_CALL(callbacks.pool_ready_, ready()); + + conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); + + // Finishing request 1 will immediately bind to request 2. + conn_pool_.expectEnableUpstreamReady(); + EXPECT_CALL(*conn_pool_.test_clients_[0].codec_, newStream(_)) + .WillOnce(DoAll(SaveArgAddress(&inner_decoder), ReturnRef(request_encoder))); + EXPECT_CALL(callbacks2.pool_ready_, ready()); + + callbacks.outer_encoder_->encodeHeaders(TestHeaderMapImpl{}, true); + Http::HeaderMapPtr response_headers(new TestHeaderMapImpl{{":status", "200"}}); + inner_decoder->decodeHeaders(std::move(response_headers), true); + + conn_pool_.expectAndRunUpstreamReady(); + callbacks2.outer_encoder_->encodeHeaders(TestHeaderMapImpl{}, true); + // N.B. clang_tidy insists that we use std::make_unique which can not infer std::initialize_list. + response_headers = std::make_unique( + std::initializer_list>{{":status", "200"}}); + inner_decoder->decodeHeaders(std::move(response_headers), true); + + // Cause the connection to go away. + EXPECT_CALL(conn_pool_, onClientDestroy()); + conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + dispatcher_.clearDeferredDeleteList(); +} + +/** + * Test when upstream closes connection without 'connection: close' like + * https://github.com/envoyproxy/envoy/pull/2715 + */ +TEST_F(Http1ConnPoolImplTest, ConnectionCloseWithoutHeader) { + InSequence s; + + // Request 1 should kick off a new connection. 
+ NiceMock outer_decoder1; + ConnPoolCallbacks callbacks; + conn_pool_.expectClientCreate(); + Http::ConnectionPool::Cancellable* handle = conn_pool_.newStream(outer_decoder1, callbacks); + + EXPECT_NE(nullptr, handle); + + // Request 2 should not kick off a new connection. + NiceMock outer_decoder2; + ConnPoolCallbacks callbacks2; + handle = conn_pool_.newStream(outer_decoder2, callbacks2); + EXPECT_EQ(1U, cluster_->stats_.upstream_cx_overflow_.value()); + + EXPECT_NE(nullptr, handle); + + // Connect event will bind to request 1. + NiceMock request_encoder; + Http::StreamDecoder* inner_decoder; + EXPECT_CALL(*conn_pool_.test_clients_[0].codec_, newStream(_)) + .WillOnce(DoAll(SaveArgAddress(&inner_decoder), ReturnRef(request_encoder))); + EXPECT_CALL(callbacks.pool_ready_, ready()); + + conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); + + // Finishing request 1 will schedule binding the connection to request 2. + conn_pool_.expectEnableUpstreamReady(); + + callbacks.outer_encoder_->encodeHeaders(TestHeaderMapImpl{}, true); + Http::HeaderMapPtr response_headers(new TestHeaderMapImpl{{":status", "200"}}); + inner_decoder->decodeHeaders(std::move(response_headers), true); + + // Cause the connection to go away. + conn_pool_.expectClientCreate(); + EXPECT_CALL(conn_pool_, onClientDestroy()); + conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + dispatcher_.clearDeferredDeleteList(); + + conn_pool_.expectAndRunUpstreamReady(); + + EXPECT_CALL(*conn_pool_.test_clients_[0].codec_, newStream(_)) + .WillOnce(DoAll(SaveArgAddress(&inner_decoder), ReturnRef(request_encoder))); + EXPECT_CALL(callbacks2.pool_ready_, ready()); + conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); + + callbacks2.outer_encoder_->encodeHeaders(TestHeaderMapImpl{}, true); + // N.B. clang_tidy insists that we use std::make_unique which can not infer std::initialize_list. 
+ response_headers = std::make_unique( + std::initializer_list>{{":status", "200"}}); + inner_decoder->decodeHeaders(std::move(response_headers), true); + + EXPECT_CALL(conn_pool_, onClientDestroy()); + conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + dispatcher_.clearDeferredDeleteList(); +} + +/** + * Test when upstream sends us 'connection: close' + */ +TEST_F(Http1ConnPoolImplTest, ConnectionCloseHeader) { + InSequence s; + + // Request 1 should kick off a new connection. + NiceMock outer_decoder; + ConnPoolCallbacks callbacks; + conn_pool_.expectClientCreate(); + Http::ConnectionPool::Cancellable* handle = conn_pool_.newStream(outer_decoder, callbacks); + + EXPECT_NE(nullptr, handle); + + NiceMock request_encoder; + Http::StreamDecoder* inner_decoder; + EXPECT_CALL(*conn_pool_.test_clients_[0].codec_, newStream(_)) + .WillOnce(DoAll(SaveArgAddress(&inner_decoder), ReturnRef(request_encoder))); + EXPECT_CALL(callbacks.pool_ready_, ready()); + + conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); + callbacks.outer_encoder_->encodeHeaders(TestHeaderMapImpl{}, true); + + // Response with 'connection: close' which should cause the connection to go away. + EXPECT_CALL(conn_pool_, onClientDestroy()); + Http::HeaderMapPtr response_headers( + new TestHeaderMapImpl{{":status", "200"}, {"Connection", "Close"}}); + inner_decoder->decodeHeaders(std::move(response_headers), true); + dispatcher_.clearDeferredDeleteList(); + + EXPECT_EQ(0U, cluster_->stats_.upstream_cx_destroy_with_active_rq_.value()); +} + +/** + * Test when upstream sends us 'proxy-connection: close' + */ +TEST_F(Http1ConnPoolImplTest, ProxyConnectionCloseHeader) { + InSequence s; + + // Request 1 should kick off a new connection. 
+ NiceMock outer_decoder; + ConnPoolCallbacks callbacks; + conn_pool_.expectClientCreate(); + Http::ConnectionPool::Cancellable* handle = conn_pool_.newStream(outer_decoder, callbacks); + + EXPECT_NE(nullptr, handle); + + NiceMock request_encoder; + Http::StreamDecoder* inner_decoder; + EXPECT_CALL(*conn_pool_.test_clients_[0].codec_, newStream(_)) + .WillOnce(DoAll(SaveArgAddress(&inner_decoder), ReturnRef(request_encoder))); + EXPECT_CALL(callbacks.pool_ready_, ready()); + + conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); + callbacks.outer_encoder_->encodeHeaders(TestHeaderMapImpl{}, true); + + // Response with 'proxy-connection: close' which should cause the connection to go away. + EXPECT_CALL(conn_pool_, onClientDestroy()); + Http::HeaderMapPtr response_headers( + new TestHeaderMapImpl{{":status", "200"}, {"Proxy-Connection", "Close"}}); + inner_decoder->decodeHeaders(std::move(response_headers), true); + dispatcher_.clearDeferredDeleteList(); + + EXPECT_EQ(0U, cluster_->stats_.upstream_cx_destroy_with_active_rq_.value()); +} + +/** + * Test when upstream is HTTP/1.0 and does not send 'connection: keep-alive' + */ +TEST_F(Http1ConnPoolImplTest, Http10NoConnectionKeepAlive) { + InSequence s; + + // Request 1 should kick off a new connection. 
+ NiceMock outer_decoder; + ConnPoolCallbacks callbacks; + conn_pool_.expectClientCreate(Protocol::Http10); + Http::ConnectionPool::Cancellable* handle = conn_pool_.newStream(outer_decoder, callbacks); + + EXPECT_NE(nullptr, handle); + + NiceMock request_encoder; + Http::StreamDecoder* inner_decoder; + EXPECT_CALL(*conn_pool_.test_clients_[0].codec_, newStream(_)) + .WillOnce(DoAll(SaveArgAddress(&inner_decoder), ReturnRef(request_encoder))); + EXPECT_CALL(callbacks.pool_ready_, ready()); + + conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); + callbacks.outer_encoder_->encodeHeaders(TestHeaderMapImpl{}, true); + + // Response without 'connection: keep-alive' which should cause the connection to go away. + EXPECT_CALL(conn_pool_, onClientDestroy()); + Http::HeaderMapPtr response_headers( + new TestHeaderMapImpl{{":protocol", "HTTP/1.0"}, {":status", "200"}}); + inner_decoder->decodeHeaders(std::move(response_headers), true); + dispatcher_.clearDeferredDeleteList(); + + EXPECT_EQ(0U, cluster_->stats_.upstream_cx_destroy_with_active_rq_.value()); +} + +/** + * Test when we reach max requests per connection. + */ +TEST_F(Http1ConnPoolImplTest, MaxRequestsPerConnection) { + InSequence s; + + cluster_->max_requests_per_connection_ = 1; + + // Request 1 should kick off a new connection. 
+ NiceMock outer_decoder; + ConnPoolCallbacks callbacks; + conn_pool_.expectClientCreate(); + Http::ConnectionPool::Cancellable* handle = conn_pool_.newStream(outer_decoder, callbacks); + + EXPECT_NE(nullptr, handle); + + NiceMock request_encoder; + Http::StreamDecoder* inner_decoder; + EXPECT_CALL(*conn_pool_.test_clients_[0].codec_, newStream(_)) + .WillOnce(DoAll(SaveArgAddress(&inner_decoder), ReturnRef(request_encoder))); + EXPECT_CALL(callbacks.pool_ready_, ready()); + + conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); + callbacks.outer_encoder_->encodeHeaders(TestHeaderMapImpl{}, true); + + // Response with 'connection: close' which should cause the connection to go away. + EXPECT_CALL(conn_pool_, onClientDestroy()); + Http::HeaderMapPtr response_headers(new TestHeaderMapImpl{{":status", "200"}}); + inner_decoder->decodeHeaders(std::move(response_headers), true); + dispatcher_.clearDeferredDeleteList(); + + EXPECT_EQ(0U, cluster_->stats_.upstream_cx_destroy_with_active_rq_.value()); + EXPECT_EQ(1U, cluster_->stats_.upstream_cx_max_requests_.value()); +} + +TEST_F(Http1ConnPoolImplTest, ConcurrentConnections) { + cluster_->resetResourceManager(2, 1024, 1024, 1, 1); + InSequence s; + + ActiveTestRequest r1(*this, 0, ActiveTestRequest::Type::CreateConnection); + r1.startRequest(); + + ActiveTestRequest r2(*this, 1, ActiveTestRequest::Type::CreateConnection); + r2.startRequest(); + + ActiveTestRequest r3(*this, 0, ActiveTestRequest::Type::Pending); + + // Finish r1, which gets r3 going. + conn_pool_.expectEnableUpstreamReady(); + r3.expectNewStream(); + + r1.completeResponse(false); + conn_pool_.expectAndRunUpstreamReady(); + r3.startRequest(); + EXPECT_EQ(3U, cluster_->stats_.upstream_rq_total_.value()); + + r2.completeResponse(false); + r3.completeResponse(false); + + // Disconnect both clients. 
+ EXPECT_CALL(conn_pool_, onClientDestroy()).Times(2); + conn_pool_.test_clients_[1].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + dispatcher_.clearDeferredDeleteList(); + + EXPECT_EQ(2U, cluster_->stats_.upstream_cx_destroy_.value()); + EXPECT_EQ(2U, cluster_->stats_.upstream_cx_destroy_remote_.value()); +} + +TEST_F(Http1ConnPoolImplTest, DrainCallback) { + InSequence s; + ReadyWatcher drained; + + EXPECT_CALL(drained, ready()); + conn_pool_.addDrainedCallback([&]() -> void { drained.ready(); }); + + ActiveTestRequest r1(*this, 0, ActiveTestRequest::Type::CreateConnection); + ActiveTestRequest r2(*this, 0, ActiveTestRequest::Type::Pending); + r2.handle_->cancel(); + EXPECT_EQ(1U, cluster_->stats_.upstream_rq_total_.value()); + + EXPECT_CALL(drained, ready()); + r1.startRequest(); + r1.completeResponse(false); + + EXPECT_CALL(conn_pool_, onClientDestroy()); + dispatcher_.clearDeferredDeleteList(); +} + +// Test draining a connection pool that has a pending connection. 
+TEST_F(Http1ConnPoolImplTest, DrainWhileConnecting) { + InSequence s; + ReadyWatcher drained; + + NiceMock outer_decoder; + ConnPoolCallbacks callbacks; + conn_pool_.expectClientCreate(); + Http::ConnectionPool::Cancellable* handle = conn_pool_.newStream(outer_decoder, callbacks); + EXPECT_NE(nullptr, handle); + + conn_pool_.addDrainedCallback([&]() -> void { drained.ready(); }); + handle->cancel(); + EXPECT_CALL(*conn_pool_.test_clients_[0].connection_, + close(Network::ConnectionCloseType::NoFlush)); + EXPECT_CALL(drained, ready()); + conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); + + EXPECT_CALL(conn_pool_, onClientDestroy()); + dispatcher_.clearDeferredDeleteList(); +} + +TEST_F(Http1ConnPoolImplTest, RemoteCloseToCompleteResponse) { + InSequence s; + + NiceMock outer_decoder; + ConnPoolCallbacks callbacks; + conn_pool_.expectClientCreate(); + Http::ConnectionPool::Cancellable* handle = conn_pool_.newStream(outer_decoder, callbacks); + EXPECT_NE(nullptr, handle); + + NiceMock request_encoder; + Http::StreamDecoder* inner_decoder; + EXPECT_CALL(*conn_pool_.test_clients_[0].connect_timer_, disableTimer()); + EXPECT_CALL(*conn_pool_.test_clients_[0].codec_, newStream(_)) + .WillOnce(DoAll(SaveArgAddress(&inner_decoder), ReturnRef(request_encoder))); + EXPECT_CALL(callbacks.pool_ready_, ready()); + conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); + + callbacks.outer_encoder_->encodeHeaders(TestHeaderMapImpl{}, true); + + inner_decoder->decodeHeaders(HeaderMapPtr{new HeaderMapImpl{}}, false); + Buffer::OwnedImpl dummy_data("12345"); + inner_decoder->decodeData(dummy_data, false); + + Buffer::OwnedImpl empty_data; + EXPECT_CALL(*conn_pool_.test_clients_[0].codec_, dispatch(BufferEqual(&empty_data))) + .WillOnce(Invoke([&](Buffer::Instance& data) -> void { + // Simulate the onResponseComplete call to decodeData since dispatch is mocked out. 
+ inner_decoder->decodeData(data, true); + })); + + EXPECT_CALL(*conn_pool_.test_clients_[0].connection_, + close(Network::ConnectionCloseType::NoFlush)); + EXPECT_CALL(conn_pool_, onClientDestroy()); + conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + dispatcher_.clearDeferredDeleteList(); + + EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_.value()); + EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_remote_.value()); +} + +TEST_F(Http1ConnPoolImplTest, NoActiveConnectionsByDefault) { + EXPECT_FALSE(conn_pool_.hasActiveConnections()); +} + +TEST_F(Http1ConnPoolImplTest, ActiveRequestHasActiveConnectionsTrue) { + ActiveTestRequest r1(*this, 0, ActiveTestRequest::Type::CreateConnection); + r1.startRequest(); + + EXPECT_TRUE(conn_pool_.hasActiveConnections()); + + // cleanup + r1.completeResponse(false); + conn_pool_.drainConnections(); + EXPECT_CALL(conn_pool_, onClientDestroy()); + dispatcher_.clearDeferredDeleteList(); +} + +TEST_F(Http1ConnPoolImplTest, ResponseCompletedConnectionReadyNoActiveConnections) { + ActiveTestRequest r1(*this, 0, ActiveTestRequest::Type::CreateConnection); + r1.startRequest(); + r1.completeResponse(false); + + EXPECT_FALSE(conn_pool_.hasActiveConnections()); + + conn_pool_.drainConnections(); + EXPECT_CALL(conn_pool_, onClientDestroy()); + dispatcher_.clearDeferredDeleteList(); +} + +TEST_F(Http1ConnPoolImplTest, PendingRequestIsConsideredActive) { + conn_pool_.expectClientCreate(); + ActiveTestRequest r1(*this, 0, ActiveTestRequest::Type::Pending); + + EXPECT_TRUE(conn_pool_.hasActiveConnections()); + + EXPECT_CALL(conn_pool_, onClientDestroy()); + r1.handle_->cancel(); + EXPECT_EQ(0U, cluster_->stats_.upstream_rq_total_.value()); + conn_pool_.drainConnections(); + conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + dispatcher_.clearDeferredDeleteList(); + + EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_.value()); + EXPECT_EQ(1U, 
cluster_->stats_.upstream_cx_destroy_remote_.value()); +} + +} // namespace +} // namespace Http1 +} // namespace Legacy +} // namespace Http +} // namespace Envoy diff --git a/test/common/http/http2/BUILD b/test/common/http/http2/BUILD index ded96525c02b0..6c8387fbc2d4c 100644 --- a/test/common/http/http2/BUILD +++ b/test/common/http/http2/BUILD @@ -64,6 +64,25 @@ envoy_cc_test( ], ) +envoy_cc_test( + name = "conn_pool_legacy_test", + srcs = ["conn_pool_legacy_test.cc"], + deps = [ + "//source/common/event:dispatcher_lib", + "//source/common/http/http2:conn_pool_legacy_lib", + "//source/common/network:utility_lib", + "//source/common/upstream:upstream_includes", + "//source/common/upstream:upstream_lib", + "//test/common/http:common_lib", + "//test/common/upstream:utility_lib", + "//test/mocks/event:event_mocks", + "//test/mocks/http:http_mocks", + "//test/mocks/network:network_mocks", + "//test/mocks/runtime:runtime_mocks", + "//test/mocks/upstream:upstream_mocks", + ], +) + envoy_cc_test_library( name = "http2_frame", srcs = ["http2_frame.cc"], diff --git a/test/common/http/http2/conn_pool_legacy_test.cc b/test/common/http/http2/conn_pool_legacy_test.cc new file mode 100644 index 0000000000000..715f5e56c5103 --- /dev/null +++ b/test/common/http/http2/conn_pool_legacy_test.cc @@ -0,0 +1,774 @@ +#include +#include +#include + +#include "common/event/dispatcher_impl.h" +#include "common/http/http2/conn_pool_legacy.h" +#include "common/network/utility.h" +#include "common/upstream/upstream_impl.h" + +#include "test/common/http/common.h" +#include "test/common/upstream/utility.h" +#include "test/mocks/event/mocks.h" +#include "test/mocks/http/mocks.h" +#include "test/mocks/network/mocks.h" +#include "test/mocks/runtime/mocks.h" +#include "test/mocks/upstream/mocks.h" +#include "test/test_common/printers.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +using testing::_; +using testing::DoAll; +using testing::InSequence; +using testing::Invoke; +using 
testing::NiceMock; +using testing::Property; +using testing::Return; +using testing::ReturnRef; + +namespace Envoy { +namespace Http { +namespace Legacy { +namespace Http2 { + +class TestConnPoolImpl : public ConnPoolImpl { +public: + using ConnPoolImpl::ConnPoolImpl; + + CodecClientPtr createCodecClient(Upstream::Host::CreateConnectionData& data) override { + // We expect to own the connection, but already have it, so just release it to prevent it from + // getting deleted. + data.connection_.release(); + return CodecClientPtr{createCodecClient_(data)}; + } + + MOCK_METHOD1(createCodecClient_, CodecClient*(Upstream::Host::CreateConnectionData& data)); + + uint32_t maxTotalStreams() override { return max_streams_; } + + uint32_t max_streams_{std::numeric_limits::max()}; +}; + +class ActiveTestRequest; + +class Http2ConnPoolImplTest : public testing::Test { +public: + struct TestCodecClient { + Http::MockClientConnection* codec_; + Network::MockClientConnection* connection_; + CodecClientForTest* codec_client_; + Event::MockTimer* connect_timer_; + Event::DispatcherPtr client_dispatcher_; + }; + + Http2ConnPoolImplTest() + : api_(Api::createApiForTest(stats_store_)), + pool_(dispatcher_, host_, Upstream::ResourcePriority::Default, nullptr, nullptr) {} + + ~Http2ConnPoolImplTest() override { + EXPECT_TRUE(TestUtility::gaugesZeroed(cluster_->stats_store_.gauges())); + } + + // Creates a new test client, expecting a new connection to be created and associated + // with the new client. 
+ void expectClientCreate(absl::optional buffer_limits = {}) { + test_clients_.emplace_back(); + TestCodecClient& test_client = test_clients_.back(); + test_client.connection_ = new NiceMock(); + test_client.codec_ = new NiceMock(); + test_client.connect_timer_ = new NiceMock(&dispatcher_); + test_client.client_dispatcher_ = api_->allocateDispatcher(); + EXPECT_CALL(*test_client.connect_timer_, enableTimer(_, _)); + EXPECT_CALL(dispatcher_, createClientConnection_(_, _, _, _)) + .WillOnce(Return(test_client.connection_)); + auto cluster = std::make_shared>(); + Network::ClientConnectionPtr connection{test_client.connection_}; + test_client.codec_client_ = new CodecClientForTest( + CodecClient::Type::HTTP1, std::move(connection), test_client.codec_, + [this](CodecClient*) -> void { onClientDestroy(); }, + Upstream::makeTestHost(cluster, "tcp://127.0.0.1:9000"), *test_client.client_dispatcher_); + if (buffer_limits) { + EXPECT_CALL(*cluster_, perConnectionBufferLimitBytes()).WillOnce(Return(*buffer_limits)); + EXPECT_CALL(*test_clients_.back().connection_, setBufferLimits(*buffer_limits)); + } + EXPECT_CALL(pool_, createCodecClient_(_)) + .WillOnce(Invoke([this](Upstream::Host::CreateConnectionData&) -> CodecClient* { + return test_clients_.back().codec_client_; + })); + } + + // Connects a pending connection for client with the given index, asserting + // that the provided request receives onPoolReady. + void expectClientConnect(size_t index, ActiveTestRequest& r); + // Asserts that onPoolReady is called on the request. + void expectStreamConnect(size_t index, ActiveTestRequest& r); + + // Resets the connection belonging to the provided index, asserting that the + // provided request receives onPoolFailure. + void expectClientReset(size_t index, ActiveTestRequest& r); + // Asserts that the provided requests receives onPoolFailure. + void expectStreamReset(ActiveTestRequest& r); + + /** + * Closes a test client. 
+ */ + void closeClient(size_t index); + + /** + * Completes an active request. Useful when this flow is not part of the main test assertions. + */ + void completeRequest(ActiveTestRequest& r); + + /** + * Completes an active request and closes the upstream connection. Useful when this flow is + * not part of the main test assertions. + */ + void completeRequestCloseUpstream(size_t index, ActiveTestRequest& r); + + MOCK_METHOD0(onClientDestroy, void()); + + Stats::IsolatedStoreImpl stats_store_; + Api::ApiPtr api_; + NiceMock dispatcher_; + std::shared_ptr cluster_{new NiceMock()}; + Upstream::HostSharedPtr host_{Upstream::makeTestHost(cluster_, "tcp://127.0.0.1:80")}; + TestConnPoolImpl pool_; + std::vector test_clients_; + NiceMock runtime_; +}; + +class ActiveTestRequest { +public: + ActiveTestRequest(Http2ConnPoolImplTest& test, size_t client_index, bool expect_connected) { + if (expect_connected) { + EXPECT_CALL(*test.test_clients_[client_index].codec_, newStream(_)) + .WillOnce(DoAll(SaveArgAddress(&inner_decoder_), ReturnRef(inner_encoder_))); + EXPECT_CALL(callbacks_.pool_ready_, ready()); + EXPECT_EQ(nullptr, test.pool_.newStream(decoder_, callbacks_)); + } else { + EXPECT_NE(nullptr, test.pool_.newStream(decoder_, callbacks_)); + } + } + + Http::MockStreamDecoder decoder_; + ConnPoolCallbacks callbacks_; + Http::StreamDecoder* inner_decoder_{}; + NiceMock inner_encoder_; +}; + +void Http2ConnPoolImplTest::expectClientConnect(size_t index, ActiveTestRequest& r) { + expectStreamConnect(index, r); + EXPECT_CALL(*test_clients_[index].connect_timer_, disableTimer()); + test_clients_[index].connection_->raiseEvent(Network::ConnectionEvent::Connected); +} + +void Http2ConnPoolImplTest::expectStreamConnect(size_t index, ActiveTestRequest& r) { + EXPECT_CALL(*test_clients_[index].codec_, newStream(_)) + .WillOnce(DoAll(SaveArgAddress(&r.inner_decoder_), ReturnRef(r.inner_encoder_))); + EXPECT_CALL(r.callbacks_.pool_ready_, ready()); +} + +void 
Http2ConnPoolImplTest::expectClientReset(size_t index, ActiveTestRequest& r) { + expectStreamReset(r); + EXPECT_CALL(*test_clients_[0].connect_timer_, disableTimer()); + test_clients_[index].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); +} + +void Http2ConnPoolImplTest::expectStreamReset(ActiveTestRequest& r) { + EXPECT_CALL(r.callbacks_.pool_failure_, ready()); +} + +void Http2ConnPoolImplTest::closeClient(size_t index) { + test_clients_[index].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + EXPECT_CALL(*this, onClientDestroy()); + dispatcher_.clearDeferredDeleteList(); +} + +void Http2ConnPoolImplTest::completeRequest(ActiveTestRequest& r) { + EXPECT_CALL(r.inner_encoder_, encodeHeaders(_, true)); + r.callbacks_.outer_encoder_->encodeHeaders(HeaderMapImpl{}, true); + EXPECT_CALL(r.decoder_, decodeHeaders_(_, true)); + r.inner_decoder_->decodeHeaders(HeaderMapPtr{new HeaderMapImpl{}}, true); +} + +void Http2ConnPoolImplTest::completeRequestCloseUpstream(size_t index, ActiveTestRequest& r) { + completeRequest(r); + closeClient(index); +} + +/** + * Verify that the pool retains and returns the host it was constructed with. + */ +TEST_F(Http2ConnPoolImplTest, Host) { EXPECT_EQ(host_, pool_.host()); } + +/** + * Verify that connections are drained when requested. + */ +TEST_F(Http2ConnPoolImplTest, DrainConnections) { + InSequence s; + pool_.max_streams_ = 1; + + // Test drain connections call prior to any connections being created. 
+ pool_.drainConnections(); + + expectClientCreate(); + ActiveTestRequest r1(*this, 0, false); + expectClientConnect(0, r1); + EXPECT_CALL(r1.inner_encoder_, encodeHeaders(_, true)); + r1.callbacks_.outer_encoder_->encodeHeaders(HeaderMapImpl{}, true); + + expectClientCreate(); + ActiveTestRequest r2(*this, 1, false); + expectClientConnect(1, r2); + EXPECT_CALL(r2.inner_encoder_, encodeHeaders(_, true)); + r2.callbacks_.outer_encoder_->encodeHeaders(HeaderMapImpl{}, true); + + // This will move primary to draining and destroy draining. + pool_.drainConnections(); + EXPECT_CALL(*this, onClientDestroy()); + dispatcher_.clearDeferredDeleteList(); + + // This will destroy draining. + test_clients_[1].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + EXPECT_CALL(*this, onClientDestroy()); + dispatcher_.clearDeferredDeleteList(); + + EXPECT_EQ(2U, cluster_->stats_.upstream_cx_destroy_.value()); + EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_remote_.value()); +} + +// Verifies that requests are queued up in the conn pool until the connection becomes ready. +TEST_F(Http2ConnPoolImplTest, PendingRequests) { + InSequence s; + + // Create three requests. These should be queued up. + expectClientCreate(); + ActiveTestRequest r1(*this, 0, false); + ActiveTestRequest r2(*this, 0, false); + ActiveTestRequest r3(*this, 0, false); + + // The connection now becomes ready. This should cause all the queued requests to be sent. + expectStreamConnect(0, r1); + expectStreamConnect(0, r2); + expectClientConnect(0, r3); + + // Send a request through each stream. 
+ EXPECT_CALL(r1.inner_encoder_, encodeHeaders(_, true)); + r1.callbacks_.outer_encoder_->encodeHeaders(HeaderMapImpl{}, true); + + EXPECT_CALL(r2.inner_encoder_, encodeHeaders(_, true)); + r2.callbacks_.outer_encoder_->encodeHeaders(HeaderMapImpl{}, true); + + EXPECT_CALL(r3.inner_encoder_, encodeHeaders(_, true)); + r3.callbacks_.outer_encoder_->encodeHeaders(HeaderMapImpl{}, true); + + // Since we now have an active connection, subsequent requests should connect immediately. + ActiveTestRequest r4(*this, 0, true); + + // Clean up everything. + test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + EXPECT_CALL(*this, onClientDestroy()); + dispatcher_.clearDeferredDeleteList(); + + EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_.value()); + EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_remote_.value()); +} + +// Verifies that requests are queued up in the conn pool and fail when the connection +// fails to be established. +TEST_F(Http2ConnPoolImplTest, PendingRequestsFailure) { + InSequence s; + pool_.max_streams_ = 10; + + // Create three requests. These should be queued up. + expectClientCreate(); + ActiveTestRequest r1(*this, 0, false); + ActiveTestRequest r2(*this, 0, false); + ActiveTestRequest r3(*this, 0, false); + + // The connection now becomes ready. This should cause all the queued requests to be sent. + // Note that these occur in reverse order due to the order we purge pending requests in. + expectStreamReset(r3); + expectStreamReset(r2); + expectClientReset(0, r1); + + expectClientCreate(); + // Since we have no active connection, subsequence requests will queue until + // the new connection is established. + ActiveTestRequest r4(*this, 1, false); + expectClientConnect(1, r4); + + // Clean up everything. 
+ test_clients_[1].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + EXPECT_CALL(*this, onClientDestroy()).Times(2); + dispatcher_.clearDeferredDeleteList(); + + EXPECT_EQ(2U, cluster_->stats_.upstream_cx_destroy_.value()); + EXPECT_EQ(2U, cluster_->stats_.upstream_cx_destroy_remote_.value()); +} + +// Verifies that requests are queued up in the conn pool and respect max request circuit breaking +// when the connection is established. +TEST_F(Http2ConnPoolImplTest, PendingRequestsRequestOverflow) { + InSequence s; + + // Inflate the resource count to just under the limit. + auto& requests = host_->cluster().resourceManager(Upstream::ResourcePriority::Default).requests(); + for (uint64_t i = 0; i < requests.max() - 1; ++i) { + requests.inc(); + } + + // Create three requests. These should be queued up. + expectClientCreate(); + ActiveTestRequest r1(*this, 0, false); + ActiveTestRequest r2(*this, 0, false); + ActiveTestRequest r3(*this, 0, false); + + // We queued up three requests, but we can only afford one before hitting the circuit + // breaker. Thus, we expect to see 2 resets and one successful connect. + expectStreamConnect(0, r1); + expectStreamReset(r2); + expectStreamReset(r3); + EXPECT_CALL(*test_clients_[0].connect_timer_, disableTimer()); + test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); + + // Clean up everything. + for (uint64_t i = 0; i < requests.max() - 1; ++i) { + requests.dec(); + } + test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + EXPECT_CALL(*this, onClientDestroy()); + dispatcher_.clearDeferredDeleteList(); + + EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_.value()); + EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_remote_.value()); +} + +// Verifies that we honor the max pending requests circuit breaker. +TEST_F(Http2ConnPoolImplTest, PendingRequestsMaxPendingCircuitBreaker) { + InSequence s; + + // Inflate the resource count to just under the limit. 
+ auto& pending_reqs = + host_->cluster().resourceManager(Upstream::ResourcePriority::Default).pendingRequests(); + for (uint64_t i = 0; i < pending_reqs.max() - 1; ++i) { + pending_reqs.inc(); + } + + // Create two requests. The first one should be enqueued, while the second one + // should fail fast due to us being above the max pending requests limit. + expectClientCreate(); + ActiveTestRequest r1(*this, 0, false); + + Http::MockStreamDecoder decoder; + ConnPoolCallbacks callbacks; + EXPECT_CALL(callbacks.pool_failure_, ready()); + EXPECT_EQ(nullptr, pool_.newStream(decoder, callbacks)); + + expectStreamConnect(0, r1); + EXPECT_CALL(*test_clients_[0].connect_timer_, disableTimer()); + test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); + + // Clean up everything. + for (uint64_t i = 0; i < pending_reqs.max() - 1; ++i) { + pending_reqs.dec(); + } + test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + EXPECT_CALL(*this, onClientDestroy()); + dispatcher_.clearDeferredDeleteList(); + + EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_.value()); + EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_remote_.value()); +} + +TEST_F(Http2ConnPoolImplTest, VerifyConnectionTimingStats) { + InSequence s; + expectClientCreate(); + ActiveTestRequest r1(*this, 0, false); + EXPECT_CALL(cluster_->stats_store_, + deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_cx_connect_ms"), _)); + expectClientConnect(0, r1); + EXPECT_CALL(r1.inner_encoder_, encodeHeaders(_, true)); + r1.callbacks_.outer_encoder_->encodeHeaders(HeaderMapImpl{}, true); + EXPECT_CALL(r1.decoder_, decodeHeaders_(_, true)); + r1.inner_decoder_->decodeHeaders(HeaderMapPtr{new HeaderMapImpl{}}, true); + + test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + EXPECT_CALL(*this, onClientDestroy()); + EXPECT_CALL(cluster_->stats_store_, + deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_cx_length_ms"), 
_)); + dispatcher_.clearDeferredDeleteList(); + + EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_.value()); + EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_remote_.value()); +} + +/** + * Test that buffer limits are set. + */ +TEST_F(Http2ConnPoolImplTest, VerifyBufferLimits) { + InSequence s; + expectClientCreate(8192); + ActiveTestRequest r1(*this, 0, false); + + expectClientConnect(0, r1); + EXPECT_CALL(r1.inner_encoder_, encodeHeaders(_, true)); + r1.callbacks_.outer_encoder_->encodeHeaders(HeaderMapImpl{}, true); + EXPECT_CALL(r1.decoder_, decodeHeaders_(_, true)); + r1.inner_decoder_->decodeHeaders(HeaderMapPtr{new HeaderMapImpl{}}, true); + + test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + EXPECT_CALL(*this, onClientDestroy()); + dispatcher_.clearDeferredDeleteList(); + + EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_.value()); + EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_remote_.value()); +} + +TEST_F(Http2ConnPoolImplTest, RequestAndResponse) { + InSequence s; + + expectClientCreate(); + ActiveTestRequest r1(*this, 0, false); + expectClientConnect(0, r1); + EXPECT_CALL(r1.inner_encoder_, encodeHeaders(_, true)); + r1.callbacks_.outer_encoder_->encodeHeaders(HeaderMapImpl{}, true); + EXPECT_CALL(r1.decoder_, decodeHeaders_(_, true)); + r1.inner_decoder_->decodeHeaders(HeaderMapPtr{new HeaderMapImpl{}}, true); + + ActiveTestRequest r2(*this, 0, true); + EXPECT_CALL(r2.inner_encoder_, encodeHeaders(_, true)); + r2.callbacks_.outer_encoder_->encodeHeaders(HeaderMapImpl{}, true); + EXPECT_CALL(r2.decoder_, decodeHeaders_(_, true)); + r2.inner_decoder_->decodeHeaders(HeaderMapPtr{new HeaderMapImpl{}}, true); + + test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + EXPECT_CALL(*this, onClientDestroy()); + dispatcher_.clearDeferredDeleteList(); + + EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_.value()); + EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_remote_.value()); 
+} + +TEST_F(Http2ConnPoolImplTest, LocalReset) { + InSequence s; + + expectClientCreate(); + ActiveTestRequest r1(*this, 0, false); + expectClientConnect(0, r1); + EXPECT_CALL(r1.inner_encoder_, encodeHeaders(_, false)); + r1.callbacks_.outer_encoder_->encodeHeaders(HeaderMapImpl{}, false); + r1.callbacks_.outer_encoder_->getStream().resetStream(Http::StreamResetReason::LocalReset); + + test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + EXPECT_CALL(*this, onClientDestroy()); + dispatcher_.clearDeferredDeleteList(); + EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_.value()); + EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_remote_.value()); + EXPECT_EQ(1U, cluster_->stats_.upstream_rq_tx_reset_.value()); + EXPECT_EQ(0U, cluster_->circuit_breakers_stats_.rq_open_.value()); +} + +TEST_F(Http2ConnPoolImplTest, RemoteReset) { + InSequence s; + + expectClientCreate(); + ActiveTestRequest r1(*this, 0, false); + expectClientConnect(0, r1); + EXPECT_CALL(r1.inner_encoder_, encodeHeaders(_, false)); + r1.callbacks_.outer_encoder_->encodeHeaders(HeaderMapImpl{}, false); + r1.inner_encoder_.stream_.resetStream(Http::StreamResetReason::RemoteReset); + + test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + EXPECT_CALL(*this, onClientDestroy()); + dispatcher_.clearDeferredDeleteList(); + EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_.value()); + EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_remote_.value()); + EXPECT_EQ(1U, cluster_->stats_.upstream_rq_rx_reset_.value()); + EXPECT_EQ(0U, cluster_->circuit_breakers_stats_.rq_open_.value()); +} + +TEST_F(Http2ConnPoolImplTest, DrainDisconnectWithActiveRequest) { + InSequence s; + pool_.max_streams_ = 1; + + expectClientCreate(); + ActiveTestRequest r1(*this, 0, false); + expectClientConnect(0, r1); + EXPECT_CALL(r1.inner_encoder_, encodeHeaders(_, true)); + r1.callbacks_.outer_encoder_->encodeHeaders(HeaderMapImpl{}, true); + + ReadyWatcher drained; + 
pool_.addDrainedCallback([&]() -> void { drained.ready(); }); + + EXPECT_CALL(dispatcher_, deferredDelete_(_)); + EXPECT_CALL(drained, ready()); + test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + EXPECT_CALL(*this, onClientDestroy()); + dispatcher_.clearDeferredDeleteList(); + + EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_.value()); + EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_remote_.value()); +} + +TEST_F(Http2ConnPoolImplTest, DrainDisconnectDrainingWithActiveRequest) { + InSequence s; + pool_.max_streams_ = 1; + + expectClientCreate(); + ActiveTestRequest r1(*this, 0, false); + expectClientConnect(0, r1); + EXPECT_CALL(r1.inner_encoder_, encodeHeaders(_, true)); + r1.callbacks_.outer_encoder_->encodeHeaders(HeaderMapImpl{}, true); + + expectClientCreate(); + ActiveTestRequest r2(*this, 1, false); + expectClientConnect(1, r2); + EXPECT_CALL(r2.inner_encoder_, encodeHeaders(_, true)); + r2.callbacks_.outer_encoder_->encodeHeaders(HeaderMapImpl{}, true); + + ReadyWatcher drained; + pool_.addDrainedCallback([&]() -> void { drained.ready(); }); + + EXPECT_CALL(dispatcher_, deferredDelete_(_)); + EXPECT_CALL(r2.decoder_, decodeHeaders_(_, true)); + r2.inner_decoder_->decodeHeaders(HeaderMapPtr{new HeaderMapImpl{}}, true); + EXPECT_CALL(*this, onClientDestroy()); + dispatcher_.clearDeferredDeleteList(); + + EXPECT_CALL(dispatcher_, deferredDelete_(_)); + EXPECT_CALL(drained, ready()); + test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + EXPECT_CALL(*this, onClientDestroy()); + dispatcher_.clearDeferredDeleteList(); + + EXPECT_EQ(2U, cluster_->stats_.upstream_cx_destroy_.value()); + EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_remote_.value()); +} + +TEST_F(Http2ConnPoolImplTest, DrainPrimary) { + InSequence s; + pool_.max_streams_ = 1; + + expectClientCreate(); + ActiveTestRequest r1(*this, 0, false); + expectClientConnect(0, r1); + EXPECT_CALL(r1.inner_encoder_, encodeHeaders(_, 
true)); + r1.callbacks_.outer_encoder_->encodeHeaders(HeaderMapImpl{}, true); + + expectClientCreate(); + ActiveTestRequest r2(*this, 1, false); + expectClientConnect(1, r2); + EXPECT_CALL(r2.inner_encoder_, encodeHeaders(_, true)); + r2.callbacks_.outer_encoder_->encodeHeaders(HeaderMapImpl{}, true); + + ReadyWatcher drained; + pool_.addDrainedCallback([&]() -> void { drained.ready(); }); + + EXPECT_CALL(dispatcher_, deferredDelete_(_)); + EXPECT_CALL(r2.decoder_, decodeHeaders_(_, true)); + r2.inner_decoder_->decodeHeaders(HeaderMapPtr{new HeaderMapImpl{}}, true); + EXPECT_CALL(*this, onClientDestroy()); + dispatcher_.clearDeferredDeleteList(); + + EXPECT_CALL(dispatcher_, deferredDelete_(_)); + EXPECT_CALL(drained, ready()); + EXPECT_CALL(r1.decoder_, decodeHeaders_(_, true)); + r1.inner_decoder_->decodeHeaders(HeaderMapPtr{new HeaderMapImpl{}}, true); + + EXPECT_CALL(*this, onClientDestroy()); + dispatcher_.clearDeferredDeleteList(); +} + +TEST_F(Http2ConnPoolImplTest, DrainPrimaryNoActiveRequest) { + InSequence s; + pool_.max_streams_ = 1; + + expectClientCreate(); + ActiveTestRequest r1(*this, 0, false); + expectClientConnect(0, r1); + EXPECT_CALL(r1.inner_encoder_, encodeHeaders(_, true)); + r1.callbacks_.outer_encoder_->encodeHeaders(HeaderMapImpl{}, true); + EXPECT_CALL(r1.decoder_, decodeHeaders_(_, true)); + r1.inner_decoder_->decodeHeaders(HeaderMapPtr{new HeaderMapImpl{}}, true); + + EXPECT_CALL(dispatcher_, deferredDelete_(_)); + expectClientCreate(); + ActiveTestRequest r2(*this, 1, false); + expectClientConnect(1, r2); + EXPECT_CALL(*this, onClientDestroy()); + dispatcher_.clearDeferredDeleteList(); + EXPECT_CALL(r2.inner_encoder_, encodeHeaders(_, true)); + r2.callbacks_.outer_encoder_->encodeHeaders(HeaderMapImpl{}, true); + EXPECT_CALL(r2.decoder_, decodeHeaders_(_, true)); + r2.inner_decoder_->decodeHeaders(HeaderMapPtr{new HeaderMapImpl{}}, true); + + ReadyWatcher drained; + EXPECT_CALL(dispatcher_, deferredDelete_(_)); + EXPECT_CALL(drained, 
ready()); + pool_.addDrainedCallback([&]() -> void { drained.ready(); }); + + EXPECT_CALL(*this, onClientDestroy()); + dispatcher_.clearDeferredDeleteList(); +} + +TEST_F(Http2ConnPoolImplTest, ConnectTimeout) { + InSequence s; + + EXPECT_EQ(0U, cluster_->circuit_breakers_stats_.rq_open_.value()); + + expectClientCreate(); + ActiveTestRequest r1(*this, 0, false); + EXPECT_CALL(r1.callbacks_.pool_failure_, ready()); + test_clients_[0].connect_timer_->invokeCallback(); + + EXPECT_CALL(*this, onClientDestroy()); + dispatcher_.clearDeferredDeleteList(); + + EXPECT_EQ(0U, cluster_->circuit_breakers_stats_.rq_open_.value()); + + expectClientCreate(); + ActiveTestRequest r2(*this, 1, false); + expectClientConnect(1, r2); + EXPECT_CALL(r2.inner_encoder_, encodeHeaders(_, true)); + r2.callbacks_.outer_encoder_->encodeHeaders(HeaderMapImpl{}, true); + EXPECT_CALL(r2.decoder_, decodeHeaders_(_, true)); + r2.inner_decoder_->decodeHeaders(HeaderMapPtr{new HeaderMapImpl{}}, true); + + test_clients_[1].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + EXPECT_CALL(*this, onClientDestroy()); + dispatcher_.clearDeferredDeleteList(); + + EXPECT_EQ(1U, cluster_->stats_.upstream_rq_total_.value()); + EXPECT_EQ(1U, cluster_->stats_.upstream_cx_connect_fail_.value()); + EXPECT_EQ(1U, cluster_->stats_.upstream_cx_connect_timeout_.value()); + EXPECT_EQ(1U, cluster_->stats_.upstream_rq_pending_failure_eject_.value()); + EXPECT_EQ(2U, cluster_->stats_.upstream_cx_destroy_.value()); + EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_local_.value()); + EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_remote_.value()); +} + +TEST_F(Http2ConnPoolImplTest, MaxGlobalRequests) { + cluster_->resetResourceManager(1024, 1024, 1, 1, 1); + InSequence s; + + expectClientCreate(); + ActiveTestRequest r1(*this, 0, false); + expectClientConnect(0, r1); + EXPECT_CALL(r1.inner_encoder_, encodeHeaders(_, true)); + r1.callbacks_.outer_encoder_->encodeHeaders(HeaderMapImpl{}, true); + + 
ConnPoolCallbacks callbacks; + Http::MockStreamDecoder decoder; + EXPECT_CALL(callbacks.pool_failure_, ready()); + EXPECT_EQ(nullptr, pool_.newStream(decoder, callbacks)); + + test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + EXPECT_CALL(*this, onClientDestroy()); + dispatcher_.clearDeferredDeleteList(); + + EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_.value()); + EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_remote_.value()); +} + +TEST_F(Http2ConnPoolImplTest, GoAway) { + InSequence s; + + expectClientCreate(); + ActiveTestRequest r1(*this, 0, false); + expectClientConnect(0, r1); + EXPECT_CALL(r1.inner_encoder_, encodeHeaders(_, true)); + r1.callbacks_.outer_encoder_->encodeHeaders(HeaderMapImpl{}, true); + EXPECT_CALL(r1.decoder_, decodeHeaders_(_, true)); + r1.inner_decoder_->decodeHeaders(HeaderMapPtr{new HeaderMapImpl{}}, true); + + test_clients_[0].codec_client_->raiseGoAway(); + + expectClientCreate(); + ActiveTestRequest r2(*this, 1, false); + expectClientConnect(1, r2); + EXPECT_CALL(r2.inner_encoder_, encodeHeaders(_, true)); + r2.callbacks_.outer_encoder_->encodeHeaders(HeaderMapImpl{}, true); + EXPECT_CALL(r2.decoder_, decodeHeaders_(_, true)); + r2.inner_decoder_->decodeHeaders(HeaderMapPtr{new HeaderMapImpl{}}, true); + + test_clients_[1].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + EXPECT_CALL(*this, onClientDestroy()).Times(2); + dispatcher_.clearDeferredDeleteList(); + + EXPECT_EQ(1U, cluster_->stats_.upstream_cx_close_notify_.value()); +} + +TEST_F(Http2ConnPoolImplTest, NoActiveConnectionsByDefault) { + EXPECT_FALSE(pool_.hasActiveConnections()); +} + +// Show that an active request on the primary connection is considered active. 
+TEST_F(Http2ConnPoolImplTest, ActiveConnectionsHasActiveRequestsTrue) { + expectClientCreate(); + ActiveTestRequest r1(*this, 0, false); + expectClientConnect(0, r1); + + EXPECT_TRUE(pool_.hasActiveConnections()); + + completeRequestCloseUpstream(0, r1); +} + +// Show that pending requests are considered active. +TEST_F(Http2ConnPoolImplTest, PendingRequestsConsideredActive) { + expectClientCreate(); + ActiveTestRequest r1(*this, 0, false); + + EXPECT_TRUE(pool_.hasActiveConnections()); + + expectClientConnect(0, r1); + completeRequestCloseUpstream(0, r1); +} + +// Show that even if there is a primary client still, if all of its requests have completed, then it +// does not have any active connections. +TEST_F(Http2ConnPoolImplTest, ResponseCompletedConnectionReadyNoActiveConnections) { + expectClientCreate(); + ActiveTestRequest r1(*this, 0, false); + expectClientConnect(0, r1); + completeRequest(r1); + + EXPECT_FALSE(pool_.hasActiveConnections()); + + closeClient(0); +} + +// Show that if connections are draining, they're still considered active. +TEST_F(Http2ConnPoolImplTest, DrainingConnectionsConsideredActive) { + pool_.max_streams_ = 1; + expectClientCreate(); + ActiveTestRequest r1(*this, 0, false); + expectClientConnect(0, r1); + pool_.drainConnections(); + + EXPECT_TRUE(pool_.hasActiveConnections()); + + completeRequest(r1); + closeClient(0); +} + +// Show that once we've drained all connections, there are no longer any active. 
+TEST_F(Http2ConnPoolImplTest, DrainedConnectionsNotActive) { + pool_.max_streams_ = 1; + expectClientCreate(); + ActiveTestRequest r1(*this, 0, false); + expectClientConnect(0, r1); + pool_.drainConnections(); + completeRequest(r1); + + EXPECT_FALSE(pool_.hasActiveConnections()); + + closeClient(0); +} +} // namespace Http2 +} // namespace Legacy +} // namespace Http +} // namespace Envoy From 562bbfec9988cbe5910568796b1592565e3b5a49 Mon Sep 17 00:00:00 2001 From: Greg Greenway Date: Tue, 21 Jan 2020 08:14:38 -0800 Subject: [PATCH 29/41] Rename legacy test class to avoid a name conflict to make gtest happy Signed-off-by: Greg Greenway --- .../http/http1/conn_pool_legacy_test.cc | 64 +++++++++-------- .../http/http2/conn_pool_legacy_test.cc | 70 +++++++++---------- 2 files changed, 68 insertions(+), 66 deletions(-) diff --git a/test/common/http/http1/conn_pool_legacy_test.cc b/test/common/http/http1/conn_pool_legacy_test.cc index 5c85386d1da17..adad3d7a83b7d 100644 --- a/test/common/http/http1/conn_pool_legacy_test.cc +++ b/test/common/http/http1/conn_pool_legacy_test.cc @@ -126,13 +126,13 @@ class ConnPoolImplForTest : public ConnPoolImpl { /** * Test fixture for all connection pool tests. 
*/ -class Http1ConnPoolImplTest : public testing::Test { +class Http1ConnPoolImplLegacyTest : public testing::Test { public: - Http1ConnPoolImplTest() + Http1ConnPoolImplLegacyTest() : upstream_ready_timer_(new NiceMock(&dispatcher_)), conn_pool_(dispatcher_, cluster_, upstream_ready_timer_) {} - ~Http1ConnPoolImplTest() override { + ~Http1ConnPoolImplLegacyTest() override { EXPECT_TRUE(TestUtility::gaugesZeroed(cluster_->stats_store_.gauges())); } @@ -149,7 +149,7 @@ class Http1ConnPoolImplTest : public testing::Test { struct ActiveTestRequest { enum class Type { Pending, CreateConnection, Immediate }; - ActiveTestRequest(Http1ConnPoolImplTest& parent, size_t client_index, Type type) + ActiveTestRequest(Http1ConnPoolImplLegacyTest& parent, size_t client_index, Type type) : parent_(parent), client_index_(client_index) { uint64_t active_rq_observed = parent_.cluster_->resourceManager(Upstream::ResourcePriority::Default).requests().count(); @@ -205,7 +205,7 @@ struct ActiveTestRequest { void startRequest() { callbacks_.outer_encoder_->encodeHeaders(TestHeaderMapImpl{}, true); } - Http1ConnPoolImplTest& parent_; + Http1ConnPoolImplLegacyTest& parent_; size_t client_index_; NiceMock outer_decoder_; Http::ConnectionPool::Cancellable* handle_{}; @@ -217,12 +217,14 @@ struct ActiveTestRequest { /** * Verify that the pool's host is a member of the cluster the pool was constructed with. */ -TEST_F(Http1ConnPoolImplTest, Host) { EXPECT_EQ(cluster_.get(), &conn_pool_.host()->cluster()); } +TEST_F(Http1ConnPoolImplLegacyTest, Host) { + EXPECT_EQ(cluster_.get(), &conn_pool_.host()->cluster()); +} /** * Verify that connections are drained when requested. */ -TEST_F(Http1ConnPoolImplTest, DrainConnections) { +TEST_F(Http1ConnPoolImplLegacyTest, DrainConnections) { cluster_->resetResourceManager(2, 1024, 1024, 1, 1); InSequence s; @@ -248,7 +250,7 @@ TEST_F(Http1ConnPoolImplTest, DrainConnections) { /** * Test all timing stats are set. 
*/ -TEST_F(Http1ConnPoolImplTest, VerifyTimingStats) { +TEST_F(Http1ConnPoolImplLegacyTest, VerifyTimingStats) { EXPECT_CALL(cluster_->stats_store_, deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_cx_connect_ms"), _)); EXPECT_CALL(cluster_->stats_store_, @@ -266,7 +268,7 @@ TEST_F(Http1ConnPoolImplTest, VerifyTimingStats) { /** * Test that buffer limits are set. */ -TEST_F(Http1ConnPoolImplTest, VerifyBufferLimits) { +TEST_F(Http1ConnPoolImplLegacyTest, VerifyBufferLimits) { NiceMock outer_decoder; ConnPoolCallbacks callbacks; conn_pool_.expectClientCreate(); @@ -284,7 +286,7 @@ TEST_F(Http1ConnPoolImplTest, VerifyBufferLimits) { /** * Verify that canceling pending connections within the callback works. */ -TEST_F(Http1ConnPoolImplTest, VerifyCancelInCallback) { +TEST_F(Http1ConnPoolImplLegacyTest, VerifyCancelInCallback) { Http::ConnectionPool::Cancellable* handle1{}; // In this scenario, all connections must succeed, so when // one fails, the others are canceled. @@ -317,7 +319,7 @@ TEST_F(Http1ConnPoolImplTest, VerifyCancelInCallback) { * Tests a request that generates a new connection, completes, and then a second request that uses * the same connection. */ -TEST_F(Http1ConnPoolImplTest, MultipleRequestAndResponse) { +TEST_F(Http1ConnPoolImplLegacyTest, MultipleRequestAndResponse) { InSequence s; // Request 1 should kick off a new connection. @@ -339,7 +341,7 @@ TEST_F(Http1ConnPoolImplTest, MultipleRequestAndResponse) { /** * Test when we overflow max pending requests. */ -TEST_F(Http1ConnPoolImplTest, MaxPendingRequests) { +TEST_F(Http1ConnPoolImplLegacyTest, MaxPendingRequests) { cluster_->resetResourceManager(1, 1, 1024, 1, 1); EXPECT_EQ(0U, cluster_->circuit_breakers_stats_.rq_pending_open_.value()); @@ -371,7 +373,7 @@ TEST_F(Http1ConnPoolImplTest, MaxPendingRequests) { * Tests a connection failure before a request is bound which should result in the pending request * getting purged. 
*/ -TEST_F(Http1ConnPoolImplTest, ConnectFailure) { +TEST_F(Http1ConnPoolImplLegacyTest, ConnectFailure) { InSequence s; // Request 1 should kick off a new connection. @@ -395,7 +397,7 @@ TEST_F(Http1ConnPoolImplTest, ConnectFailure) { * Tests that connection creation time is recorded correctly even in cases where * there are multiple pending connection creation attempts to the same upstream. */ -TEST_F(Http1ConnPoolImplTest, MeasureConnectTime) { +TEST_F(Http1ConnPoolImplLegacyTest, MeasureConnectTime) { constexpr uint64_t sleep1_ms = 20; constexpr uint64_t sleep2_ms = 10; constexpr uint64_t sleep3_ms = 5; @@ -451,7 +453,7 @@ TEST_F(Http1ConnPoolImplTest, MeasureConnectTime) { /** * Tests a connect timeout. Also test that we can add a new request during ejection processing. */ -TEST_F(Http1ConnPoolImplTest, ConnectTimeout) { +TEST_F(Http1ConnPoolImplLegacyTest, ConnectTimeout) { InSequence s; // Request 1 should kick off a new connection. @@ -483,7 +485,7 @@ TEST_F(Http1ConnPoolImplTest, ConnectTimeout) { /** * Test cancelling before the request is bound to a connection. */ -TEST_F(Http1ConnPoolImplTest, CancelBeforeBound) { +TEST_F(Http1ConnPoolImplLegacyTest, CancelBeforeBound) { InSequence s; // Request 1 should kick off a new connection. @@ -505,7 +507,7 @@ TEST_F(Http1ConnPoolImplTest, CancelBeforeBound) { /** * Test an upstream disconnection while there is a bound request. */ -TEST_F(Http1ConnPoolImplTest, DisconnectWhileBound) { +TEST_F(Http1ConnPoolImplLegacyTest, DisconnectWhileBound) { InSequence s; // Request 1 should kick off a new connection. @@ -537,7 +539,7 @@ TEST_F(Http1ConnPoolImplTest, DisconnectWhileBound) { /** * Test that we correctly handle reaching max connections. 
*/ -TEST_F(Http1ConnPoolImplTest, MaxConnections) { +TEST_F(Http1ConnPoolImplLegacyTest, MaxConnections) { InSequence s; EXPECT_EQ(0U, cluster_->circuit_breakers_stats_.cx_open_.value()); @@ -595,7 +597,7 @@ TEST_F(Http1ConnPoolImplTest, MaxConnections) { * Test when upstream closes connection without 'connection: close' like * https://github.com/envoyproxy/envoy/pull/2715 */ -TEST_F(Http1ConnPoolImplTest, ConnectionCloseWithoutHeader) { +TEST_F(Http1ConnPoolImplLegacyTest, ConnectionCloseWithoutHeader) { InSequence s; // Request 1 should kick off a new connection. @@ -657,7 +659,7 @@ TEST_F(Http1ConnPoolImplTest, ConnectionCloseWithoutHeader) { /** * Test when upstream sends us 'connection: close' */ -TEST_F(Http1ConnPoolImplTest, ConnectionCloseHeader) { +TEST_F(Http1ConnPoolImplLegacyTest, ConnectionCloseHeader) { InSequence s; // Request 1 should kick off a new connection. @@ -690,7 +692,7 @@ TEST_F(Http1ConnPoolImplTest, ConnectionCloseHeader) { /** * Test when upstream sends us 'proxy-connection: close' */ -TEST_F(Http1ConnPoolImplTest, ProxyConnectionCloseHeader) { +TEST_F(Http1ConnPoolImplLegacyTest, ProxyConnectionCloseHeader) { InSequence s; // Request 1 should kick off a new connection. @@ -723,7 +725,7 @@ TEST_F(Http1ConnPoolImplTest, ProxyConnectionCloseHeader) { /** * Test when upstream is HTTP/1.0 and does not send 'connection: keep-alive' */ -TEST_F(Http1ConnPoolImplTest, Http10NoConnectionKeepAlive) { +TEST_F(Http1ConnPoolImplLegacyTest, Http10NoConnectionKeepAlive) { InSequence s; // Request 1 should kick off a new connection. @@ -756,7 +758,7 @@ TEST_F(Http1ConnPoolImplTest, Http10NoConnectionKeepAlive) { /** * Test when we reach max requests per connection. 
*/ -TEST_F(Http1ConnPoolImplTest, MaxRequestsPerConnection) { +TEST_F(Http1ConnPoolImplLegacyTest, MaxRequestsPerConnection) { InSequence s; cluster_->max_requests_per_connection_ = 1; @@ -788,7 +790,7 @@ TEST_F(Http1ConnPoolImplTest, MaxRequestsPerConnection) { EXPECT_EQ(1U, cluster_->stats_.upstream_cx_max_requests_.value()); } -TEST_F(Http1ConnPoolImplTest, ConcurrentConnections) { +TEST_F(Http1ConnPoolImplLegacyTest, ConcurrentConnections) { cluster_->resetResourceManager(2, 1024, 1024, 1, 1); InSequence s; @@ -822,7 +824,7 @@ TEST_F(Http1ConnPoolImplTest, ConcurrentConnections) { EXPECT_EQ(2U, cluster_->stats_.upstream_cx_destroy_remote_.value()); } -TEST_F(Http1ConnPoolImplTest, DrainCallback) { +TEST_F(Http1ConnPoolImplLegacyTest, DrainCallback) { InSequence s; ReadyWatcher drained; @@ -843,7 +845,7 @@ TEST_F(Http1ConnPoolImplTest, DrainCallback) { } // Test draining a connection pool that has a pending connection. -TEST_F(Http1ConnPoolImplTest, DrainWhileConnecting) { +TEST_F(Http1ConnPoolImplLegacyTest, DrainWhileConnecting) { InSequence s; ReadyWatcher drained; @@ -864,7 +866,7 @@ TEST_F(Http1ConnPoolImplTest, DrainWhileConnecting) { dispatcher_.clearDeferredDeleteList(); } -TEST_F(Http1ConnPoolImplTest, RemoteCloseToCompleteResponse) { +TEST_F(Http1ConnPoolImplLegacyTest, RemoteCloseToCompleteResponse) { InSequence s; NiceMock outer_decoder; @@ -904,11 +906,11 @@ TEST_F(Http1ConnPoolImplTest, RemoteCloseToCompleteResponse) { EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_remote_.value()); } -TEST_F(Http1ConnPoolImplTest, NoActiveConnectionsByDefault) { +TEST_F(Http1ConnPoolImplLegacyTest, NoActiveConnectionsByDefault) { EXPECT_FALSE(conn_pool_.hasActiveConnections()); } -TEST_F(Http1ConnPoolImplTest, ActiveRequestHasActiveConnectionsTrue) { +TEST_F(Http1ConnPoolImplLegacyTest, ActiveRequestHasActiveConnectionsTrue) { ActiveTestRequest r1(*this, 0, ActiveTestRequest::Type::CreateConnection); r1.startRequest(); @@ -921,7 +923,7 @@ 
TEST_F(Http1ConnPoolImplTest, ActiveRequestHasActiveConnectionsTrue) { dispatcher_.clearDeferredDeleteList(); } -TEST_F(Http1ConnPoolImplTest, ResponseCompletedConnectionReadyNoActiveConnections) { +TEST_F(Http1ConnPoolImplLegacyTest, ResponseCompletedConnectionReadyNoActiveConnections) { ActiveTestRequest r1(*this, 0, ActiveTestRequest::Type::CreateConnection); r1.startRequest(); r1.completeResponse(false); @@ -933,7 +935,7 @@ TEST_F(Http1ConnPoolImplTest, ResponseCompletedConnectionReadyNoActiveConnection dispatcher_.clearDeferredDeleteList(); } -TEST_F(Http1ConnPoolImplTest, PendingRequestIsConsideredActive) { +TEST_F(Http1ConnPoolImplLegacyTest, PendingRequestIsConsideredActive) { conn_pool_.expectClientCreate(); ActiveTestRequest r1(*this, 0, ActiveTestRequest::Type::Pending); diff --git a/test/common/http/http2/conn_pool_legacy_test.cc b/test/common/http/http2/conn_pool_legacy_test.cc index 715f5e56c5103..ffc5339e9d2b5 100644 --- a/test/common/http/http2/conn_pool_legacy_test.cc +++ b/test/common/http/http2/conn_pool_legacy_test.cc @@ -53,7 +53,7 @@ class TestConnPoolImpl : public ConnPoolImpl { class ActiveTestRequest; -class Http2ConnPoolImplTest : public testing::Test { +class Http2ConnPoolImplLegacyTest : public testing::Test { public: struct TestCodecClient { Http::MockClientConnection* codec_; @@ -63,11 +63,11 @@ class Http2ConnPoolImplTest : public testing::Test { Event::DispatcherPtr client_dispatcher_; }; - Http2ConnPoolImplTest() + Http2ConnPoolImplLegacyTest() : api_(Api::createApiForTest(stats_store_)), pool_(dispatcher_, host_, Upstream::ResourcePriority::Default, nullptr, nullptr) {} - ~Http2ConnPoolImplTest() override { + ~Http2ConnPoolImplLegacyTest() override { EXPECT_TRUE(TestUtility::gaugesZeroed(cluster_->stats_store_.gauges())); } @@ -141,7 +141,7 @@ class Http2ConnPoolImplTest : public testing::Test { class ActiveTestRequest { public: - ActiveTestRequest(Http2ConnPoolImplTest& test, size_t client_index, bool expect_connected) { + 
ActiveTestRequest(Http2ConnPoolImplLegacyTest& test, size_t client_index, bool expect_connected) { if (expect_connected) { EXPECT_CALL(*test.test_clients_[client_index].codec_, newStream(_)) .WillOnce(DoAll(SaveArgAddress(&inner_decoder_), ReturnRef(inner_encoder_))); @@ -158,42 +158,42 @@ class ActiveTestRequest { NiceMock inner_encoder_; }; -void Http2ConnPoolImplTest::expectClientConnect(size_t index, ActiveTestRequest& r) { +void Http2ConnPoolImplLegacyTest::expectClientConnect(size_t index, ActiveTestRequest& r) { expectStreamConnect(index, r); EXPECT_CALL(*test_clients_[index].connect_timer_, disableTimer()); test_clients_[index].connection_->raiseEvent(Network::ConnectionEvent::Connected); } -void Http2ConnPoolImplTest::expectStreamConnect(size_t index, ActiveTestRequest& r) { +void Http2ConnPoolImplLegacyTest::expectStreamConnect(size_t index, ActiveTestRequest& r) { EXPECT_CALL(*test_clients_[index].codec_, newStream(_)) .WillOnce(DoAll(SaveArgAddress(&r.inner_decoder_), ReturnRef(r.inner_encoder_))); EXPECT_CALL(r.callbacks_.pool_ready_, ready()); } -void Http2ConnPoolImplTest::expectClientReset(size_t index, ActiveTestRequest& r) { +void Http2ConnPoolImplLegacyTest::expectClientReset(size_t index, ActiveTestRequest& r) { expectStreamReset(r); EXPECT_CALL(*test_clients_[0].connect_timer_, disableTimer()); test_clients_[index].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); } -void Http2ConnPoolImplTest::expectStreamReset(ActiveTestRequest& r) { +void Http2ConnPoolImplLegacyTest::expectStreamReset(ActiveTestRequest& r) { EXPECT_CALL(r.callbacks_.pool_failure_, ready()); } -void Http2ConnPoolImplTest::closeClient(size_t index) { +void Http2ConnPoolImplLegacyTest::closeClient(size_t index) { test_clients_[index].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); EXPECT_CALL(*this, onClientDestroy()); dispatcher_.clearDeferredDeleteList(); } -void Http2ConnPoolImplTest::completeRequest(ActiveTestRequest& r) { +void 
Http2ConnPoolImplLegacyTest::completeRequest(ActiveTestRequest& r) { EXPECT_CALL(r.inner_encoder_, encodeHeaders(_, true)); r.callbacks_.outer_encoder_->encodeHeaders(HeaderMapImpl{}, true); EXPECT_CALL(r.decoder_, decodeHeaders_(_, true)); r.inner_decoder_->decodeHeaders(HeaderMapPtr{new HeaderMapImpl{}}, true); } -void Http2ConnPoolImplTest::completeRequestCloseUpstream(size_t index, ActiveTestRequest& r) { +void Http2ConnPoolImplLegacyTest::completeRequestCloseUpstream(size_t index, ActiveTestRequest& r) { completeRequest(r); closeClient(index); } @@ -201,12 +201,12 @@ void Http2ConnPoolImplTest::completeRequestCloseUpstream(size_t index, ActiveTes /** * Verify that the pool retains and returns the host it was constructed with. */ -TEST_F(Http2ConnPoolImplTest, Host) { EXPECT_EQ(host_, pool_.host()); } +TEST_F(Http2ConnPoolImplLegacyTest, Host) { EXPECT_EQ(host_, pool_.host()); } /** * Verify that connections are drained when requested. */ -TEST_F(Http2ConnPoolImplTest, DrainConnections) { +TEST_F(Http2ConnPoolImplLegacyTest, DrainConnections) { InSequence s; pool_.max_streams_ = 1; @@ -240,7 +240,7 @@ TEST_F(Http2ConnPoolImplTest, DrainConnections) { } // Verifies that requests are queued up in the conn pool until the connection becomes ready. -TEST_F(Http2ConnPoolImplTest, PendingRequests) { +TEST_F(Http2ConnPoolImplLegacyTest, PendingRequests) { InSequence s; // Create three requests. These should be queued up. @@ -278,7 +278,7 @@ TEST_F(Http2ConnPoolImplTest, PendingRequests) { // Verifies that requests are queued up in the conn pool and fail when the connection // fails to be established. -TEST_F(Http2ConnPoolImplTest, PendingRequestsFailure) { +TEST_F(Http2ConnPoolImplLegacyTest, PendingRequestsFailure) { InSequence s; pool_.max_streams_ = 10; @@ -311,7 +311,7 @@ TEST_F(Http2ConnPoolImplTest, PendingRequestsFailure) { // Verifies that requests are queued up in the conn pool and respect max request circuit breaking // when the connection is established. 
-TEST_F(Http2ConnPoolImplTest, PendingRequestsRequestOverflow) { +TEST_F(Http2ConnPoolImplLegacyTest, PendingRequestsRequestOverflow) { InSequence s; // Inflate the resource count to just under the limit. @@ -347,7 +347,7 @@ TEST_F(Http2ConnPoolImplTest, PendingRequestsRequestOverflow) { } // Verifies that we honor the max pending requests circuit breaker. -TEST_F(Http2ConnPoolImplTest, PendingRequestsMaxPendingCircuitBreaker) { +TEST_F(Http2ConnPoolImplLegacyTest, PendingRequestsMaxPendingCircuitBreaker) { InSequence s; // Inflate the resource count to just under the limit. @@ -383,7 +383,7 @@ TEST_F(Http2ConnPoolImplTest, PendingRequestsMaxPendingCircuitBreaker) { EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_remote_.value()); } -TEST_F(Http2ConnPoolImplTest, VerifyConnectionTimingStats) { +TEST_F(Http2ConnPoolImplLegacyTest, VerifyConnectionTimingStats) { InSequence s; expectClientCreate(); ActiveTestRequest r1(*this, 0, false); @@ -408,7 +408,7 @@ TEST_F(Http2ConnPoolImplTest, VerifyConnectionTimingStats) { /** * Test that buffer limits are set. 
*/ -TEST_F(Http2ConnPoolImplTest, VerifyBufferLimits) { +TEST_F(Http2ConnPoolImplLegacyTest, VerifyBufferLimits) { InSequence s; expectClientCreate(8192); ActiveTestRequest r1(*this, 0, false); @@ -427,7 +427,7 @@ TEST_F(Http2ConnPoolImplTest, VerifyBufferLimits) { EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_remote_.value()); } -TEST_F(Http2ConnPoolImplTest, RequestAndResponse) { +TEST_F(Http2ConnPoolImplLegacyTest, RequestAndResponse) { InSequence s; expectClientCreate(); @@ -452,7 +452,7 @@ TEST_F(Http2ConnPoolImplTest, RequestAndResponse) { EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_remote_.value()); } -TEST_F(Http2ConnPoolImplTest, LocalReset) { +TEST_F(Http2ConnPoolImplLegacyTest, LocalReset) { InSequence s; expectClientCreate(); @@ -471,7 +471,7 @@ TEST_F(Http2ConnPoolImplTest, LocalReset) { EXPECT_EQ(0U, cluster_->circuit_breakers_stats_.rq_open_.value()); } -TEST_F(Http2ConnPoolImplTest, RemoteReset) { +TEST_F(Http2ConnPoolImplLegacyTest, RemoteReset) { InSequence s; expectClientCreate(); @@ -490,7 +490,7 @@ TEST_F(Http2ConnPoolImplTest, RemoteReset) { EXPECT_EQ(0U, cluster_->circuit_breakers_stats_.rq_open_.value()); } -TEST_F(Http2ConnPoolImplTest, DrainDisconnectWithActiveRequest) { +TEST_F(Http2ConnPoolImplLegacyTest, DrainDisconnectWithActiveRequest) { InSequence s; pool_.max_streams_ = 1; @@ -513,7 +513,7 @@ TEST_F(Http2ConnPoolImplTest, DrainDisconnectWithActiveRequest) { EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_remote_.value()); } -TEST_F(Http2ConnPoolImplTest, DrainDisconnectDrainingWithActiveRequest) { +TEST_F(Http2ConnPoolImplLegacyTest, DrainDisconnectDrainingWithActiveRequest) { InSequence s; pool_.max_streams_ = 1; @@ -548,7 +548,7 @@ TEST_F(Http2ConnPoolImplTest, DrainDisconnectDrainingWithActiveRequest) { EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_remote_.value()); } -TEST_F(Http2ConnPoolImplTest, DrainPrimary) { +TEST_F(Http2ConnPoolImplLegacyTest, DrainPrimary) { InSequence s; pool_.max_streams_ = 1; @@ 
-582,7 +582,7 @@ TEST_F(Http2ConnPoolImplTest, DrainPrimary) { dispatcher_.clearDeferredDeleteList(); } -TEST_F(Http2ConnPoolImplTest, DrainPrimaryNoActiveRequest) { +TEST_F(Http2ConnPoolImplLegacyTest, DrainPrimaryNoActiveRequest) { InSequence s; pool_.max_streams_ = 1; @@ -614,7 +614,7 @@ TEST_F(Http2ConnPoolImplTest, DrainPrimaryNoActiveRequest) { dispatcher_.clearDeferredDeleteList(); } -TEST_F(Http2ConnPoolImplTest, ConnectTimeout) { +TEST_F(Http2ConnPoolImplLegacyTest, ConnectTimeout) { InSequence s; EXPECT_EQ(0U, cluster_->circuit_breakers_stats_.rq_open_.value()); @@ -650,7 +650,7 @@ TEST_F(Http2ConnPoolImplTest, ConnectTimeout) { EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_remote_.value()); } -TEST_F(Http2ConnPoolImplTest, MaxGlobalRequests) { +TEST_F(Http2ConnPoolImplLegacyTest, MaxGlobalRequests) { cluster_->resetResourceManager(1024, 1024, 1, 1, 1); InSequence s; @@ -673,7 +673,7 @@ TEST_F(Http2ConnPoolImplTest, MaxGlobalRequests) { EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_remote_.value()); } -TEST_F(Http2ConnPoolImplTest, GoAway) { +TEST_F(Http2ConnPoolImplLegacyTest, GoAway) { InSequence s; expectClientCreate(); @@ -702,12 +702,12 @@ TEST_F(Http2ConnPoolImplTest, GoAway) { EXPECT_EQ(1U, cluster_->stats_.upstream_cx_close_notify_.value()); } -TEST_F(Http2ConnPoolImplTest, NoActiveConnectionsByDefault) { +TEST_F(Http2ConnPoolImplLegacyTest, NoActiveConnectionsByDefault) { EXPECT_FALSE(pool_.hasActiveConnections()); } // Show that an active request on the primary connection is considered active. -TEST_F(Http2ConnPoolImplTest, ActiveConnectionsHasActiveRequestsTrue) { +TEST_F(Http2ConnPoolImplLegacyTest, ActiveConnectionsHasActiveRequestsTrue) { expectClientCreate(); ActiveTestRequest r1(*this, 0, false); expectClientConnect(0, r1); @@ -718,7 +718,7 @@ TEST_F(Http2ConnPoolImplTest, ActiveConnectionsHasActiveRequestsTrue) { } // Show that pending requests are considered active. 
-TEST_F(Http2ConnPoolImplTest, PendingRequestsConsideredActive) { +TEST_F(Http2ConnPoolImplLegacyTest, PendingRequestsConsideredActive) { expectClientCreate(); ActiveTestRequest r1(*this, 0, false); @@ -730,7 +730,7 @@ TEST_F(Http2ConnPoolImplTest, PendingRequestsConsideredActive) { // Show that even if there is a primary client still, if all of its requests have completed, then it // does not have any active connections. -TEST_F(Http2ConnPoolImplTest, ResponseCompletedConnectionReadyNoActiveConnections) { +TEST_F(Http2ConnPoolImplLegacyTest, ResponseCompletedConnectionReadyNoActiveConnections) { expectClientCreate(); ActiveTestRequest r1(*this, 0, false); expectClientConnect(0, r1); @@ -742,7 +742,7 @@ TEST_F(Http2ConnPoolImplTest, ResponseCompletedConnectionReadyNoActiveConnection } // Show that if connections are draining, they're still considered active. -TEST_F(Http2ConnPoolImplTest, DrainingConnectionsConsideredActive) { +TEST_F(Http2ConnPoolImplLegacyTest, DrainingConnectionsConsideredActive) { pool_.max_streams_ = 1; expectClientCreate(); ActiveTestRequest r1(*this, 0, false); @@ -756,7 +756,7 @@ TEST_F(Http2ConnPoolImplTest, DrainingConnectionsConsideredActive) { } // Show that once we've drained all connections, there are no longer any active. 
-TEST_F(Http2ConnPoolImplTest, DrainedConnectionsNotActive) { +TEST_F(Http2ConnPoolImplLegacyTest, DrainedConnectionsNotActive) { pool_.max_streams_ = 1; expectClientCreate(); ActiveTestRequest r1(*this, 0, false); From 9e4f045f68e8d9b9da99d3b8841e1a84dcc2bd33 Mon Sep 17 00:00:00 2001 From: Greg Greenway Date: Thu, 23 Jan 2020 15:35:10 -0800 Subject: [PATCH 30/41] Fix code review issues Signed-off-by: Greg Greenway --- source/common/http/conn_pool_base.cc | 39 +++++++++++++--------------- source/common/http/conn_pool_base.h | 14 ++++++++-- 2 files changed, 30 insertions(+), 23 deletions(-) diff --git a/source/common/http/conn_pool_base.cc b/source/common/http/conn_pool_base.cc index 5efaea8346d87..c1dcad2315655 100644 --- a/source/common/http/conn_pool_base.cc +++ b/source/common/http/conn_pool_base.cc @@ -28,11 +28,21 @@ void ConnPoolImplBase::destructAllConnections() { dispatcher_.clearDeferredDeleteList(); } -void ConnPoolImplBase::createNewConnection() { - ENVOY_LOG(debug, "creating a new connection"); - ActiveClientPtr client = instantiateActiveClient(); - ASSERT(client->state_ == ActiveClient::State::CONNECTING); - client->moveIntoList(std::move(client), owningList(client->state_)); +void ConnPoolImplBase::tryCreateNewConnection() { + const bool can_create_connection = + host_->cluster().resourceManager(priority_).connections().canCreate(); + if (!can_create_connection) { + host_->cluster().stats().upstream_cx_overflow_.inc(); + } + // If we are at the connection circuit-breaker limit due to other upstreams having + // too many open connections, and this upstream has no connections, always create one, to + // prevent pending requests being queued to this upstream with no way to be processed. 
+ if (can_create_connection || (ready_clients_.empty() && busy_clients_.empty())) { + ENVOY_LOG(debug, "creating a new connection"); + ActiveClientPtr client = instantiateActiveClient(); + ASSERT(client->state_ == ActiveClient::State::CONNECTING); + client->moveIntoList(std::move(client), owningList(client->state_)); + } } void ConnPoolImplBase::attachRequestToClient(ActiveClient& client, StreamDecoder& response_decoder, @@ -101,18 +111,7 @@ ConnectionPool::Cancellable* ConnPoolImplBase::newStream(Http::StreamDecoder& re } if (host_->cluster().resourceManager(priority_).pendingRequests().canCreate()) { - const bool can_create_connection = - host_->cluster().resourceManager(priority_).connections().canCreate(); - if (!can_create_connection) { - host_->cluster().stats().upstream_cx_overflow_.inc(); - } - - // If we are at the connection circuit-breaker limit due to other upstreams having - // too many open connections, and this upstream has no connections, always create one, to - // prevent pending requests being queued to this upstream with no way to be processed. - if ((ready_clients_.empty() && busy_clients_.empty()) || can_create_connection) { - createNewConnection(); - } + tryCreateNewConnection(); return newPendingRequest(response_decoder, callbacks); } else { @@ -268,10 +267,8 @@ void ConnPoolImplBase::onConnectionEvent(ConnPoolImplBase::ActiveClient& client, client.state_ = ActiveClient::State::CLOSED; // If we have pending requests and we just lost a connection we should make a new one. 
- if (!pending_requests_.empty() && - host_->cluster().resourceManager(priority_).requests().canCreate() && - host_->cluster().resourceManager(priority_).connections().canCreate()) { - createNewConnection(); + if (!pending_requests_.empty()) { + tryCreateNewConnection(); } } else if (event == Network::ConnectionEvent::Connected) { client.conn_connect_ms_->complete(); diff --git a/source/common/http/conn_pool_base.h b/source/common/http/conn_pool_base.h index a85376616c4d9..64a62a3743d50 100644 --- a/source/common/http/conn_pool_base.h +++ b/source/common/http/conn_pool_base.h @@ -64,7 +64,14 @@ class ConnPoolImplBase : public ConnectionPool::Instance, virtual bool closingWithIncompleteRequest() const PURE; virtual StreamEncoder& newStreamEncoder(StreamDecoder& response_decoder) PURE; - enum class State { CONNECTING, READY, BUSY, DRAINING, CLOSED }; + enum class State { + CONNECTING, // Connection is not yet established. + READY, // Additional requests may be immediately dispatched to this connection. + BUSY, // Connection is at its concurrent request limit. + DRAINING, // No more requests can be dispatched to this connection, and it will be closed + // when all requests complete. + CLOSED // Connection is closed and object is queued for destruction. + }; ConnPoolImplBase& parent_; uint64_t remaining_requests_; @@ -129,7 +136,10 @@ class ConnPoolImplBase : public ConnectionPool::Instance, void onUpstreamReady(); void attachRequestToClient(ActiveClient& client, StreamDecoder& response_decoder, ConnectionPool::Callbacks& callbacks); - void createNewConnection(); + + // Creates a new connection if allowed by resourceManager, or if created to avoid + // starving this pool. 
+ void tryCreateNewConnection(); public: const Upstream::HostConstSharedPtr host_; From b4f7ef50eb1009b885c9b5a8407dcf500e9861e7 Mon Sep 17 00:00:00 2001 From: Greg Greenway Date: Thu, 23 Jan 2020 15:59:08 -0800 Subject: [PATCH 31/41] docs Signed-off-by: Greg Greenway --- .../upstream/circuit_breaking.rst | 22 ++++++++++++------- docs/root/intro/version_history.rst | 1 + 2 files changed, 15 insertions(+), 8 deletions(-) diff --git a/docs/root/intro/arch_overview/upstream/circuit_breaking.rst b/docs/root/intro/arch_overview/upstream/circuit_breaking.rst index f96066416a78d..a1e6ca3d4402a 100644 --- a/docs/root/intro/arch_overview/upstream/circuit_breaking.rst +++ b/docs/root/intro/arch_overview/upstream/circuit_breaking.rst @@ -3,6 +3,14 @@ Circuit breaking ================ +.. warning:: + +Circuit breaking behavior changed in version 1.14. If the previous behavior (documented :ref:`here ` +is needed, disable runtime feature "envoy.reloadable_features.new_http1_connection_pool_behavior" or +"envoy.reloadable_features.new_http2_connection_pool_behavior" and then re-configure your clusters or +restart Envoy. Note that this behavior is deprecated and will be removed according to the deprecation +policy. + Circuit breaking is a critical component of distributed systems. It’s nearly always better to fail quickly and apply back pressure downstream as soon as possible. One of the main benefits of an Envoy mesh is that Envoy enforces circuit breaking limits at the network level as opposed to having to @@ -12,23 +20,21 @@ configure and code each application independently. Envoy supports various types .. _arch_overview_circuit_break_cluster_maximum_connections: * **Cluster maximum connections**: The maximum number of connections that Envoy will establish to - all hosts in an upstream cluster. In practice this is only applicable to HTTP/1.1 clusters since - HTTP/2 uses a single connection to each host. 
If this circuit breaker overflows the :ref:`upstream_cx_overflow + all hosts in an upstream cluster. If this circuit breaker overflows the :ref:`upstream_cx_overflow ` counter for the cluster will increment. * **Cluster maximum pending requests**: The maximum number of requests that will be queued while - waiting for a ready connection pool connection. Since HTTP/2 requests are sent over a single + waiting for a ready connection pool connection. Since HTTP/2 allows many requests concurrently over a single connection, this circuit breaker only comes into play as the initial connection is created, - as requests will be multiplexed immediately afterwards. For HTTP/1.1, requests are added to the list + as requests will be multiplexed immediately afterwards (unless :ref:`max concurrent streams ` + is configured). For HTTP/1.1, requests are added to the list of pending requests whenever there aren't enough upstream connections available to immediately dispatch the request, so this circuit breaker will remain in play for the lifetime of the process. If this circuit breaker overflows the :ref:`upstream_rq_pending_overflow ` counter for the cluster will increment. * **Cluster maximum requests**: The maximum number of requests that can be outstanding to all hosts - in a cluster at any given time. In practice this is applicable to HTTP/2 clusters since HTTP/1.1 - clusters are governed by the maximum connections circuit breaker. If this circuit breaker - overflows the :ref:`upstream_rq_pending_overflow ` counter - for the cluster will increment. + in a cluster at any given time. If this circuit breaker overflows the :ref:`upstream_rq_pending_overflow ` + counter for the cluster will increment. * **Cluster maximum active retries**: The maximum number of retries that can be outstanding to all hosts in a cluster at any given time. In general we recommend using :ref:`retry budgets `; however, if static circuit breaking is preferred it should aggressively circuit break retries. 
This is so that retries for sporadic failures are allowed, but the overall retry volume cannot diff --git a/docs/root/intro/version_history.rst b/docs/root/intro/version_history.rst index 27dc4523010c0..452de6d5b1c79 100644 --- a/docs/root/intro/version_history.rst +++ b/docs/root/intro/version_history.rst @@ -5,6 +5,7 @@ Version history ================ * config: use type URL to select an extension whenever the config type URL (or its previous versions) uniquely identify a typed extension, see :ref:`extension configuration `. * retry: added a retry predicate that :ref:`rejects hosts based on metadata. ` +* upstream: combined http/1 and http/2 connection pool code. This means that circuit breaker limits for both requests and connections apply to both pool types. Also, http/2 now has the option to limit concurrent requests on a connection, and allow multiple draining connections. 1.13.0 (January 20, 2020) ========================= From 0197d3e6dbe3374ff101f51b84f88a636040fc8b Mon Sep 17 00:00:00 2001 From: Greg Greenway Date: Fri, 24 Jan 2020 08:57:38 -0800 Subject: [PATCH 32/41] try to fix docs build Signed-off-by: Greg Greenway --- docs/root/intro/arch_overview/upstream/circuit_breaking.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/root/intro/arch_overview/upstream/circuit_breaking.rst b/docs/root/intro/arch_overview/upstream/circuit_breaking.rst index a1e6ca3d4402a..452e19e90512b 100644 --- a/docs/root/intro/arch_overview/upstream/circuit_breaking.rst +++ b/docs/root/intro/arch_overview/upstream/circuit_breaking.rst @@ -5,10 +5,10 @@ Circuit breaking .. warning:: -Circuit breaking behavior changed in version 1.14. If the previous behavior (documented :ref:`here ` +Circuit breaking behavior changed in version 1.14. 
If the previous behavior (documented `here ` is needed, disable runtime feature "envoy.reloadable_features.new_http1_connection_pool_behavior" or "envoy.reloadable_features.new_http2_connection_pool_behavior" and then re-configure your clusters or -restart Envoy. Note that this behavior is deprecated and will be removed according to the deprecation +restart Envoy. Note that the old behavior is deprecated and will be removed according to the deprecation policy. Circuit breaking is a critical component of distributed systems. It’s nearly always better to fail From 37a1e6128df09be4bd183f17f091f14b2e943a4f Mon Sep 17 00:00:00 2001 From: Greg Greenway Date: Fri, 24 Jan 2020 09:11:11 -0800 Subject: [PATCH 33/41] more doc fixes Signed-off-by: Greg Greenway --- .../arch_overview/upstream/circuit_breaking.rst | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/docs/root/intro/arch_overview/upstream/circuit_breaking.rst b/docs/root/intro/arch_overview/upstream/circuit_breaking.rst index 452e19e90512b..33363fadb2871 100644 --- a/docs/root/intro/arch_overview/upstream/circuit_breaking.rst +++ b/docs/root/intro/arch_overview/upstream/circuit_breaking.rst @@ -3,13 +3,13 @@ Circuit breaking ================ -.. warning:: +.. attention:: -Circuit breaking behavior changed in version 1.14. If the previous behavior (documented `here ` -is needed, disable runtime feature "envoy.reloadable_features.new_http1_connection_pool_behavior" or -"envoy.reloadable_features.new_http2_connection_pool_behavior" and then re-configure your clusters or -restart Envoy. Note that the old behavior is deprecated and will be removed according to the deprecation -policy. + Circuit breaking behavior changed in version 1.14. 
If the previous behavior (documented `here `) + is needed, disable runtime feature "envoy.reloadable_features.new_http1_connection_pool_behavior" or + "envoy.reloadable_features.new_http2_connection_pool_behavior" and then re-configure your clusters or + restart Envoy. Note that the old behavior is deprecated and will be removed according to the deprecation + policy. Circuit breaking is a critical component of distributed systems. It’s nearly always better to fail quickly and apply back pressure downstream as soon as possible. One of the main benefits of an Envoy From aebea9e07f965f4bac4fb6bbc5c5a0369f278af7 Mon Sep 17 00:00:00 2001 From: Greg Greenway Date: Fri, 24 Jan 2020 12:14:54 -0800 Subject: [PATCH 34/41] prune unneeded deps Signed-off-by: Greg Greenway --- source/common/http/http1/BUILD | 6 ------ source/common/http/http1/conn_pool.cc | 2 -- source/common/http/http1/conn_pool.h | 10 ---------- source/common/http/http2/BUILD | 6 ------ source/common/http/http2/conn_pool.cc | 3 --- source/common/http/http2/conn_pool.h | 7 +------ 6 files changed, 1 insertion(+), 33 deletions(-) diff --git a/source/common/http/http1/BUILD b/source/common/http/http1/BUILD index 07d661269ac99..dfc07f18b765a 100644 --- a/source/common/http/http1/BUILD +++ b/source/common/http/http1/BUILD @@ -84,18 +84,12 @@ envoy_cc_library( "//include/envoy/event:timer_interface", "//include/envoy/http:conn_pool_interface", "//include/envoy/http:header_map_interface", - "//include/envoy/network:connection_interface", - "//include/envoy/stats:stats_interface", - "//include/envoy/stats:timespan_interface", "//include/envoy/upstream:upstream_interface", - "//source/common/common:linked_object", - "//source/common/common:utility_lib", "//source/common/http:codec_client_lib", "//source/common/http:codec_wrappers_lib", "//source/common/http:codes_lib", "//source/common/http:conn_pool_base_lib", "//source/common/http:headers_lib", - "//source/common/network:utility_lib", 
"//source/common/runtime:runtime_lib", "//source/common/upstream:upstream_lib", ], diff --git a/source/common/http/http1/conn_pool.cc b/source/common/http/http1/conn_pool.cc index 5983f4bf2a014..7817f2ad67f4b 100644 --- a/source/common/http/http1/conn_pool.cc +++ b/source/common/http/http1/conn_pool.cc @@ -10,12 +10,10 @@ #include "envoy/http/header_map.h" #include "envoy/upstream/upstream.h" -#include "common/common/utility.h" #include "common/http/codec_client.h" #include "common/http/codes.h" #include "common/http/headers.h" #include "common/http/http1/conn_pool_legacy.h" -#include "common/network/utility.h" #include "common/runtime/runtime_impl.h" #include "absl/strings/match.h" diff --git a/source/common/http/http1/conn_pool.h b/source/common/http/http1/conn_pool.h index 54d9bee6a3ec6..a55eb5f3c8880 100644 --- a/source/common/http/http1/conn_pool.h +++ b/source/common/http/http1/conn_pool.h @@ -1,22 +1,12 @@ #pragma once -#include -#include -#include - -#include "envoy/event/deferred_deletable.h" #include "envoy/event/timer.h" #include "envoy/http/codec.h" -#include "envoy/http/conn_pool.h" -#include "envoy/network/connection.h" #include "envoy/upstream/upstream.h" -#include "common/common/linked_object.h" #include "common/http/codec_wrappers.h" #include "common/http/conn_pool_base.h" -#include "absl/types/optional.h" - namespace Envoy { namespace Http { namespace Http1 { diff --git a/source/common/http/http2/BUILD b/source/common/http/http2/BUILD index 6d51a460ce249..ad8d93119ae51 100644 --- a/source/common/http/http2/BUILD +++ b/source/common/http/http2/BUILD @@ -82,15 +82,9 @@ envoy_cc_library( deps = [ ":conn_pool_legacy_lib", "//include/envoy/event:dispatcher_interface", - "//include/envoy/event:timer_interface", - "//include/envoy/http:conn_pool_interface", - "//include/envoy/network:connection_interface", - "//include/envoy/stats:timespan_interface", "//include/envoy/upstream:upstream_interface", "//source/common/http:codec_client_lib", 
"//source/common/http:conn_pool_base_lib", - "//source/common/network:utility_lib", - "//source/common/upstream:upstream_lib", ], ) diff --git a/source/common/http/http2/conn_pool.cc b/source/common/http/http2/conn_pool.cc index 52fc5e2bb8b37..2fc988a8fe645 100644 --- a/source/common/http/http2/conn_pool.cc +++ b/source/common/http/http2/conn_pool.cc @@ -1,15 +1,12 @@ #include "common/http/http2/conn_pool.h" #include -#include #include "envoy/event/dispatcher.h" -#include "envoy/event/timer.h" #include "envoy/upstream/upstream.h" #include "common/http/http2/codec_impl.h" #include "common/http/http2/conn_pool_legacy.h" -#include "common/network/utility.h" namespace Envoy { namespace Http { diff --git a/source/common/http/http2/conn_pool.h b/source/common/http/http2/conn_pool.h index ee103a4e7f0fd..30685c9a37f82 100644 --- a/source/common/http/http2/conn_pool.h +++ b/source/common/http/http2/conn_pool.h @@ -1,15 +1,10 @@ #pragma once #include -#include -#include -#include "envoy/event/timer.h" -#include "envoy/http/conn_pool.h" -#include "envoy/network/connection.h" -#include "envoy/stats/timespan.h" #include "envoy/upstream/upstream.h" +#include "common/http/codec_client.h" #include "common/http/conn_pool_base.h" namespace Envoy { From 2b24f0091ad07700bf8707715c385a8a001346d4 Mon Sep 17 00:00:00 2001 From: Greg Greenway Date: Fri, 24 Jan 2020 12:35:16 -0800 Subject: [PATCH 35/41] code review items Signed-off-by: Greg Greenway --- .../intro/arch_overview/upstream/circuit_breaking.rst | 9 ++++----- source/common/http/conn_pool_base.h | 2 +- source/common/http/http1/conn_pool.cc | 9 +-------- source/common/http/http1/conn_pool.h | 1 - test/common/http/http2/conn_pool_test.cc | 10 +++++----- 5 files changed, 11 insertions(+), 20 deletions(-) diff --git a/docs/root/intro/arch_overview/upstream/circuit_breaking.rst b/docs/root/intro/arch_overview/upstream/circuit_breaking.rst index 33363fadb2871..638face504009 100644 --- 
a/docs/root/intro/arch_overview/upstream/circuit_breaking.rst +++ b/docs/root/intro/arch_overview/upstream/circuit_breaking.rst @@ -23,12 +23,11 @@ configure and code each application independently. Envoy supports various types all hosts in an upstream cluster. If this circuit breaker overflows the :ref:`upstream_cx_overflow ` counter for the cluster will increment. * **Cluster maximum pending requests**: The maximum number of requests that will be queued while - waiting for a ready connection pool connection. Since HTTP/2 allows many requests concurrently over a single - connection, this circuit breaker only comes into play as the initial connection is created, - as requests will be multiplexed immediately afterwards (unless :ref:`max concurrent streams ` - is configured). For HTTP/1.1, requests are added to the list + waiting for a ready connection pool connection. Requests are added to the list of pending requests whenever there aren't enough upstream connections available to immediately dispatch - the request, so this circuit breaker will remain in play for the lifetime of the process. + the request. For HTTP/2 connections, if :ref:`max concurrent streams ` + is not configured, all requests will be multiplexed over the same connection so this circuit breaker + will only be hit when no connection is already established. If this circuit breaker overflows the :ref:`upstream_rq_pending_overflow ` counter for the cluster will increment. diff --git a/source/common/http/conn_pool_base.h b/source/common/http/conn_pool_base.h index 64a62a3743d50..696bdbba39816 100644 --- a/source/common/http/conn_pool_base.h +++ b/source/common/http/conn_pool_base.h @@ -102,7 +102,7 @@ class ConnPoolImplBase : public ConnectionPool::Instance, using PendingRequestPtr = std::unique_ptr; - // Create a new CodecClient + // Create a new CodecClient. virtual CodecClientPtr createCodecClient(Upstream::Host::CreateConnectionData& data) PURE; // Returns a new instance of ActiveClient. 
diff --git a/source/common/http/http1/conn_pool.cc b/source/common/http/http1/conn_pool.cc index 7817f2ad67f4b..ec2dd558706ce 100644 --- a/source/common/http/http1/conn_pool.cc +++ b/source/common/http/http1/conn_pool.cc @@ -70,13 +70,6 @@ ConnPoolImpl::StreamWrapper::StreamWrapper(StreamDecoder& response_decoder, Acti StreamDecoderWrapper(response_decoder), parent_(parent) { StreamEncoderWrapper::inner_.getStream().addCallbacks(*this); - - // TODO (tonya11en): At the time of writing, there is no way to mix different versions of HTTP - // traffic in the same cluster, so incrementing the request count in the per-cluster resource - // manager will not affect circuit breaking in any unexpected ways. Ideally, outstanding requests - // counts would be tracked the same way in all HTTP versions. - // - // See: https://github.com/envoyproxy/envoy/issues/9215 } ConnPoolImpl::StreamWrapper::~StreamWrapper() { @@ -117,7 +110,7 @@ void ConnPoolImpl::StreamWrapper::onDecodeComplete() { ConnPoolImpl::ActiveClient::ActiveClient(ConnPoolImpl& parent) : ConnPoolImplBase::ActiveClient( parent, parent.host_->cluster().maxRequestsPerConnection(), - 1 // HTTP1 always has a concurrent-request-limit of 1 per connection + 1 // HTTP1 always has a concurrent-request-limit of 1 per connection. 
) { parent.host_->cluster().stats().upstream_cx_http1_total_.inc(); } diff --git a/source/common/http/http1/conn_pool.h b/source/common/http/http1/conn_pool.h index a55eb5f3c8880..c9b013b126762 100644 --- a/source/common/http/http1/conn_pool.h +++ b/source/common/http/http1/conn_pool.h @@ -66,7 +66,6 @@ class ConnPoolImpl : public ConnPoolImplBase { struct ActiveClient : public ConnPoolImplBase::ActiveClient { ActiveClient(ConnPoolImpl& parent); - ~ActiveClient() override = default; ConnPoolImpl& parent() { return static_cast(parent_); } diff --git a/test/common/http/http2/conn_pool_test.cc b/test/common/http/http2/conn_pool_test.cc index 1a54d1b2b57cd..590ce4318c15f 100644 --- a/test/common/http/http2/conn_pool_test.cc +++ b/test/common/http/http2/conn_pool_test.cc @@ -302,27 +302,27 @@ TEST_F(Http2ConnPoolImplTest, MaxConcurrentRequestsPerStream) { InSequence s; { - // Create request and complete it + // Create request and complete it. expectClientCreate(); ActiveTestRequest r(*this, 0, false); expectClientConnect(0, r); completeRequest(r); } - // Previous request completed, so this one will re-use the connection + // Previous request completed, so this one will re-use the connection. { ActiveTestRequest r(*this, 0, true); completeRequest(r); } - // Two concurrent requests causes one additional connection to be created + // Two concurrent requests causes one additional connection to be created. { ActiveTestRequest r1(*this, 0, true); expectClientCreate(); ActiveTestRequest r2(*this, 1, false); expectClientConnect(1, r2); - // Complete one of them, and create another, and it will re-use the connection + // Complete one of them, and create another, and it will re-use the connection. 
completeRequest(r2); ActiveTestRequest r3(*this, 1, true); @@ -330,7 +330,7 @@ TEST_F(Http2ConnPoolImplTest, MaxConcurrentRequestsPerStream) { completeRequest(r3); } - // Create two more requests; both should use existing connections + // Create two more requests; both should use existing connections. { ActiveTestRequest r1(*this, 1, true); ActiveTestRequest r2(*this, 0, true); From f8916521164e0bb4ca8381f8854e7ea5243eff7c Mon Sep 17 00:00:00 2001 From: Greg Greenway Date: Fri, 24 Jan 2020 14:45:14 -0800 Subject: [PATCH 36/41] Fix two bugs: - CONNECTING connections were not immediately closed when drainConnections() is called - H2 pools would establish 1 connection per pending request before any connection was established. Fixed it to look at the limits and establish only enough connections to satisfy the pending requests. Signed-off-by: Greg Greenway --- source/common/http/conn_pool_base.cc | 27 ++++++- source/common/http/conn_pool_base.h | 10 +++ test/common/http/http2/conn_pool_test.cc | 89 +++++++++++++++++++++++- 3 files changed, 123 insertions(+), 3 deletions(-) diff --git a/source/common/http/conn_pool_base.cc b/source/common/http/conn_pool_base.cc index c1dcad2315655..2bab54a15e400 100644 --- a/source/common/http/conn_pool_base.cc +++ b/source/common/http/conn_pool_base.cc @@ -29,6 +29,12 @@ void ConnPoolImplBase::destructAllConnections() { } void ConnPoolImplBase::tryCreateNewConnection() { + if (pending_requests_.size() <= connecting_request_capacity_) { + // There are already enough CONNECTING connections for the number + // of queued requests. 
+ return; + } + const bool can_create_connection = host_->cluster().resourceManager(priority_).connections().canCreate(); if (!can_create_connection) { @@ -41,6 +47,9 @@ void ConnPoolImplBase::tryCreateNewConnection() { ENVOY_LOG(debug, "creating a new connection"); ActiveClientPtr client = instantiateActiveClient(); ASSERT(client->state_ == ActiveClient::State::CONNECTING); + ASSERT(std::numeric_limits::max() - connecting_request_capacity_ >= + client->effectiveConcurrentRequestLimit()); + connecting_request_capacity_ += client->effectiveConcurrentRequestLimit(); client->moveIntoList(std::move(client), owningList(client->state_)); } } @@ -111,9 +120,13 @@ ConnectionPool::Cancellable* ConnPoolImplBase::newStream(Http::StreamDecoder& re } if (host_->cluster().resourceManager(priority_).pendingRequests().canCreate()) { + ConnectionPool::Cancellable* pending = newPendingRequest(response_decoder, callbacks); + + // This must come after newPendingRequest() because this function uses the + // length of pending_requests_ to determine if a new connection is needed. tryCreateNewConnection(); - return newPendingRequest(response_decoder, callbacks); + return pending; } else { ENVOY_LOG(debug, "max pending requests overflow"); callbacks.onPoolFailure(ConnectionPool::PoolFailureReason::Overflow, absl::string_view(), @@ -180,13 +193,18 @@ void ConnPoolImplBase::closeIdleConnections() { // Create a separate list of elements to close to avoid mutate-while-iterating problems. 
std::list to_close; - // Possibly-idle connections are always in the ready_clients_ list for (auto& client : ready_clients_) { if (!client->hasActiveRequests()) { to_close.push_back(client.get()); } } + for (auto& client : busy_clients_) { + if (client->state_ == ActiveClient::State::CONNECTING) { + to_close.push_back(client.get()); + } + } + for (auto& entry : to_close) { entry->close(); } @@ -227,6 +245,11 @@ void ConnPoolImplBase::checkForDrained() { void ConnPoolImplBase::onConnectionEvent(ConnPoolImplBase::ActiveClient& client, Network::ConnectionEvent event) { + if (client.state_ == ActiveClient::State::CONNECTING) { + ASSERT(connecting_request_capacity_ >= client.effectiveConcurrentRequestLimit()); + connecting_request_capacity_ -= client.effectiveConcurrentRequestLimit(); + } + if (event == Network::ConnectionEvent::RemoteClose || event == Network::ConnectionEvent::LocalClose) { // The client died. diff --git a/source/common/http/conn_pool_base.h b/source/common/http/conn_pool_base.h index 696bdbba39816..ab1ee34e908c6 100644 --- a/source/common/http/conn_pool_base.h +++ b/source/common/http/conn_pool_base.h @@ -60,6 +60,12 @@ class ConnPoolImplBase : public ConnectionPool::Instance, void onConnectTimeout(); void close() { codec_client_->close(); } + // Returns the concurrent request limit, accounting for if the total request limit + // is less than the concurrent request limit. + uint64_t effectiveConcurrentRequestLimit() const { + return std::min(remaining_requests_, concurrent_request_limit_); + } + virtual bool hasActiveRequests() const PURE; virtual bool closingWithIncompleteRequest() const PURE; virtual StreamEncoder& newStreamEncoder(StreamDecoder& response_decoder) PURE; @@ -165,6 +171,10 @@ class ConnPoolImplBase : public ConnectionPool::Instance, // The number of requests currently attached to clients. 
uint64_t num_active_requests_{0}; + + // The number of requests that can be immediately dispatched + // if all CONNECTING connections become connected. + uint64_t connecting_request_capacity_{0}; }; } // namespace Http } // namespace Envoy diff --git a/test/common/http/http2/conn_pool_test.cc b/test/common/http/http2/conn_pool_test.cc index 590ce4318c15f..e1edf0010a24f 100644 --- a/test/common/http/http2/conn_pool_test.cc +++ b/test/common/http/http2/conn_pool_test.cc @@ -60,7 +60,11 @@ class Http2ConnPoolImplTest : public testing::Test { Http2ConnPoolImplTest() : api_(Api::createApiForTest(stats_store_)), - pool_(dispatcher_, host_, Upstream::ResourcePriority::Default, nullptr, nullptr) {} + pool_(dispatcher_, host_, Upstream::ResourcePriority::Default, nullptr, nullptr) { + // Default connections to 1024 because the tests shouldn't be relying on the + // connection resource limit for most tests. + cluster_->resetResourceManager(1024, 1024, 1024, 1, 1); + } ~Http2ConnPoolImplTest() override { EXPECT_EQ("", TestUtility::nonZeroedGauges(cluster_->stats_store_.gauges())); @@ -254,6 +258,17 @@ TEST_F(Http2ConnPoolImplTest, DrainConnectionBusy) { r.inner_decoder_->decodeHeaders(HeaderMapPtr{new HeaderMapImpl{}}, true); } +TEST_F(Http2ConnPoolImplTest, DrainConnectionConnecting) { + InSequence s; + + expectClientCreate(); + ActiveTestRequest r(*this, 0, false); + + expectStreamReset(r); + EXPECT_CALL(*this, onClientDestroy()); + pool_.drainConnections(); +} + /** * Verify that connections are drained when requested. 
*/ @@ -383,6 +398,78 @@ TEST_F(Http2ConnPoolImplTest, PendingRequests) { EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_remote_.value()); } +// Verifies that the correct number of CONNECTING connections are created for +// the pending requests, when the total requests per connection is limited +TEST_F(Http2ConnPoolImplTest, PendingRequestsNumberConnectingTotalRequestsPerConnection) { + cluster_->max_requests_per_connection_ = 2; + InSequence s; + + // Create three requests. The 3rd should create a 2nd connection due to the limit + // of 2 requests per connection. + expectClientCreate(); + ActiveTestRequest r1(*this, 0, false); + ActiveTestRequest r2(*this, 0, false); + expectClientCreate(); + ActiveTestRequest r3(*this, 1, false); + + // The connection now becomes ready. This should cause all the queued requests to be sent. + expectStreamConnect(0, r1); + expectClientConnect(0, r2); + expectClientConnect(1, r3); + + // Send a request through each stream. + EXPECT_CALL(r1.inner_encoder_, encodeHeaders(_, true)); + r1.callbacks_.outer_encoder_->encodeHeaders(HeaderMapImpl{}, true); + + EXPECT_CALL(r2.inner_encoder_, encodeHeaders(_, true)); + r2.callbacks_.outer_encoder_->encodeHeaders(HeaderMapImpl{}, true); + + EXPECT_CALL(r3.inner_encoder_, encodeHeaders(_, true)); + r3.callbacks_.outer_encoder_->encodeHeaders(HeaderMapImpl{}, true); + + // Clean up everything. 
+ test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + test_clients_[1].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + EXPECT_CALL(*this, onClientDestroy()).Times(2); + dispatcher_.clearDeferredDeleteList(); +} + +// Verifies that the correct number of CONNECTING connections are created for +// the pending requests, when the concurrent requests per connection is limited +TEST_F(Http2ConnPoolImplTest, PendingRequestsNumberConnectingConcurrentRequestsPerConnection) { + cluster_->http2_settings_.max_concurrent_streams_ = 2; + InSequence s; + + // Create three requests. The 3rd should create a 2nd connection due to the limit + // of 2 requests per connection. + expectClientCreate(); + ActiveTestRequest r1(*this, 0, false); + ActiveTestRequest r2(*this, 0, false); + expectClientCreate(); + ActiveTestRequest r3(*this, 1, false); + + // The connection now becomes ready. This should cause all the queued requests to be sent. + expectStreamConnect(0, r1); + expectClientConnect(0, r2); + expectClientConnect(1, r3); + + // Send a request through each stream. + EXPECT_CALL(r1.inner_encoder_, encodeHeaders(_, true)); + r1.callbacks_.outer_encoder_->encodeHeaders(HeaderMapImpl{}, true); + + EXPECT_CALL(r2.inner_encoder_, encodeHeaders(_, true)); + r2.callbacks_.outer_encoder_->encodeHeaders(HeaderMapImpl{}, true); + + EXPECT_CALL(r3.inner_encoder_, encodeHeaders(_, true)); + r3.callbacks_.outer_encoder_->encodeHeaders(HeaderMapImpl{}, true); + + // Clean up everything. + test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + test_clients_[1].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + EXPECT_CALL(*this, onClientDestroy()).Times(2); + dispatcher_.clearDeferredDeleteList(); +} + // Verifies that requests are queued up in the conn pool and fail when the connection // fails to be established. 
TEST_F(Http2ConnPoolImplTest, PendingRequestsFailure) { From 7b8857f0ebcd7bbba2c986c8761df402ef5b671e Mon Sep 17 00:00:00 2001 From: Greg Greenway Date: Fri, 24 Jan 2020 15:15:17 -0800 Subject: [PATCH 37/41] More doc changes: deprecated.rst, circuit breakers, and conn pool docs. Signed-off-by: Greg Greenway --- .../intro/arch_overview/upstream/circuit_breaking.rst | 6 +++--- .../arch_overview/upstream/connection_pooling.rst | 11 ++++++++--- docs/root/intro/deprecated.rst | 4 ++++ 3 files changed, 15 insertions(+), 6 deletions(-) diff --git a/docs/root/intro/arch_overview/upstream/circuit_breaking.rst b/docs/root/intro/arch_overview/upstream/circuit_breaking.rst index 638face504009..5a2d3025a7376 100644 --- a/docs/root/intro/arch_overview/upstream/circuit_breaking.rst +++ b/docs/root/intro/arch_overview/upstream/circuit_breaking.rst @@ -26,9 +26,9 @@ configure and code each application independently. Envoy supports various types waiting for a ready connection pool connection. Requests are added to the list of pending requests whenever there aren't enough upstream connections available to immediately dispatch the request. For HTTP/2 connections, if :ref:`max concurrent streams ` - is not configured, all requests will be multiplexed over the same connection so this circuit breaker - will only be hit when no connection is already established. - If this circuit breaker overflows the + and :ref:`max requests per connection ` are not + configured, all requests will be multiplexed over the same connection so this circuit breaker + will only be hit when no connection is already established. If this circuit breaker overflows the :ref:`upstream_rq_pending_overflow ` counter for the cluster will increment. 
* **Cluster maximum requests**: The maximum number of requests that can be outstanding to all hosts diff --git a/docs/root/intro/arch_overview/upstream/connection_pooling.rst b/docs/root/intro/arch_overview/upstream/connection_pooling.rst index 59215dbd436c7..ebb0031e4e971 100644 --- a/docs/root/intro/arch_overview/upstream/connection_pooling.rst +++ b/docs/root/intro/arch_overview/upstream/connection_pooling.rst @@ -20,9 +20,14 @@ downstream request must be reset if the upstream connection is severed. HTTP/2 ------ -The HTTP/2 connection pool acquires a single connection to an upstream host. All requests are -multiplexed over this connection. If a GOAWAY frame is received or if the connection reaches the -maximum stream limit, the connection pool will create a new connection and drain the existing one. +The HTTP/2 connection pool multiplexes multiple requests over a single connection, up to the limits +imposed by :ref:`max concurrent streams ` +and :ref:`max requests per connection `. +The HTTP/2 connection pool establishes only as many connections as are needed to serve the current +requests. With no limits, this will be only a single connection. If a GOAWAY frame is received or +if the connection reaches the maximum stream limit, the connection pool will drain the existing one. +New connections are established anytime there is a pending request without a connection that it can +be dispatched to (up to circuit breaker limits for connections). HTTP/2 is the preferred communication protocol as connections rarely if ever get severed. .. _arch_overview_conn_pool_health_checking: diff --git a/docs/root/intro/deprecated.rst b/docs/root/intro/deprecated.rst index 1077e56f0d9d7..a45e3795f3a3f 100644 --- a/docs/root/intro/deprecated.rst +++ b/docs/root/intro/deprecated.rst @@ -12,6 +12,10 @@ Deprecated items below are listed in chronological order. 
1.14.0 (Pending) ================ +* The previous behavior for upstream connection pool circuit breaking described +`here ` has +been deprecated in favor of the new behavior described +`here `. 1.13.0 (January 20, 2020) ========================= From 1a826d4358379e73d4ab06440735627bcc63e84e Mon Sep 17 00:00:00 2001 From: Greg Greenway Date: Fri, 24 Jan 2020 15:39:55 -0800 Subject: [PATCH 38/41] formatting of md Signed-off-by: Greg Greenway --- docs/root/intro/deprecated.rst | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/root/intro/deprecated.rst b/docs/root/intro/deprecated.rst index a45e3795f3a3f..887b9802a36b1 100644 --- a/docs/root/intro/deprecated.rst +++ b/docs/root/intro/deprecated.rst @@ -13,9 +13,9 @@ Deprecated items below are listed in chronological order. 1.14.0 (Pending) ================ * The previous behavior for upstream connection pool circuit breaking described -`here ` has -been deprecated in favor of the new behavior described -`here `. + `here ` has + been deprecated in favor of the new behavior described + `here `. 1.13.0 (January 20, 2020) ========================= From 51bc87939df5c9beeb7e2da11603048168bb04f8 Mon Sep 17 00:00:00 2001 From: Greg Greenway Date: Fri, 24 Jan 2020 15:53:39 -0800 Subject: [PATCH 39/41] properly mark as external link Signed-off-by: Greg Greenway --- docs/root/intro/arch_overview/upstream/circuit_breaking.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/root/intro/arch_overview/upstream/circuit_breaking.rst b/docs/root/intro/arch_overview/upstream/circuit_breaking.rst index 5a2d3025a7376..24ebb407848d3 100644 --- a/docs/root/intro/arch_overview/upstream/circuit_breaking.rst +++ b/docs/root/intro/arch_overview/upstream/circuit_breaking.rst @@ -5,7 +5,7 @@ Circuit breaking .. attention:: - Circuit breaking behavior changed in version 1.14. If the previous behavior (documented `here ` + Circuit breaking behavior changed in version 1.14. 
If the previous behavior (documented `here `_ is needed, disable runtime feature "envoy.reloadable_features.new_http1_connection_pool_behavior" or "envoy.reloadable_features.new_http2_connection_pool_behavior" and then re-configure your clusters or restart Envoy. Note that the old behavior is deprecated and will be removed according to the deprecation From 7c5d2619304fbd70729460f29bacf095ae15b1f8 Mon Sep 17 00:00:00 2001 From: Greg Greenway Date: Mon, 27 Jan 2020 17:11:53 -0800 Subject: [PATCH 40/41] Fix a bug introduced in last change that could put a CONNECTING client into DRAINING, which doesn't make sense. Also minor comment improvements. Signed-off-by: Greg Greenway --- source/common/http/conn_pool_base.cc | 25 +++++++++++++++--------- source/common/http/conn_pool_base.h | 2 ++ test/common/http/http1/conn_pool_test.cc | 5 ++--- test/common/http/http2/conn_pool_test.cc | 15 ++++++++++++-- 4 files changed, 33 insertions(+), 14 deletions(-) diff --git a/source/common/http/conn_pool_base.cc b/source/common/http/conn_pool_base.cc index 2bab54a15e400..89e63a7d1e703 100644 --- a/source/common/http/conn_pool_base.cc +++ b/source/common/http/conn_pool_base.cc @@ -147,11 +147,6 @@ void ConnPoolImplBase::onUpstreamReady() { } } -void ConnPoolImplBase::addDrainedCallback(DrainedCb cb) { - drained_callbacks_.push_back(cb); - checkForDrained(); -} - bool ConnPoolImplBase::hasActiveConnections() const { return (!pending_requests_.empty() || (num_active_requests_ > 0)); } @@ -189,6 +184,11 @@ void ConnPoolImplBase::transitionActiveClientState(ActiveClient& client, } } +void ConnPoolImplBase::addDrainedCallback(DrainedCb cb) { + drained_callbacks_.push_back(cb); + checkForDrained(); +} + void ConnPoolImplBase::closeIdleConnections() { // Create a separate list of elements to close to avoid mutate-while-iterating problems. 
std::list to_close; @@ -199,9 +199,11 @@ void ConnPoolImplBase::closeIdleConnections() { } } - for (auto& client : busy_clients_) { - if (client->state_ == ActiveClient::State::CONNECTING) { - to_close.push_back(client.get()); + if (pending_requests_.empty()) { + for (auto& client : busy_clients_) { + if (client->state_ == ActiveClient::State::CONNECTING) { + to_close.push_back(client.get()); + } } } @@ -224,7 +226,12 @@ void ConnPoolImplBase::drainConnections() { // so use a for-loop since the list is not mutated. ASSERT(&owningList(ActiveClient::State::DRAINING) == &busy_clients_); for (auto& busy_client : busy_clients_) { - transitionActiveClientState(*busy_client, ActiveClient::State::DRAINING); + // Moving a CONNECTING client to DRAINING would violate state assumptions, namely that DRAINING + // connections have active requests (otherwise they would be closed) and that clients receiving + // a Connected event are in state CONNECTING. + if (busy_client->state_ != ActiveClient::State::CONNECTING) { + transitionActiveClientState(*busy_client, ActiveClient::State::DRAINING); + } } } diff --git a/source/common/http/conn_pool_base.h b/source/common/http/conn_pool_base.h index ab1ee34e908c6..3d2fea253e19f 100644 --- a/source/common/http/conn_pool_base.h +++ b/source/common/http/conn_pool_base.h @@ -164,9 +164,11 @@ class ConnPoolImplBase : public ConnectionPool::Instance, std::list pending_requests_to_purge_; // Clients that are ready to handle additional requests. + // All entries are in state READY. std::list ready_clients_; // Clients that are not ready to handle additional requests. + // Entries are in possible states CONNECTING, BUSY, or DRAINING. std::list busy_clients_; // The number of requests currently attached to clients. 
diff --git a/test/common/http/http1/conn_pool_test.cc b/test/common/http/http1/conn_pool_test.cc index ae535d40d3aea..e06808ca71703 100644 --- a/test/common/http/http1/conn_pool_test.cc +++ b/test/common/http/http1/conn_pool_test.cc @@ -853,11 +853,10 @@ TEST_F(Http1ConnPoolImplTest, DrainWhileConnecting) { EXPECT_NE(nullptr, handle); conn_pool_.addDrainedCallback([&]() -> void { drained.ready(); }); - handle->cancel(); EXPECT_CALL(*conn_pool_.test_clients_[0].connection_, close(Network::ConnectionCloseType::NoFlush)); EXPECT_CALL(drained, ready()); - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); + handle->cancel(); EXPECT_CALL(conn_pool_, onClientDestroy()); dispatcher_.clearDeferredDeleteList(); @@ -946,7 +945,7 @@ TEST_F(Http1ConnPoolImplTest, PendingRequestIsConsideredActive) { dispatcher_.clearDeferredDeleteList(); EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_.value()); - EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_remote_.value()); + EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_local_.value()); } } // namespace diff --git a/test/common/http/http2/conn_pool_test.cc b/test/common/http/http2/conn_pool_test.cc index e1edf0010a24f..55386371c08c0 100644 --- a/test/common/http/http2/conn_pool_test.cc +++ b/test/common/http/http2/conn_pool_test.cc @@ -147,7 +147,8 @@ class ActiveTestRequest { EXPECT_CALL(callbacks_.pool_ready_, ready()); EXPECT_EQ(nullptr, test.pool_.newStream(decoder_, callbacks_)); } else { - EXPECT_NE(nullptr, test.pool_.newStream(decoder_, callbacks_)); + handle_ = test.pool_.newStream(decoder_, callbacks_); + EXPECT_NE(nullptr, handle_); } } @@ -155,6 +156,7 @@ class ActiveTestRequest { ConnPoolCallbacks callbacks_; Http::StreamDecoder* inner_decoder_{}; NiceMock inner_encoder_; + ConnectionPool::Cancellable* handle_{}; }; void Http2ConnPoolImplTest::expectClientConnect(size_t index, ActiveTestRequest& r) { @@ -258,13 +260,22 @@ TEST_F(Http2ConnPoolImplTest, DrainConnectionBusy) { 
r.inner_decoder_->decodeHeaders(HeaderMapPtr{new HeaderMapImpl{}}, true); } +/** + * Verify that draining connections with a pending request does not + * close the connection, but draining without a pending request does close + * the connection. + */ TEST_F(Http2ConnPoolImplTest, DrainConnectionConnecting) { InSequence s; expectClientCreate(); ActiveTestRequest r(*this, 0, false); - expectStreamReset(r); + // Pending request prevents the connection from being drained + pool_.drainConnections(); + + // Cancel the pending request, and then the connection can be closed. + r.handle_->cancel(); EXPECT_CALL(*this, onClientDestroy()); pool_.drainConnections(); } From dab1c606abc47565f6860806a8fb86a12f70a685 Mon Sep 17 00:00:00 2001 From: Greg Greenway Date: Wed, 29 Jan 2020 07:54:39 -0800 Subject: [PATCH 41/41] doc fixes Signed-off-by: Greg Greenway --- .../intro/arch_overview/upstream/circuit_breaking.rst | 8 -------- docs/root/intro/deprecated.rst | 5 ++--- docs/root/intro/version_history.rst | 9 ++++++++- 3 files changed, 10 insertions(+), 12 deletions(-) diff --git a/docs/root/intro/arch_overview/upstream/circuit_breaking.rst b/docs/root/intro/arch_overview/upstream/circuit_breaking.rst index 24ebb407848d3..532d9645bd56c 100644 --- a/docs/root/intro/arch_overview/upstream/circuit_breaking.rst +++ b/docs/root/intro/arch_overview/upstream/circuit_breaking.rst @@ -3,14 +3,6 @@ Circuit breaking ================ -.. attention:: - - Circuit breaking behavior changed in version 1.14. If the previous behavior (documented `here `_ - is needed, disable runtime feature "envoy.reloadable_features.new_http1_connection_pool_behavior" or - "envoy.reloadable_features.new_http2_connection_pool_behavior" and then re-configure your clusters or - restart Envoy. Note that the old behavior is deprecated and will be removed according to the deprecation - policy. - Circuit breaking is a critical component of distributed systems. 
It’s nearly always better to fail quickly and apply back pressure downstream as soon as possible. One of the main benefits of an Envoy mesh is that Envoy enforces circuit breaking limits at the network level as opposed to having to diff --git a/docs/root/intro/deprecated.rst b/docs/root/intro/deprecated.rst index 887b9802a36b1..c591472974187 100644 --- a/docs/root/intro/deprecated.rst +++ b/docs/root/intro/deprecated.rst @@ -13,9 +13,8 @@ Deprecated items below are listed in chronological order. 1.14.0 (Pending) ================ * The previous behavior for upstream connection pool circuit breaking described - `here ` has - been deprecated in favor of the new behavior described - `here `. + `here `_ has + been deprecated in favor of the new behavior described :ref:`here `. 1.13.0 (January 20, 2020) ========================= diff --git a/docs/root/intro/version_history.rst b/docs/root/intro/version_history.rst index bcc190cdd83f1..837838771af86 100644 --- a/docs/root/intro/version_history.rst +++ b/docs/root/intro/version_history.rst @@ -5,7 +5,14 @@ Version history ================ * config: use type URL to select an extension whenever the config type URL (or its previous versions) uniquely identify a typed extension, see :ref:`extension configuration `. * retry: added a retry predicate that :ref:`rejects hosts based on metadata. ` -* upstream: combined http/1 and http/2 connection pool code. This means that circuit breaker limits for both requests and connections apply to both pool types. Also, http/2 now has the option to limit concurrent requests on a connection, and allow multiple draining connections. +* upstream: combined HTTP/1 and HTTP/2 connection pool code. This means that circuit breaker + limits for both requests and connections apply to both pool types. Also, HTTP/2 now has + the option to limit concurrent requests on a connection, and allow multiple draining + connections. 
The old behavior is deprecated, but can be used during the deprecation + period by disabling runtime feature "envoy.reloadable_features.new_http1_connection_pool_behavior" or + "envoy.reloadable_features.new_http2_connection_pool_behavior" and then re-configure your clusters or + restart Envoy. The behavior will not switch until the connection pools are recreated. The new + circuit breaker behavior is described :ref:`here `. * upstream: changed load distribution algorithm when all priorities enter :ref:`panic mode`. 1.13.0 (January 20, 2020)