diff --git a/api/envoy/api/v2/core/protocol.proto b/api/envoy/api/v2/core/protocol.proto index c8b8cdc6dd354..322add56edf17 100644 --- a/api/envoy/api/v2/core/protocol.proto +++ b/api/envoy/api/v2/core/protocol.proto @@ -117,6 +117,10 @@ message Http2ProtocolOptions { // `Maximum concurrent streams `_ // allowed for peer on one HTTP/2 connection. Valid values range from 1 to 2147483647 (2^31 - 1) // and defaults to 2147483647. + // + // For upstream connections, this also limits how many streams Envoy will initiate concurrently + // on a single connection. If the limit is reached, Envoy may queue requests or establish + // additional connections (as allowed per circuit breaker limits). google.protobuf.UInt32Value max_concurrent_streams = 2 [(validate.rules).uint32 = {lte: 2147483647 gte: 1}]; diff --git a/api/envoy/config/core/v3/protocol.proto b/api/envoy/config/core/v3/protocol.proto index dbbb4318e0cfe..77e7359fca000 100644 --- a/api/envoy/config/core/v3/protocol.proto +++ b/api/envoy/config/core/v3/protocol.proto @@ -136,6 +136,10 @@ message Http2ProtocolOptions { // `Maximum concurrent streams `_ // allowed for peer on one HTTP/2 connection. Valid values range from 1 to 2147483647 (2^31 - 1) // and defaults to 2147483647. + // + // For upstream connections, this also limits how many streams Envoy will initiate concurrently + // on a single connection. If the limit is reached, Envoy may queue requests or establish + // additional connections (as allowed per circuit breaker limits). google.protobuf.UInt32Value max_concurrent_streams = 2 [(validate.rules).uint32 = {lte: 2147483647 gte: 1}]; diff --git a/docs/root/intro/arch_overview/upstream/circuit_breaking.rst b/docs/root/intro/arch_overview/upstream/circuit_breaking.rst index f96066416a78d..532d9645bd56c 100644 --- a/docs/root/intro/arch_overview/upstream/circuit_breaking.rst +++ b/docs/root/intro/arch_overview/upstream/circuit_breaking.rst @@ -12,23 +12,20 @@ configure and code each application independently. 
Envoy supports various types .. _arch_overview_circuit_break_cluster_maximum_connections: * **Cluster maximum connections**: The maximum number of connections that Envoy will establish to - all hosts in an upstream cluster. In practice this is only applicable to HTTP/1.1 clusters since - HTTP/2 uses a single connection to each host. If this circuit breaker overflows the :ref:`upstream_cx_overflow + all hosts in an upstream cluster. If this circuit breaker overflows the :ref:`upstream_cx_overflow ` counter for the cluster will increment. * **Cluster maximum pending requests**: The maximum number of requests that will be queued while - waiting for a ready connection pool connection. Since HTTP/2 requests are sent over a single - connection, this circuit breaker only comes into play as the initial connection is created, - as requests will be multiplexed immediately afterwards. For HTTP/1.1, requests are added to the list + waiting for a ready connection pool connection. Requests are added to the list of pending requests whenever there aren't enough upstream connections available to immediately dispatch - the request, so this circuit breaker will remain in play for the lifetime of the process. - If this circuit breaker overflows the + the request. For HTTP/2 connections, if :ref:`max concurrent streams ` + and :ref:`max requests per connection ` are not + configured, all requests will be multiplexed over the same connection so this circuit breaker + will only be hit when no connection is already established. If this circuit breaker overflows the :ref:`upstream_rq_pending_overflow ` counter for the cluster will increment. * **Cluster maximum requests**: The maximum number of requests that can be outstanding to all hosts - in a cluster at any given time. In practice this is applicable to HTTP/2 clusters since HTTP/1.1 - clusters are governed by the maximum connections circuit breaker. 
If this circuit breaker - overflows the :ref:`upstream_rq_pending_overflow ` counter - for the cluster will increment. + in a cluster at any given time. If this circuit breaker overflows the :ref:`upstream_rq_pending_overflow ` + counter for the cluster will increment. * **Cluster maximum active retries**: The maximum number of retries that can be outstanding to all hosts in a cluster at any given time. In general we recommend using :ref:`retry budgets `; however, if static circuit breaking is preferred it should aggressively circuit break retries. This is so that retries for sporadic failures are allowed, but the overall retry volume cannot diff --git a/docs/root/intro/arch_overview/upstream/connection_pooling.rst b/docs/root/intro/arch_overview/upstream/connection_pooling.rst index 59215dbd436c7..ebb0031e4e971 100644 --- a/docs/root/intro/arch_overview/upstream/connection_pooling.rst +++ b/docs/root/intro/arch_overview/upstream/connection_pooling.rst @@ -20,9 +20,14 @@ downstream request must be reset if the upstream connection is severed. HTTP/2 ------ -The HTTP/2 connection pool acquires a single connection to an upstream host. All requests are -multiplexed over this connection. If a GOAWAY frame is received or if the connection reaches the -maximum stream limit, the connection pool will create a new connection and drain the existing one. +The HTTP/2 connection pool multiplexes multiple requests over a single connection, up to the limits +imposed by :ref:`max concurrent streams ` +and :ref:`max requests per connection `. +The HTTP/2 connection pool establishes only as many connections as are needed to serve the current +requests. With no limits, this will be only a single connection. If a GOAWAY frame is received or +if the connection reaches the maximum stream limit, the connection pool will drain the existing one. 
+New connections are established anytime there is a pending request without a connection that it can +be dispatched to (up to circuit breaker limits for connections). HTTP/2 is the preferred communication protocol as connections rarely if ever get severed. .. _arch_overview_conn_pool_health_checking: diff --git a/docs/root/intro/deprecated.rst b/docs/root/intro/deprecated.rst index 1077e56f0d9d7..c591472974187 100644 --- a/docs/root/intro/deprecated.rst +++ b/docs/root/intro/deprecated.rst @@ -12,6 +12,9 @@ Deprecated items below are listed in chronological order. 1.14.0 (Pending) ================ +* The previous behavior for upstream connection pool circuit breaking described + `here `_ has + been deprecated in favor of the new behavior described :ref:`here `. 1.13.0 (January 20, 2020) ========================= diff --git a/docs/root/intro/version_history.rst b/docs/root/intro/version_history.rst index 97eef67562281..837838771af86 100644 --- a/docs/root/intro/version_history.rst +++ b/docs/root/intro/version_history.rst @@ -5,6 +5,14 @@ Version history ================ * config: use type URL to select an extension whenever the config type URL (or its previous versions) uniquely identify a typed extension, see :ref:`extension configuration `. * retry: added a retry predicate that :ref:`rejects hosts based on metadata. ` +* upstream: combined HTTP/1 and HTTP/2 connection pool code. This means that circuit breaker + limits for both requests and connections apply to both pool types. Also, HTTP/2 now has + the option to limit concurrent requests on a connection, and allow multiple draining + connections. The old behavior is deprecated, but can be used during the deprecation + period by disabling runtime feature "envoy.reloadable_features.new_http1_connection_pool_behavior" or + "envoy.reloadable_features.new_http2_connection_pool_behavior" and then re-configure your clusters or + restart Envoy. The behavior will not switch until the connection pools are recreated. 
The new + circuit breaker behavior is described :ref:`here `. * upstream: changed load distribution algorithm when all priorities enter :ref:`panic mode`. 1.13.0 (January 20, 2020) diff --git a/generated_api_shadow/envoy/api/v2/core/protocol.proto b/generated_api_shadow/envoy/api/v2/core/protocol.proto index c8b8cdc6dd354..322add56edf17 100644 --- a/generated_api_shadow/envoy/api/v2/core/protocol.proto +++ b/generated_api_shadow/envoy/api/v2/core/protocol.proto @@ -117,6 +117,10 @@ message Http2ProtocolOptions { // `Maximum concurrent streams `_ // allowed for peer on one HTTP/2 connection. Valid values range from 1 to 2147483647 (2^31 - 1) // and defaults to 2147483647. + // + // For upstream connections, this also limits how many streams Envoy will initiate concurrently + // on a single connection. If the limit is reached, Envoy may queue requests or establish + // additional connections (as allowed per circuit breaker limits). google.protobuf.UInt32Value max_concurrent_streams = 2 [(validate.rules).uint32 = {lte: 2147483647 gte: 1}]; diff --git a/generated_api_shadow/envoy/config/core/v3/protocol.proto b/generated_api_shadow/envoy/config/core/v3/protocol.proto index dbbb4318e0cfe..77e7359fca000 100644 --- a/generated_api_shadow/envoy/config/core/v3/protocol.proto +++ b/generated_api_shadow/envoy/config/core/v3/protocol.proto @@ -136,6 +136,10 @@ message Http2ProtocolOptions { // `Maximum concurrent streams `_ // allowed for peer on one HTTP/2 connection. Valid values range from 1 to 2147483647 (2^31 - 1) // and defaults to 2147483647. + // + // For upstream connections, this also limits how many streams Envoy will initiate concurrently + // on a single connection. If the limit is reached, Envoy may queue requests or establish + // additional connections (as allowed per circuit breaker limits). 
google.protobuf.UInt32Value max_concurrent_streams = 2 [(validate.rules).uint32 = {lte: 2147483647 gte: 1}]; diff --git a/source/common/common/linked_object.h b/source/common/common/linked_object.h index 2efbc14cf6b76..13fc6d491567b 100644 --- a/source/common/common/linked_object.h +++ b/source/common/common/linked_object.h @@ -28,15 +28,15 @@ template class LinkedObject { bool inserted() { return inserted_; } /** - * Move a linked item between 2 lists. - * @param list1 supplies the first list. - * @param list2 supplies the second list. + * Move a linked item from src list to dst list. + * @param src supplies the list that the item is currently in. + * @param dst supplies the destination list for the item. */ - void moveBetweenLists(ListType& list1, ListType& list2) { + void moveBetweenLists(ListType& src, ListType& dst) { ASSERT(inserted_); - ASSERT(std::find(list1.begin(), list1.end(), *entry_) != list1.end()); + ASSERT(std::find(src.begin(), src.end(), *entry_) != src.end()); - list2.splice(list2.begin(), list1, entry_); + dst.splice(dst.begin(), src, entry_); } /** diff --git a/source/common/http/BUILD b/source/common/http/BUILD index dfc48436824c2..9e8d0f9febef4 100644 --- a/source/common/http/BUILD +++ b/source/common/http/BUILD @@ -114,6 +114,20 @@ envoy_cc_library( name = "conn_pool_base_lib", srcs = ["conn_pool_base.cc"], hdrs = ["conn_pool_base.h"], + deps = [ + ":codec_client_lib", + "//include/envoy/http:conn_pool_interface", + "//include/envoy/stats:timespan_interface", + "//source/common/common:linked_object", + "//source/common/stats:timespan_lib", + "//source/common/upstream:upstream_lib", + ], +) + +envoy_cc_library( + name = "conn_pool_base_legacy_lib", + srcs = ["conn_pool_base_legacy.cc"], + hdrs = ["conn_pool_base_legacy.h"], deps = [ "//include/envoy/http:conn_pool_interface", "//include/envoy/stats:timespan_interface", diff --git a/source/common/http/conn_pool_base.cc b/source/common/http/conn_pool_base.cc index ac3d303a41646..89e63a7d1e703 
100644 --- a/source/common/http/conn_pool_base.cc +++ b/source/common/http/conn_pool_base.cc @@ -1,40 +1,320 @@ #include "common/http/conn_pool_base.h" #include "common/stats/timespan_impl.h" +#include "common/upstream/upstream_impl.h" namespace Envoy { namespace Http { -ConnPoolImplBase::ActiveClient::ActiveClient(Event::Dispatcher& dispatcher, - const Upstream::ClusterInfo& cluster) - : connect_timer_(dispatcher.createTimer([this]() -> void { onConnectTimeout(); })) { +ConnPoolImplBase::ConnPoolImplBase( + Upstream::HostConstSharedPtr host, Upstream::ResourcePriority priority, + Event::Dispatcher& dispatcher, const Network::ConnectionSocket::OptionsSharedPtr& options, + const Network::TransportSocketOptionsSharedPtr& transport_socket_options) + : host_(host), priority_(priority), dispatcher_(dispatcher), socket_options_(options), + transport_socket_options_(transport_socket_options) {} - conn_connect_ms_ = std::make_unique( - cluster.stats().upstream_cx_connect_ms_, dispatcher.timeSource()); - conn_length_ = std::make_unique( - cluster.stats().upstream_cx_length_ms_, dispatcher.timeSource()); - connect_timer_->enableTimer(cluster.connectTimeout()); +ConnPoolImplBase::~ConnPoolImplBase() { + ASSERT(ready_clients_.empty()); + ASSERT(busy_clients_.empty()); +} + +void ConnPoolImplBase::destructAllConnections() { + for (auto* list : {&ready_clients_, &busy_clients_}) { + while (!list->empty()) { + list->front()->close(); + } + } + + // Make sure all clients are destroyed before we are destroyed. + dispatcher_.clearDeferredDeleteList(); +} + +void ConnPoolImplBase::tryCreateNewConnection() { + if (pending_requests_.size() <= connecting_request_capacity_) { + // There are already enough CONNECTING connections for the number + // of queued requests. 
+ return; + } + + const bool can_create_connection = + host_->cluster().resourceManager(priority_).connections().canCreate(); + if (!can_create_connection) { + host_->cluster().stats().upstream_cx_overflow_.inc(); + } + // If we are at the connection circuit-breaker limit due to other upstreams having + // too many open connections, and this upstream has no connections, always create one, to + // prevent pending requests being queued to this upstream with no way to be processed. + if (can_create_connection || (ready_clients_.empty() && busy_clients_.empty())) { + ENVOY_LOG(debug, "creating a new connection"); + ActiveClientPtr client = instantiateActiveClient(); + ASSERT(client->state_ == ActiveClient::State::CONNECTING); + ASSERT(std::numeric_limits::max() - connecting_request_capacity_ >= + client->effectiveConcurrentRequestLimit()); + connecting_request_capacity_ += client->effectiveConcurrentRequestLimit(); + client->moveIntoList(std::move(client), owningList(client->state_)); + } +} + +void ConnPoolImplBase::attachRequestToClient(ActiveClient& client, StreamDecoder& response_decoder, + ConnectionPool::Callbacks& callbacks) { + ASSERT(client.state_ == ActiveClient::State::READY); + + if (!host_->cluster().resourceManager(priority_).requests().canCreate()) { + ENVOY_LOG(debug, "max requests overflow"); + callbacks.onPoolFailure(ConnectionPool::PoolFailureReason::Overflow, absl::string_view(), + nullptr); + host_->cluster().stats().upstream_rq_pending_overflow_.inc(); + } else { + ENVOY_CONN_LOG(debug, "creating stream", *client.codec_client_); + StreamEncoder& new_encoder = client.newStreamEncoder(response_decoder); + + client.remaining_requests_--; + if (client.remaining_requests_ == 0) { + ENVOY_CONN_LOG(debug, "maximum requests per connection, DRAINING", *client.codec_client_); + host_->cluster().stats().upstream_cx_max_requests_.inc(); + transitionActiveClientState(client, ActiveClient::State::DRAINING); + } else if (client.codec_client_->numActiveRequests() 
>= client.concurrent_request_limit_) { + transitionActiveClientState(client, ActiveClient::State::BUSY); + } + + num_active_requests_++; + host_->stats().rq_total_.inc(); + host_->stats().rq_active_.inc(); + host_->cluster().stats().upstream_rq_total_.inc(); + host_->cluster().stats().upstream_rq_active_.inc(); + host_->cluster().resourceManager(priority_).requests().inc(); + callbacks.onPoolReady(new_encoder, client.real_host_description_, + client.codec_client_->streamInfo()); + } +} + +void ConnPoolImplBase::onRequestClosed(ActiveClient& client, bool delay_attaching_request) { + ENVOY_CONN_LOG(debug, "destroying stream: {} remaining", *client.codec_client_, + client.codec_client_->numActiveRequests()); + ASSERT(num_active_requests_ > 0); + num_active_requests_--; + host_->stats().rq_active_.dec(); + host_->cluster().stats().upstream_rq_active_.dec(); + host_->cluster().resourceManager(priority_).requests().dec(); + if (client.state_ == ActiveClient::State::DRAINING && + client.codec_client_->numActiveRequests() == 0) { + // Close out the draining client if we no longer have active requests. + client.codec_client_->close(); + } else if (client.state_ == ActiveClient::State::BUSY) { + // A request was just ended, so we should be below the limit now. 
+ ASSERT(client.codec_client_->numActiveRequests() < client.concurrent_request_limit_); + + transitionActiveClientState(client, ActiveClient::State::READY); + if (!delay_attaching_request) { + onUpstreamReady(); + } + } +} + +ConnectionPool::Cancellable* ConnPoolImplBase::newStream(Http::StreamDecoder& response_decoder, + ConnectionPool::Callbacks& callbacks) { + if (!ready_clients_.empty()) { + ActiveClient& client = *ready_clients_.front(); + ENVOY_CONN_LOG(debug, "using existing connection", *client.codec_client_); + attachRequestToClient(client, response_decoder, callbacks); + return nullptr; + } + + if (host_->cluster().resourceManager(priority_).pendingRequests().canCreate()) { + ConnectionPool::Cancellable* pending = newPendingRequest(response_decoder, callbacks); + + // This must come after newPendingRequest() because this function uses the + // length of pending_requests_ to determine if a new connection is needed. + tryCreateNewConnection(); + + return pending; + } else { + ENVOY_LOG(debug, "max pending requests overflow"); + callbacks.onPoolFailure(ConnectionPool::PoolFailureReason::Overflow, absl::string_view(), + nullptr); + host_->cluster().stats().upstream_rq_pending_overflow_.inc(); + return nullptr; + } +} + +void ConnPoolImplBase::onUpstreamReady() { + while (!pending_requests_.empty() && !ready_clients_.empty()) { + ActiveClientPtr& client = ready_clients_.front(); + ENVOY_CONN_LOG(debug, "attaching to next request", *client->codec_client_); + // Pending requests are pushed onto the front, so pull from the back. 
+ attachRequestToClient(*client, pending_requests_.back()->decoder_, + pending_requests_.back()->callbacks_); + pending_requests_.pop_back(); + } +} + +bool ConnPoolImplBase::hasActiveConnections() const { + return (!pending_requests_.empty() || (num_active_requests_ > 0)); +} + +std::list& +ConnPoolImplBase::owningList(ActiveClient::State state) { + switch (state) { + case ActiveClient::State::CONNECTING: + return busy_clients_; + case ActiveClient::State::READY: + return ready_clients_; + case ActiveClient::State::BUSY: + return busy_clients_; + case ActiveClient::State::DRAINING: + return busy_clients_; + case ActiveClient::State::CLOSED: + NOT_REACHED_GCOVR_EXCL_LINE; + } + NOT_REACHED_GCOVR_EXCL_LINE; +} + +void ConnPoolImplBase::transitionActiveClientState(ActiveClient& client, + ActiveClient::State new_state) { + auto& old_list = owningList(client.state_); + auto& new_list = owningList(new_state); + client.state_ = new_state; + + // old_list and new_list can be equal when transitioning from BUSY to DRAINING. + // + // The documentation for list.splice() (which is what moveBetweenLists() calls) is + // unclear whether it is allowed for src and dst to be the same, so check here + // since it is a no-op anyways. + if (&old_list != &new_list) { + client.moveBetweenLists(old_list, new_list); + } +} + +void ConnPoolImplBase::addDrainedCallback(DrainedCb cb) { + drained_callbacks_.push_back(cb); + checkForDrained(); } -void ConnPoolImplBase::ActiveClient::recordConnectionSetup() { - conn_connect_ms_->complete(); - conn_connect_ms_.reset(); +void ConnPoolImplBase::closeIdleConnections() { + // Create a separate list of elements to close to avoid mutate-while-iterating problems. 
+ std::list to_close; + + for (auto& client : ready_clients_) { + if (!client->hasActiveRequests()) { + to_close.push_back(client.get()); + } + } + + if (pending_requests_.empty()) { + for (auto& client : busy_clients_) { + if (client->state_ == ActiveClient::State::CONNECTING) { + to_close.push_back(client.get()); + } + } + } + + for (auto& entry : to_close) { + entry->close(); + } } -void ConnPoolImplBase::ActiveClient::disarmConnectTimeout() { - if (connect_timer_) { - connect_timer_->disableTimer(); - connect_timer_.reset(); +void ConnPoolImplBase::drainConnections() { + closeIdleConnections(); + + // closeIdleConnections() closes all connections in ready_clients_ with no active requests, + // so all remaining entries in ready_clients_ are serving requests. Move them and all entries + // in busy_clients_ to draining. + while (!ready_clients_.empty()) { + transitionActiveClientState(*ready_clients_.front(), ActiveClient::State::DRAINING); + } + + // Changing busy_clients_ to DRAINING does not move them between lists, + // so use a for-loop since the list is not mutated. + ASSERT(&owningList(ActiveClient::State::DRAINING) == &busy_clients_); + for (auto& busy_client : busy_clients_) { + // Moving a CONNECTING client to DRAINING would violate state assumptions, namely that DRAINING + // connections have active requests (otherwise they would be closed) and that clients receiving + // a Connected event are in state CONNECTING. + if (busy_client->state_ != ActiveClient::State::CONNECTING) { + transitionActiveClientState(*busy_client, ActiveClient::State::DRAINING); + } } } -ConnPoolImplBase::ActiveClient::ConnectionState ConnPoolImplBase::ActiveClient::connectionState() { - // We don't track any failure state, as the client should be deferred destroyed once a failure - // event is handled. 
- if (connect_timer_) { - return Connecting; +void ConnPoolImplBase::checkForDrained() { + if (drained_callbacks_.empty()) { + return; } - return Connected; + closeIdleConnections(); + + if (pending_requests_.empty() && ready_clients_.empty() && busy_clients_.empty()) { + ENVOY_LOG(debug, "invoking drained callbacks"); + for (const DrainedCb& cb : drained_callbacks_) { + cb(); + } + } +} + +void ConnPoolImplBase::onConnectionEvent(ConnPoolImplBase::ActiveClient& client, + Network::ConnectionEvent event) { + if (client.state_ == ActiveClient::State::CONNECTING) { + ASSERT(connecting_request_capacity_ >= client.effectiveConcurrentRequestLimit()); + connecting_request_capacity_ -= client.effectiveConcurrentRequestLimit(); + } + + if (event == Network::ConnectionEvent::RemoteClose || + event == Network::ConnectionEvent::LocalClose) { + // The client died. + ENVOY_CONN_LOG(debug, "client disconnected, failure reason: {}", *client.codec_client_, + client.codec_client_->connectionFailureReason()); + + Envoy::Upstream::reportUpstreamCxDestroy(host_, event); + const bool incomplete_request = client.closingWithIncompleteRequest(); + if (incomplete_request) { + Envoy::Upstream::reportUpstreamCxDestroyActiveRequest(host_, event); + } + + if (client.state_ == ActiveClient::State::CONNECTING) { + host_->cluster().stats().upstream_cx_connect_fail_.inc(); + host_->stats().cx_connect_fail_.inc(); + + // Raw connect failures should never happen under normal circumstances. If we have an upstream + // that is behaving badly, requests can get stuck here in the pending state. If we see a + // connect failure, we purge all pending requests so that calling code can determine what to + // do with the request. + // NOTE: We move the existing pending requests to a temporary list. This is done so that + // if retry logic submits a new request to the pool, we don't fail it inline. 
+ purgePendingRequests(client.real_host_description_, + client.codec_client_->connectionFailureReason()); + } + + // We need to release our resourceManager() resources before checking below for + // whether we can create a new connection. Normally this would happen when + // client's destructor runs, but this object needs to be deferredDelete'd(), so + // this forces part of its cleanup to happen now. + client.releaseResources(); + + dispatcher_.deferredDelete(client.removeFromList(owningList(client.state_))); + if (incomplete_request) { + checkForDrained(); + } + + client.state_ = ActiveClient::State::CLOSED; + + // If we have pending requests and we just lost a connection we should make a new one. + if (!pending_requests_.empty()) { + tryCreateNewConnection(); + } + } else if (event == Network::ConnectionEvent::Connected) { + client.conn_connect_ms_->complete(); + client.conn_connect_ms_.reset(); + + ASSERT(client.state_ == ActiveClient::State::CONNECTING); + transitionActiveClientState(client, ActiveClient::State::READY); + + onUpstreamReady(); + checkForDrained(); + } + + if (client.connect_timer_) { + client.connect_timer_->disableTimer(); + client.connect_timer_.reset(); + } } ConnPoolImplBase::PendingRequest::PendingRequest(ConnPoolImplBase& parent, StreamDecoder& decoder, @@ -89,5 +369,65 @@ void ConnPoolImplBase::onPendingRequestCancel(PendingRequest& request) { checkForDrained(); } +namespace { +// Translate zero to UINT64_MAX so that the zero/unlimited case doesn't +// have to be handled specially. +uint64_t translateZeroToUnlimited(uint64_t limit) { + return (limit != 0) ? 
limit : std::numeric_limits::max(); +} +} // namespace + +ConnPoolImplBase::ActiveClient::ActiveClient(ConnPoolImplBase& parent, + uint64_t lifetime_request_limit, + uint64_t concurrent_request_limit) + : parent_(parent), remaining_requests_(translateZeroToUnlimited(lifetime_request_limit)), + concurrent_request_limit_(translateZeroToUnlimited(concurrent_request_limit)), + connect_timer_(parent_.dispatcher_.createTimer([this]() -> void { onConnectTimeout(); })) { + Upstream::Host::CreateConnectionData data = parent_.host_->createConnection( + parent_.dispatcher_, parent_.socket_options_, parent_.transport_socket_options_); + real_host_description_ = data.host_description_; + codec_client_ = parent_.createCodecClient(data); + codec_client_->addConnectionCallbacks(*this); + + conn_connect_ms_ = std::make_unique( + parent_.host_->cluster().stats().upstream_cx_connect_ms_, parent_.dispatcher_.timeSource()); + conn_length_ = std::make_unique( + parent_.host_->cluster().stats().upstream_cx_length_ms_, parent_.dispatcher_.timeSource()); + connect_timer_->enableTimer(parent_.host_->cluster().connectTimeout()); + + parent_.host_->stats().cx_total_.inc(); + parent_.host_->stats().cx_active_.inc(); + parent_.host_->cluster().stats().upstream_cx_total_.inc(); + parent_.host_->cluster().stats().upstream_cx_active_.inc(); + parent_.host_->cluster().resourceManager(parent_.priority_).connections().inc(); + + codec_client_->setConnectionStats( + {parent_.host_->cluster().stats().upstream_cx_rx_bytes_total_, + parent_.host_->cluster().stats().upstream_cx_rx_bytes_buffered_, + parent_.host_->cluster().stats().upstream_cx_tx_bytes_total_, + parent_.host_->cluster().stats().upstream_cx_tx_bytes_buffered_, + &parent_.host_->cluster().stats().bind_errors_, nullptr}); +} + +ConnPoolImplBase::ActiveClient::~ActiveClient() { releaseResources(); } + +void ConnPoolImplBase::ActiveClient::releaseResources() { + if (!resources_released_) { + resources_released_ = true; + + 
conn_length_->complete(); + + parent_.host_->cluster().stats().upstream_cx_active_.dec(); + parent_.host_->stats().cx_active_.dec(); + parent_.host_->cluster().resourceManager(parent_.priority_).connections().dec(); + } +} + +void ConnPoolImplBase::ActiveClient::onConnectTimeout() { + ENVOY_CONN_LOG(debug, "connect timeout", *codec_client_); + parent_.host_->cluster().stats().upstream_cx_connect_timeout_.inc(); + close(); +} + } // namespace Http } // namespace Envoy diff --git a/source/common/http/conn_pool_base.h b/source/common/http/conn_pool_base.h index b2bad79126f5e..3d2fea253e19f 100644 --- a/source/common/http/conn_pool_base.h +++ b/source/common/http/conn_pool_base.h @@ -1,9 +1,12 @@ #pragma once +#include "envoy/event/dispatcher.h" #include "envoy/http/conn_pool.h" +#include "envoy/network/connection.h" #include "envoy/stats/timespan.h" #include "common/common/linked_object.h" +#include "common/http/codec_client.h" #include "absl/strings/string_view.h" @@ -11,33 +14,85 @@ namespace Envoy { namespace Http { // Base class that handles request queueing logic shared between connection pool implementations. 
-class ConnPoolImplBase : protected Logger::Loggable { +class ConnPoolImplBase : public ConnectionPool::Instance, + protected Logger::Loggable { +public: + // ConnectionPool::Instance + ConnectionPool::Cancellable* newStream(StreamDecoder& response_decoder, + ConnectionPool::Callbacks& callbacks) override; + void addDrainedCallback(DrainedCb cb) override; + bool hasActiveConnections() const override; + void drainConnections() override; + Upstream::HostDescriptionConstSharedPtr host() const override { return host_; }; + protected: - ConnPoolImplBase(Upstream::HostConstSharedPtr host, Upstream::ResourcePriority priority) - : host_(host), priority_(priority) {} - virtual ~ConnPoolImplBase() = default; + ConnPoolImplBase(Upstream::HostConstSharedPtr host, Upstream::ResourcePriority priority, + Event::Dispatcher& dispatcher, + const Network::ConnectionSocket::OptionsSharedPtr& options, + const Network::TransportSocketOptionsSharedPtr& transport_socket_options); + virtual ~ConnPoolImplBase(); + + // Closes and destroys all connections. This must be called in the destructor of + // derived classes because the derived ActiveClient will downcast parent_ to a more + // specific type of ConnPoolImplBase, but if the more specific part is already destructed + // (due to bottom-up destructor ordering in c++) that access will be invalid. + void destructAllConnections(); // ActiveClient provides a base class for connection pool clients that handles connection timings // as well as managing the connection timeout. 
- class ActiveClient { + class ActiveClient : public LinkedObject, + public Network::ConnectionCallbacks, + public Event::DeferredDeletable { public: - ActiveClient(Event::Dispatcher& dispatcher, const Upstream::ClusterInfo& cluster); - virtual ~ActiveClient() { conn_length_->complete(); } - - virtual void onConnectTimeout() PURE; - - void recordConnectionSetup(); - void disarmConnectTimeout(); + ActiveClient(ConnPoolImplBase& parent, uint64_t lifetime_request_limit, + uint64_t concurrent_request_limit); + virtual ~ActiveClient(); + + void releaseResources(); + + // Network::ConnectionCallbacks + void onEvent(Network::ConnectionEvent event) override { + parent_.onConnectionEvent(*this, event); + } + void onAboveWriteBufferHighWatermark() override {} + void onBelowWriteBufferLowWatermark() override {} + + void onConnectTimeout(); + void close() { codec_client_->close(); } + + // Returns the concurrent request limit, accounting for if the total request limit + // is less than the concurrent request limit. + uint64_t effectiveConcurrentRequestLimit() const { + return std::min(remaining_requests_, concurrent_request_limit_); + } + + virtual bool hasActiveRequests() const PURE; + virtual bool closingWithIncompleteRequest() const PURE; + virtual StreamEncoder& newStreamEncoder(StreamDecoder& response_decoder) PURE; + + enum class State { + CONNECTING, // Connection is not yet established. + READY, // Additional requests may be immediately dispatched to this connection. + BUSY, // Connection is at its concurrent request limit. + DRAINING, // No more requests can be dispatched to this connection, and it will be closed + // when all requests complete. + CLOSED // Connection is closed and object is queued for destruction. 
+ }; - enum ConnectionState { Connecting, Connected }; - ConnectionState connectionState(); - - private: - Event::TimerPtr connect_timer_; + ConnPoolImplBase& parent_; + uint64_t remaining_requests_; + const uint64_t concurrent_request_limit_; + State state_{State::CONNECTING}; + CodecClientPtr codec_client_; + Upstream::HostDescriptionConstSharedPtr real_host_description_; Stats::TimespanPtr conn_connect_ms_; Stats::TimespanPtr conn_length_; + Event::TimerPtr connect_timer_; + bool resources_released_{false}; }; + using ActiveClientPtr = std::unique_ptr; + struct PendingRequest : LinkedObject, public ConnectionPool::Cancellable { PendingRequest(ConnPoolImplBase& parent, StreamDecoder& decoder, ConnectionPool::Callbacks& callbacks); @@ -53,6 +108,15 @@ class ConnPoolImplBase : protected Logger::Loggable { using PendingRequestPtr = std::unique_ptr; + // Create a new CodecClient. + virtual CodecClientPtr createCodecClient(Upstream::Host::CreateConnectionData& data) PURE; + + // Returns a new instance of ActiveClient. + virtual ActiveClientPtr instantiateActiveClient() PURE; + + // Gets a pointer to the list that currently owns this client. + std::list& owningList(ActiveClient::State state); + // Creates a new PendingRequest and enqueues it into the request queue. ConnectionPool::Cancellable* newPendingRequest(StreamDecoder& decoder, ConnectionPool::Callbacks& callbacks); @@ -64,15 +128,55 @@ class ConnPoolImplBase : protected Logger::Loggable { void purgePendingRequests(const Upstream::HostDescriptionConstSharedPtr& host_description, absl::string_view failure_reason); - // Must be implemented by sub class. Attempts to drain inactive clients. - virtual void checkForDrained() PURE; + // Closes any idle connections. + void closeIdleConnections(); + + // Called by derived classes any time a request is completed or destroyed for any reason. 
+ void onRequestClosed(ActiveClient& client, bool delay_attaching_request); + // Changes the state_ of an ActiveClient and moves to the appropriate list. + void transitionActiveClientState(ActiveClient& client, ActiveClient::State new_state); + + void onConnectionEvent(ActiveClient& client, Network::ConnectionEvent event); + void checkForDrained(); + void onUpstreamReady(); + void attachRequestToClient(ActiveClient& client, StreamDecoder& response_decoder, + ConnectionPool::Callbacks& callbacks); + + // Creates a new connection if allowed by resourceManager, or if created to avoid + // starving this pool. + void tryCreateNewConnection(); + +public: const Upstream::HostConstSharedPtr host_; const Upstream::ResourcePriority priority_; + +protected: + Event::Dispatcher& dispatcher_; + const Network::ConnectionSocket::OptionsSharedPtr socket_options_; + const Network::TransportSocketOptionsSharedPtr transport_socket_options_; + + std::list drained_callbacks_; std::list pending_requests_; + // When calling purgePendingRequests, this list will be used to hold the requests we are about // to purge. We need this if one cancelled requests cancels a different pending request std::list pending_requests_to_purge_; + + // Clients that are ready to handle additional requests. + // All entries are in state READY. + std::list ready_clients_; + + // Clients that are not ready to handle additional requests. + // Entries are in possible states CONNECTING, BUSY, or DRAINING. + std::list busy_clients_; + + // The number of requests currently attached to clients. + uint64_t num_active_requests_{0}; + + // The number of requests that can be immediately dispatched + // if all CONNECTING connections become connected. 
+ uint64_t connecting_request_capacity_{0}; }; } // namespace Http } // namespace Envoy diff --git a/source/common/http/conn_pool_base_legacy.cc b/source/common/http/conn_pool_base_legacy.cc new file mode 100644 index 0000000000000..f41e6ea67b120 --- /dev/null +++ b/source/common/http/conn_pool_base_legacy.cc @@ -0,0 +1,95 @@ +#include "common/http/conn_pool_base_legacy.h" + +#include "common/stats/timespan_impl.h" + +namespace Envoy { +namespace Http { +namespace Legacy { +ConnPoolImplBase::ActiveClient::ActiveClient(Event::Dispatcher& dispatcher, + const Upstream::ClusterInfo& cluster) + : connect_timer_(dispatcher.createTimer([this]() -> void { onConnectTimeout(); })) { + + conn_connect_ms_ = std::make_unique( + cluster.stats().upstream_cx_connect_ms_, dispatcher.timeSource()); + conn_length_ = std::make_unique( + cluster.stats().upstream_cx_length_ms_, dispatcher.timeSource()); + connect_timer_->enableTimer(cluster.connectTimeout()); +} + +void ConnPoolImplBase::ActiveClient::recordConnectionSetup() { + conn_connect_ms_->complete(); + conn_connect_ms_.reset(); +} + +void ConnPoolImplBase::ActiveClient::disarmConnectTimeout() { + if (connect_timer_) { + connect_timer_->disableTimer(); + connect_timer_.reset(); + } +} + +ConnPoolImplBase::ActiveClient::ConnectionState ConnPoolImplBase::ActiveClient::connectionState() { + // We don't track any failure state, as the client should be deferred destroyed once a failure + // event is handled. 
+ if (connect_timer_) { + return Connecting; + } + + return Connected; +} + +ConnPoolImplBase::PendingRequest::PendingRequest(ConnPoolImplBase& parent, StreamDecoder& decoder, + ConnectionPool::Callbacks& callbacks) + : parent_(parent), decoder_(decoder), callbacks_(callbacks) { + parent_.host_->cluster().stats().upstream_rq_pending_total_.inc(); + parent_.host_->cluster().stats().upstream_rq_pending_active_.inc(); + parent_.host_->cluster().resourceManager(parent_.priority_).pendingRequests().inc(); +} + +ConnPoolImplBase::PendingRequest::~PendingRequest() { + parent_.host_->cluster().stats().upstream_rq_pending_active_.dec(); + parent_.host_->cluster().resourceManager(parent_.priority_).pendingRequests().dec(); +} + +ConnectionPool::Cancellable* +ConnPoolImplBase::newPendingRequest(StreamDecoder& decoder, ConnectionPool::Callbacks& callbacks) { + ENVOY_LOG(debug, "queueing request due to no available connections"); + PendingRequestPtr pending_request(new PendingRequest(*this, decoder, callbacks)); + pending_request->moveIntoList(std::move(pending_request), pending_requests_); + return pending_requests_.front().get(); +} + +void ConnPoolImplBase::purgePendingRequests( + const Upstream::HostDescriptionConstSharedPtr& host_description, + absl::string_view failure_reason) { + // NOTE: We move the existing pending requests to a temporary list. This is done so that + // if retry logic submits a new request to the pool, we don't fail it inline. 
+ pending_requests_to_purge_ = std::move(pending_requests_); + while (!pending_requests_to_purge_.empty()) { + PendingRequestPtr request = + pending_requests_to_purge_.front()->removeFromList(pending_requests_to_purge_); + host_->cluster().stats().upstream_rq_pending_failure_eject_.inc(); + request->callbacks_.onPoolFailure(ConnectionPool::PoolFailureReason::ConnectionFailure, + failure_reason, host_description); + } +} + +void ConnPoolImplBase::onPendingRequestCancel(PendingRequest& request) { + ENVOY_LOG(debug, "cancelling pending request"); + if (!pending_requests_to_purge_.empty()) { + // If pending_requests_to_purge_ is not empty, it means that we are called from + // within an onPoolFailure callback invoked in purgePendingRequests (i.e. purgePendingRequests + // is down in the call stack). Remove this request from the list as it is cancelled, + // and there is no need to call its onPoolFailure callback. + request.removeFromList(pending_requests_to_purge_); + } else { + request.removeFromList(pending_requests_); + } + + host_->cluster().stats().upstream_rq_cancelled_.inc(); + checkForDrained(); +} + +} // namespace Legacy +} // namespace Http +} // namespace Envoy diff --git a/source/common/http/conn_pool_base_legacy.h b/source/common/http/conn_pool_base_legacy.h new file mode 100644 index 0000000000000..5a2cba34708db --- /dev/null +++ b/source/common/http/conn_pool_base_legacy.h @@ -0,0 +1,80 @@ +#pragma once + +#include "envoy/http/conn_pool.h" +#include "envoy/stats/timespan.h" + +#include "common/common/linked_object.h" + +#include "absl/strings/string_view.h" + +namespace Envoy { +namespace Http { +namespace Legacy { + +// Base class that handles request queueing logic shared between connection pool implementations.
+class ConnPoolImplBase : protected Logger::Loggable { +protected: + ConnPoolImplBase(Upstream::HostConstSharedPtr host, Upstream::ResourcePriority priority) + : host_(host), priority_(priority) {} + virtual ~ConnPoolImplBase() = default; + + // ActiveClient provides a base class for connection pool clients that handles connection timings + // as well as managing the connection timeout. + class ActiveClient { + public: + ActiveClient(Event::Dispatcher& dispatcher, const Upstream::ClusterInfo& cluster); + virtual ~ActiveClient() { conn_length_->complete(); } + + virtual void onConnectTimeout() PURE; + + void recordConnectionSetup(); + void disarmConnectTimeout(); + + enum ConnectionState { Connecting, Connected }; + ConnectionState connectionState(); + + private: + Event::TimerPtr connect_timer_; + Stats::TimespanPtr conn_connect_ms_; + Stats::TimespanPtr conn_length_; + }; + + struct PendingRequest : LinkedObject, public ConnectionPool::Cancellable { + PendingRequest(ConnPoolImplBase& parent, StreamDecoder& decoder, + ConnectionPool::Callbacks& callbacks); + ~PendingRequest() override; + + // ConnectionPool::Cancellable + void cancel() override { parent_.onPendingRequestCancel(*this); } + + ConnPoolImplBase& parent_; + StreamDecoder& decoder_; + ConnectionPool::Callbacks& callbacks_; + }; + + using PendingRequestPtr = std::unique_ptr; + + // Creates a new PendingRequest and enqueues it into the request queue. + ConnectionPool::Cancellable* newPendingRequest(StreamDecoder& decoder, + ConnectionPool::Callbacks& callbacks); + // Removes the PendingRequest from the list of requests. Called when the PendingRequest is + // cancelled, e.g. when the stream is reset before a connection has been established. + void onPendingRequestCancel(PendingRequest& request); + + // Fails all pending requests, calling onPoolFailure on the associated callbacks. 
+ void purgePendingRequests(const Upstream::HostDescriptionConstSharedPtr& host_description, + absl::string_view failure_reason); + + // Must be implemented by subclass. Attempts to drain inactive clients. + virtual void checkForDrained() PURE; + + const Upstream::HostConstSharedPtr host_; + const Upstream::ResourcePriority priority_; + std::list pending_requests_; + // When calling purgePendingRequests, this list will be used to hold the requests we are about + // to purge. We need this if one cancelled request cancels a different pending request. + std::list pending_requests_to_purge_; +}; +} // namespace Legacy +} // namespace Http +} // namespace Envoy diff --git a/source/common/http/http1/BUILD b/source/common/http/http1/BUILD index aca8bd1d8d0e0..dfc07f18b765a 100644 --- a/source/common/http/http1/BUILD +++ b/source/common/http/http1/BUILD @@ -44,9 +44,9 @@ envoy_cc_library( ) envoy_cc_library( - name = "conn_pool_lib", - srcs = ["conn_pool.cc"], - hdrs = ["conn_pool.h"], + name = "conn_pool_legacy_lib", + srcs = ["conn_pool_legacy.cc"], + hdrs = ["conn_pool_legacy.h"], external_deps = ["abseil_optional"], deps = [ "//include/envoy/event:deferred_deletable", @@ -63,7 +63,7 @@ envoy_cc_library( "//source/common/http:codec_client_lib", "//source/common/http:codec_wrappers_lib", "//source/common/http:codes_lib", - "//source/common/http:conn_pool_base_lib", + "//source/common/http:conn_pool_base_legacy_lib", "//source/common/http:headers_lib", "//source/common/network:utility_lib", "//source/common/runtime:runtime_lib", @@ -71,3 +71,26 @@ envoy_cc_library( "//source/common/upstream:upstream_lib", ], ) + +envoy_cc_library( + name = "conn_pool_lib", + srcs = ["conn_pool.cc"], + hdrs = ["conn_pool.h"], + external_deps = ["abseil_optional"], + deps = [ + ":conn_pool_legacy_lib", + "//include/envoy/event:deferred_deletable", + "//include/envoy/event:dispatcher_interface", + "//include/envoy/event:timer_interface", + "//include/envoy/http:conn_pool_interface", +
"//include/envoy/http:header_map_interface", + "//include/envoy/upstream:upstream_interface", + "//source/common/http:codec_client_lib", + "//source/common/http:codec_wrappers_lib", + "//source/common/http:codes_lib", + "//source/common/http:conn_pool_base_lib", + "//source/common/http:headers_lib", + "//source/common/runtime:runtime_lib", + "//source/common/upstream:upstream_lib", + ], +) diff --git a/source/common/http/http1/conn_pool.cc b/source/common/http/http1/conn_pool.cc index 7a7f57592bae4..ec2dd558706ce 100644 --- a/source/common/http/http1/conn_pool.cc +++ b/source/common/http/http1/conn_pool.cc @@ -10,14 +10,11 @@ #include "envoy/http/header_map.h" #include "envoy/upstream/upstream.h" -#include "common/common/utility.h" #include "common/http/codec_client.h" #include "common/http/codes.h" #include "common/http/headers.h" -#include "common/network/utility.h" +#include "common/http/http1/conn_pool_legacy.h" #include "common/runtime/runtime_impl.h" -#include "common/stats/timespan_impl.h" -#include "common/upstream/upstream_impl.h" #include "absl/strings/match.h" @@ -28,168 +25,18 @@ namespace Http1 { ConnPoolImpl::ConnPoolImpl(Event::Dispatcher& dispatcher, Upstream::HostConstSharedPtr host, Upstream::ResourcePriority priority, const Network::ConnectionSocket::OptionsSharedPtr& options, - const Http::Http1Settings& settings, const Network::TransportSocketOptionsSharedPtr& transport_socket_options) - : ConnPoolImplBase(std::move(host), std::move(priority)), dispatcher_(dispatcher), - socket_options_(options), transport_socket_options_(transport_socket_options), - upstream_ready_timer_(dispatcher_.createTimer([this]() { onUpstreamReady(); })), - settings_(settings) {} + : ConnPoolImplBase(std::move(host), std::move(priority), dispatcher, options, + transport_socket_options), + upstream_ready_timer_(dispatcher_.createTimer([this]() { + upstream_ready_enabled_ = false; + onUpstreamReady(); + })) {} -ConnPoolImpl::~ConnPoolImpl() { - while 
(!ready_clients_.empty()) { - ready_clients_.front()->codec_client_->close(); - } - - while (!busy_clients_.empty()) { - busy_clients_.front()->codec_client_->close(); - } - - // Make sure all clients are destroyed before we are destroyed. - dispatcher_.clearDeferredDeleteList(); -} - -void ConnPoolImpl::drainConnections() { - while (!ready_clients_.empty()) { - ready_clients_.front()->codec_client_->close(); - } - - // We drain busy clients by manually setting remaining requests to 1. Thus, when the next - // response completes the client will be destroyed. - for (const auto& client : busy_clients_) { - client->remaining_requests_ = 1; - } -} - -void ConnPoolImpl::addDrainedCallback(DrainedCb cb) { - drained_callbacks_.push_back(cb); - checkForDrained(); -} - -bool ConnPoolImpl::hasActiveConnections() const { - return !pending_requests_.empty() || !busy_clients_.empty(); -} - -void ConnPoolImpl::attachRequestToClient(ActiveClient& client, StreamDecoder& response_decoder, - ConnectionPool::Callbacks& callbacks) { - ASSERT(!client.stream_wrapper_); - host_->cluster().stats().upstream_rq_total_.inc(); - host_->stats().rq_total_.inc(); - client.stream_wrapper_ = std::make_unique(response_decoder, client); - callbacks.onPoolReady(*client.stream_wrapper_, client.real_host_description_, - client.codec_client_->streamInfo()); -} - -void ConnPoolImpl::checkForDrained() { - if (!drained_callbacks_.empty() && pending_requests_.empty() && busy_clients_.empty()) { - while (!ready_clients_.empty()) { - ready_clients_.front()->codec_client_->close(); - } - - for (const DrainedCb& cb : drained_callbacks_) { - cb(); - } - } -} - -void ConnPoolImpl::createNewConnection() { - ENVOY_LOG(debug, "creating a new connection"); - ActiveClientPtr client(new ActiveClient(*this)); - client->moveIntoList(std::move(client), busy_clients_); -} - -ConnectionPool::Cancellable* ConnPoolImpl::newStream(StreamDecoder& response_decoder, - ConnectionPool::Callbacks& callbacks) { - if 
(!ready_clients_.empty()) { - ready_clients_.front()->moveBetweenLists(ready_clients_, busy_clients_); - ENVOY_CONN_LOG(debug, "using existing connection", *busy_clients_.front()->codec_client_); - attachRequestToClient(*busy_clients_.front(), response_decoder, callbacks); - return nullptr; - } - - if (host_->cluster().resourceManager(priority_).pendingRequests().canCreate()) { - bool can_create_connection = - host_->cluster().resourceManager(priority_).connections().canCreate(); - if (!can_create_connection) { - host_->cluster().stats().upstream_cx_overflow_.inc(); - } - - // If we have no connections at all, make one no matter what so we don't starve. - if ((ready_clients_.empty() && busy_clients_.empty()) || can_create_connection) { - createNewConnection(); - } - - return newPendingRequest(response_decoder, callbacks); - } else { - ENVOY_LOG(debug, "max pending requests overflow"); - callbacks.onPoolFailure(ConnectionPool::PoolFailureReason::Overflow, absl::string_view(), - nullptr); - host_->cluster().stats().upstream_rq_pending_overflow_.inc(); - return nullptr; - } -} +ConnPoolImpl::~ConnPoolImpl() { destructAllConnections(); } -void ConnPoolImpl::onConnectionEvent(ActiveClient& client, Network::ConnectionEvent event) { - if (event == Network::ConnectionEvent::RemoteClose || - event == Network::ConnectionEvent::LocalClose) { - // The client died. - ENVOY_CONN_LOG(debug, "client disconnected, failure reason: {}", *client.codec_client_, - client.codec_client_->connectionFailureReason()); - - Envoy::Upstream::reportUpstreamCxDestroy(host_, event); - ActiveClientPtr removed; - bool check_for_drained = true; - if (client.stream_wrapper_) { - if (!client.stream_wrapper_->decode_complete_) { - Envoy::Upstream::reportUpstreamCxDestroyActiveRequest(host_, event); - } - - // There is an active request attached to this client. The underlying codec client will - // already have "reset" the stream to fire the reset callback. All we do here is just - // destroy the client. 
- removed = client.removeFromList(busy_clients_); - } else if (client.connectionState() == - ConnPoolImplBase::ActiveClient::ConnectionState::Connected) { - removed = client.removeFromList(ready_clients_); - check_for_drained = false; - } else { - // The only time this happens is if we actually saw a connect failure. - host_->cluster().stats().upstream_cx_connect_fail_.inc(); - host_->stats().cx_connect_fail_.inc(); - - removed = client.removeFromList(busy_clients_); - - // Raw connect failures should never happen under normal circumstances. If we have an upstream - // that is behaving badly, requests can get stuck here in the pending state. If we see a - // connect failure, we purge all pending requests so that calling code can determine what to - // do with the request. - ENVOY_CONN_LOG(debug, "purge pending, failure reason: {}", *client.codec_client_, - client.codec_client_->connectionFailureReason()); - purgePendingRequests(client.real_host_description_, - client.codec_client_->connectionFailureReason()); - } - - dispatcher_.deferredDelete(std::move(removed)); - - // If we have pending requests and we just lost a connection we should make a new one. - if (pending_requests_.size() > (ready_clients_.size() + busy_clients_.size())) { - createNewConnection(); - } - - if (check_for_drained) { - checkForDrained(); - } - } - - client.disarmConnectTimeout(); - - // Note that the order in this function is important. Concretely, we must destroy the connect - // timer before we process a connected idle client, because if this results in an immediate - // drain/destruction event, we key off of the existence of the connect timer above to determine - // whether the client is in the ready list (connected) or the busy list (failed to connect). 
- if (event == Network::ConnectionEvent::Connected) { - client.recordConnectionSetup(); - processIdleClient(client, false); - } +ConnPoolImplBase::ActiveClientPtr ConnPoolImpl::instantiateActiveClient() { + return std::make_unique(*this); } void ConnPoolImpl::onDownstreamReset(ActiveClient& client) { @@ -199,60 +46,23 @@ void ConnPoolImpl::onDownstreamReset(ActiveClient& client) { void ConnPoolImpl::onResponseComplete(ActiveClient& client) { ENVOY_CONN_LOG(debug, "response complete", *client.codec_client_); + if (!client.stream_wrapper_->encode_complete_) { ENVOY_CONN_LOG(debug, "response before request complete", *client.codec_client_); onDownstreamReset(client); } else if (client.stream_wrapper_->close_connection_ || client.codec_client_->remoteClosed()) { ENVOY_CONN_LOG(debug, "saw upstream close connection", *client.codec_client_); onDownstreamReset(client); - } else if (client.remaining_requests_ > 0 && --client.remaining_requests_ == 0) { - ENVOY_CONN_LOG(debug, "maximum requests per connection", *client.codec_client_); - host_->cluster().stats().upstream_cx_max_requests_.inc(); - onDownstreamReset(client); } else { - // Upstream connection might be closed right after response is complete. Setting delay=true - // here to attach pending requests in next dispatcher loop to handle that case. - // https://github.com/envoyproxy/envoy/issues/2715 - processIdleClient(client, true); - } -} + client.stream_wrapper_.reset(); -void ConnPoolImpl::onUpstreamReady() { - upstream_ready_enabled_ = false; - while (!pending_requests_.empty() && !ready_clients_.empty()) { - ActiveClient& client = *ready_clients_.front(); - ENVOY_CONN_LOG(debug, "attaching to next request", *client.codec_client_); - // There is work to do so bind a request to the client and move it to the busy list. Pending - // requests are pushed onto the front, so pull from the back. 
- attachRequestToClient(client, pending_requests_.back()->decoder_, - pending_requests_.back()->callbacks_); - pending_requests_.pop_back(); - client.moveBetweenLists(ready_clients_, busy_clients_); - } -} - -void ConnPoolImpl::processIdleClient(ActiveClient& client, bool delay) { - client.stream_wrapper_.reset(); - if (pending_requests_.empty() || delay) { - // There is nothing to service or delayed processing is requested, so just move the connection - // into the ready list. - ENVOY_CONN_LOG(debug, "moving to ready", *client.codec_client_); - client.moveBetweenLists(busy_clients_, ready_clients_); - } else { - // There is work to do immediately so bind a request to the client and move it to the busy list. - // Pending requests are pushed onto the front, so pull from the back. - ENVOY_CONN_LOG(debug, "attaching to next request", *client.codec_client_); - attachRequestToClient(client, pending_requests_.back()->decoder_, - pending_requests_.back()->callbacks_); - pending_requests_.pop_back(); - } + if (!pending_requests_.empty() && !upstream_ready_enabled_) { + upstream_ready_enabled_ = true; + upstream_ready_timer_->enableTimer(std::chrono::milliseconds(0)); + } - if (delay && !pending_requests_.empty() && !upstream_ready_enabled_) { - upstream_ready_enabled_ = true; - upstream_ready_timer_->enableTimer(std::chrono::milliseconds(0)); + checkForDrained(); } - - checkForDrained(); } ConnPoolImpl::StreamWrapper::StreamWrapper(StreamDecoder& response_decoder, ActiveClient& parent) @@ -260,22 +70,13 @@ ConnPoolImpl::StreamWrapper::StreamWrapper(StreamDecoder& response_decoder, Acti StreamDecoderWrapper(response_decoder), parent_(parent) { StreamEncoderWrapper::inner_.getStream().addCallbacks(*this); - parent_.parent_.host_->cluster().stats().upstream_rq_active_.inc(); - parent_.parent_.host_->stats().rq_active_.inc(); - - // TODO (tonya11en): At the time of writing, there is no way to mix different versions of HTTP - // traffic in the same cluster, so incrementing the 
request count in the per-cluster resource - // manager will not affect circuit breaking in any unexpected ways. Ideally, outstanding requests - // counts would be tracked the same way in all HTTP versions. - // - // See: https://github.com/envoyproxy/envoy/issues/9215 - parent_.parent_.host_->cluster().resourceManager(parent_.parent_.priority_).requests().inc(); } ConnPoolImpl::StreamWrapper::~StreamWrapper() { - parent_.parent_.host_->cluster().stats().upstream_rq_active_.dec(); - parent_.parent_.host_->stats().rq_active_.dec(); - parent_.parent_.host_->cluster().resourceManager(parent_.parent_.priority_).requests().dec(); + // Upstream connection might be closed right after response is complete. Setting delay=true + // here to attach pending requests in next dispatcher loop to handle that case. + // https://github.com/envoyproxy/envoy/issues/2715 + parent_.parent().onRequestClosed(parent_, true); } void ConnPoolImpl::StreamWrapper::onEncodeComplete() { encode_complete_ = true; } @@ -303,46 +104,27 @@ void ConnPoolImpl::StreamWrapper::decodeHeaders(HeaderMapPtr&& headers, bool end void ConnPoolImpl::StreamWrapper::onDecodeComplete() { decode_complete_ = encode_complete_; - parent_.parent_.onResponseComplete(parent_); + parent_.parent().onResponseComplete(parent_); } ConnPoolImpl::ActiveClient::ActiveClient(ConnPoolImpl& parent) - : ConnPoolImplBase::ActiveClient(parent.dispatcher_, parent.host_->cluster()), parent_(parent), - remaining_requests_(parent_.host_->cluster().maxRequestsPerConnection()) { - - Upstream::Host::CreateConnectionData data = parent_.host_->createConnection( - parent_.dispatcher_, parent_.socket_options_, parent_.transport_socket_options_); - real_host_description_ = data.host_description_; - codec_client_ = parent_.createCodecClient(data); - codec_client_->addConnectionCallbacks(*this); - - parent_.host_->cluster().stats().upstream_cx_total_.inc(); - parent_.host_->cluster().stats().upstream_cx_active_.inc(); - 
parent_.host_->cluster().stats().upstream_cx_http1_total_.inc(); - parent_.host_->stats().cx_total_.inc(); - parent_.host_->stats().cx_active_.inc(); - parent_.host_->cluster().resourceManager(parent_.priority_).connections().inc(); - - codec_client_->setConnectionStats( - {parent_.host_->cluster().stats().upstream_cx_rx_bytes_total_, - parent_.host_->cluster().stats().upstream_cx_rx_bytes_buffered_, - parent_.host_->cluster().stats().upstream_cx_tx_bytes_total_, - parent_.host_->cluster().stats().upstream_cx_tx_bytes_buffered_, - &parent_.host_->cluster().stats().bind_errors_, nullptr}); + : ConnPoolImplBase::ActiveClient( + parent, parent.host_->cluster().maxRequestsPerConnection(), + 1 // HTTP1 always has a concurrent-request-limit of 1 per connection. + ) { + parent.host_->cluster().stats().upstream_cx_http1_total_.inc(); } -ConnPoolImpl::ActiveClient::~ActiveClient() { - parent_.host_->cluster().stats().upstream_cx_active_.dec(); - parent_.host_->stats().cx_active_.dec(); - parent_.host_->cluster().resourceManager(parent_.priority_).connections().dec(); +bool ConnPoolImpl::ActiveClient::hasActiveRequests() const { return stream_wrapper_ != nullptr; } + +bool ConnPoolImpl::ActiveClient::closingWithIncompleteRequest() const { + return (stream_wrapper_ != nullptr) && (!stream_wrapper_->decode_complete_); } -void ConnPoolImpl::ActiveClient::onConnectTimeout() { - // We just close the client at this point. This will result in both a timeout and a connect - // failure and will fold into all the normal connect failure logic. 
- ENVOY_CONN_LOG(debug, "connect timeout", *codec_client_); - parent_.host_->cluster().stats().upstream_cx_connect_timeout_.inc(); - codec_client_->close(); +StreamEncoder& ConnPoolImpl::ActiveClient::newStreamEncoder(StreamDecoder& response_decoder) { + ASSERT(!stream_wrapper_); + stream_wrapper_ = std::make_unique(response_decoder, *this); + return *stream_wrapper_; } CodecClientPtr ProdConnPoolImpl::createCodecClient(Upstream::Host::CreateConnectionData& data) { @@ -351,6 +133,21 @@ CodecClientPtr ProdConnPoolImpl::createCodecClient(Upstream::Host::CreateConnect return codec; } +ConnectionPool::InstancePtr +allocateConnPool(Event::Dispatcher& dispatcher, Upstream::HostConstSharedPtr host, + Upstream::ResourcePriority priority, + const Network::ConnectionSocket::OptionsSharedPtr& options, + const Network::TransportSocketOptionsSharedPtr& transport_socket_options) { + if (Runtime::runtimeFeatureEnabled( + "envoy.reloadable_features.new_http1_connection_pool_behavior")) { + return std::make_unique(dispatcher, host, priority, options, + transport_socket_options); + } else { + return std::make_unique( + dispatcher, host, priority, options, transport_socket_options); + } +} + } // namespace Http1 } // namespace Http } // namespace Envoy diff --git a/source/common/http/http1/conn_pool.h b/source/common/http/http1/conn_pool.h index 9efd2df84375f..c9b013b126762 100644 --- a/source/common/http/http1/conn_pool.h +++ b/source/common/http/http1/conn_pool.h @@ -1,24 +1,12 @@ #pragma once -#include -#include -#include - -#include "envoy/event/deferred_deletable.h" #include "envoy/event/timer.h" #include "envoy/http/codec.h" -#include "envoy/http/conn_pool.h" -#include "envoy/network/connection.h" -#include "envoy/stats/timespan.h" #include "envoy/upstream/upstream.h" -#include "common/common/linked_object.h" -#include "common/http/codec_client.h" #include "common/http/codec_wrappers.h" #include "common/http/conn_pool_base.h" -#include "absl/types/optional.h" - namespace Envoy 
{ namespace Http { namespace Http1 { @@ -29,27 +17,20 @@ namespace Http1 { * address. Higher layer code should handle resolving DNS on error and creating a new pool * bound to a different IP address. */ -class ConnPoolImpl : public ConnectionPool::Instance, public ConnPoolImplBase { +class ConnPoolImpl : public ConnPoolImplBase { public: ConnPoolImpl(Event::Dispatcher& dispatcher, Upstream::HostConstSharedPtr host, Upstream::ResourcePriority priority, const Network::ConnectionSocket::OptionsSharedPtr& options, - const Http::Http1Settings& settings, const Network::TransportSocketOptionsSharedPtr& transport_socket_options); ~ConnPoolImpl() override; // ConnectionPool::Instance Http::Protocol protocol() const override { return Http::Protocol::Http11; } - void addDrainedCallback(DrainedCb cb) override; - void drainConnections() override; - bool hasActiveConnections() const override; - ConnectionPool::Cancellable* newStream(StreamDecoder& response_decoder, - ConnectionPool::Callbacks& callbacks) override; - Upstream::HostDescriptionConstSharedPtr host() const override { return host_; }; // ConnPoolImplBase - void checkForDrained() override; + ActiveClientPtr instantiateActiveClient() override; protected: struct ActiveClient; @@ -70,7 +51,7 @@ class ConnPoolImpl : public ConnectionPool::Instance, public ConnPoolImplBase { // Http::StreamCallbacks void onResetStream(StreamResetReason, absl::string_view) override { - parent_.parent_.onDownstreamReset(parent_); + parent_.parent().onDownstreamReset(parent_); } void onAboveWriteBufferHighWatermark() override {} void onBelowWriteBufferLowWatermark() override {} @@ -83,50 +64,26 @@ class ConnPoolImpl : public ConnectionPool::Instance, public ConnPoolImplBase { using StreamWrapperPtr = std::unique_ptr; - struct ActiveClient : ConnPoolImplBase::ActiveClient, - LinkedObject, - public Network::ConnectionCallbacks, - public Event::DeferredDeletable { + struct ActiveClient : public ConnPoolImplBase::ActiveClient { 
ActiveClient(ConnPoolImpl& parent); - ~ActiveClient() override; - void onConnectTimeout() override; + ConnPoolImpl& parent() { return static_cast(parent_); } - // Network::ConnectionCallbacks - void onEvent(Network::ConnectionEvent event) override { - parent_.onConnectionEvent(*this, event); - } - void onAboveWriteBufferHighWatermark() override {} - void onBelowWriteBufferLowWatermark() override {} + // ConnPoolImplBase::ActiveClient + bool hasActiveRequests() const override; + bool closingWithIncompleteRequest() const override; + StreamEncoder& newStreamEncoder(StreamDecoder& response_decoder) override; - ConnPoolImpl& parent_; - CodecClientPtr codec_client_; - Upstream::HostDescriptionConstSharedPtr real_host_description_; StreamWrapperPtr stream_wrapper_; - uint64_t remaining_requests_; }; - using ActiveClientPtr = std::unique_ptr; - - void attachRequestToClient(ActiveClient& client, StreamDecoder& response_decoder, - ConnectionPool::Callbacks& callbacks); - virtual CodecClientPtr createCodecClient(Upstream::Host::CreateConnectionData& data) PURE; - void createNewConnection(); - void onConnectionEvent(ActiveClient& client, Network::ConnectionEvent event); void onDownstreamReset(ActiveClient& client); void onResponseComplete(ActiveClient& client); - void onUpstreamReady(); - void processIdleClient(ActiveClient& client, bool delay); - - Event::Dispatcher& dispatcher_; - std::list ready_clients_; - std::list busy_clients_; - std::list drained_callbacks_; - const Network::ConnectionSocket::OptionsSharedPtr socket_options_; - const Network::TransportSocketOptionsSharedPtr transport_socket_options_; + ActiveClient& firstReady() const { return static_cast(*ready_clients_.front()); } + ActiveClient& firstBusy() const { return static_cast(*busy_clients_.front()); } + Event::TimerPtr upstream_ready_timer_; bool upstream_ready_enabled_{false}; - const Http1Settings settings_; }; /** @@ -137,14 +94,19 @@ class ProdConnPoolImpl : public ConnPoolImpl { 
ProdConnPoolImpl(Event::Dispatcher& dispatcher, Upstream::HostConstSharedPtr host, Upstream::ResourcePriority priority, const Network::ConnectionSocket::OptionsSharedPtr& options, - const Http::Http1Settings& settings, const Network::TransportSocketOptionsSharedPtr& transport_socket_options) - : ConnPoolImpl(dispatcher, host, priority, options, settings, transport_socket_options) {} + : ConnPoolImpl(dispatcher, host, priority, options, transport_socket_options) {} // ConnPoolImpl CodecClientPtr createCodecClient(Upstream::Host::CreateConnectionData& data) override; }; +ConnectionPool::InstancePtr +allocateConnPool(Event::Dispatcher& dispatcher, Upstream::HostConstSharedPtr host, + Upstream::ResourcePriority priority, + const Network::ConnectionSocket::OptionsSharedPtr& options, + const Network::TransportSocketOptionsSharedPtr& transport_socket_options); + } // namespace Http1 } // namespace Http } // namespace Envoy diff --git a/source/common/http/http1/conn_pool_legacy.cc b/source/common/http/http1/conn_pool_legacy.cc new file mode 100644 index 0000000000000..6f85335298434 --- /dev/null +++ b/source/common/http/http1/conn_pool_legacy.cc @@ -0,0 +1,356 @@ +#include "common/http/http1/conn_pool_legacy.h" + +#include +#include +#include + +#include "envoy/event/dispatcher.h" +#include "envoy/event/timer.h" +#include "envoy/http/codec.h" +#include "envoy/http/header_map.h" +#include "envoy/upstream/upstream.h" + +#include "common/common/utility.h" +#include "common/http/codec_client.h" +#include "common/http/codes.h" +#include "common/http/headers.h" +#include "common/network/utility.h" +#include "common/runtime/runtime_impl.h" +#include "common/stats/timespan_impl.h" +#include "common/upstream/upstream_impl.h" + +#include "absl/strings/match.h" + +namespace Envoy { +namespace Http { +namespace Legacy { +namespace Http1 { + +ConnPoolImpl::ConnPoolImpl(Event::Dispatcher& dispatcher, Upstream::HostConstSharedPtr host, + Upstream::ResourcePriority priority, + const 
Network::ConnectionSocket::OptionsSharedPtr& options, + const Network::TransportSocketOptionsSharedPtr& transport_socket_options) + : ConnPoolImplBase(std::move(host), std::move(priority)), dispatcher_(dispatcher), + socket_options_(options), transport_socket_options_(transport_socket_options), + upstream_ready_timer_(dispatcher_.createTimer([this]() { onUpstreamReady(); })) {} + +ConnPoolImpl::~ConnPoolImpl() { + while (!ready_clients_.empty()) { + ready_clients_.front()->codec_client_->close(); + } + + while (!busy_clients_.empty()) { + busy_clients_.front()->codec_client_->close(); + } + + // Make sure all clients are destroyed before we are destroyed. + dispatcher_.clearDeferredDeleteList(); +} + +void ConnPoolImpl::drainConnections() { + while (!ready_clients_.empty()) { + ready_clients_.front()->codec_client_->close(); + } + + // We drain busy clients by manually setting remaining requests to 1. Thus, when the next + // response completes the client will be destroyed. + for (const auto& client : busy_clients_) { + client->remaining_requests_ = 1; + } +} + +void ConnPoolImpl::addDrainedCallback(DrainedCb cb) { + drained_callbacks_.push_back(cb); + checkForDrained(); +} + +bool ConnPoolImpl::hasActiveConnections() const { + return !pending_requests_.empty() || !busy_clients_.empty(); +} + +void ConnPoolImpl::attachRequestToClient(ActiveClient& client, StreamDecoder& response_decoder, + ConnectionPool::Callbacks& callbacks) { + ASSERT(!client.stream_wrapper_); + host_->cluster().stats().upstream_rq_total_.inc(); + host_->stats().rq_total_.inc(); + client.stream_wrapper_ = std::make_unique(response_decoder, client); + callbacks.onPoolReady(*client.stream_wrapper_, client.real_host_description_, + client.codec_client_->streamInfo()); +} + +void ConnPoolImpl::checkForDrained() { + if (!drained_callbacks_.empty() && pending_requests_.empty() && busy_clients_.empty()) { + while (!ready_clients_.empty()) { + ready_clients_.front()->codec_client_->close(); + } + + for 
(const DrainedCb& cb : drained_callbacks_) { + cb(); + } + } +} + +void ConnPoolImpl::createNewConnection() { + ENVOY_LOG(debug, "creating a new connection"); + ActiveClientPtr client(new ActiveClient(*this)); + client->moveIntoList(std::move(client), busy_clients_); +} + +ConnectionPool::Cancellable* ConnPoolImpl::newStream(StreamDecoder& response_decoder, + ConnectionPool::Callbacks& callbacks) { + if (!ready_clients_.empty()) { + ready_clients_.front()->moveBetweenLists(ready_clients_, busy_clients_); + ENVOY_CONN_LOG(debug, "using existing connection", *busy_clients_.front()->codec_client_); + attachRequestToClient(*busy_clients_.front(), response_decoder, callbacks); + return nullptr; + } + + if (host_->cluster().resourceManager(priority_).pendingRequests().canCreate()) { + bool can_create_connection = + host_->cluster().resourceManager(priority_).connections().canCreate(); + if (!can_create_connection) { + host_->cluster().stats().upstream_cx_overflow_.inc(); + } + + // If we have no connections at all, make one no matter what so we don't starve. + if ((ready_clients_.empty() && busy_clients_.empty()) || can_create_connection) { + createNewConnection(); + } + + return newPendingRequest(response_decoder, callbacks); + } else { + ENVOY_LOG(debug, "max pending requests overflow"); + callbacks.onPoolFailure(ConnectionPool::PoolFailureReason::Overflow, absl::string_view(), + nullptr); + host_->cluster().stats().upstream_rq_pending_overflow_.inc(); + return nullptr; + } +} + +void ConnPoolImpl::onConnectionEvent(ActiveClient& client, Network::ConnectionEvent event) { + if (event == Network::ConnectionEvent::RemoteClose || + event == Network::ConnectionEvent::LocalClose) { + // The client died. 
+ ENVOY_CONN_LOG(debug, "client disconnected, failure reason: {}", *client.codec_client_, + client.codec_client_->connectionFailureReason()); + + Envoy::Upstream::reportUpstreamCxDestroy(host_, event); + ActiveClientPtr removed; + bool check_for_drained = true; + if (client.stream_wrapper_) { + if (!client.stream_wrapper_->decode_complete_) { + Envoy::Upstream::reportUpstreamCxDestroyActiveRequest(host_, event); + } + + // There is an active request attached to this client. The underlying codec client will + // already have "reset" the stream to fire the reset callback. All we do here is just + // destroy the client. + removed = client.removeFromList(busy_clients_); + } else if (client.connectionState() == + ConnPoolImplBase::ActiveClient::ConnectionState::Connected) { + removed = client.removeFromList(ready_clients_); + check_for_drained = false; + } else { + // The only time this happens is if we actually saw a connect failure. + host_->cluster().stats().upstream_cx_connect_fail_.inc(); + host_->stats().cx_connect_fail_.inc(); + + removed = client.removeFromList(busy_clients_); + + // Raw connect failures should never happen under normal circumstances. If we have an upstream + // that is behaving badly, requests can get stuck here in the pending state. If we see a + // connect failure, we purge all pending requests so that calling code can determine what to + // do with the request. + ENVOY_CONN_LOG(debug, "purge pending, failure reason: {}", *client.codec_client_, + client.codec_client_->connectionFailureReason()); + purgePendingRequests(client.real_host_description_, + client.codec_client_->connectionFailureReason()); + } + + dispatcher_.deferredDelete(std::move(removed)); + + // If we have pending requests and we just lost a connection we should make a new one. 
+ if (pending_requests_.size() > (ready_clients_.size() + busy_clients_.size())) { + createNewConnection(); + } + + if (check_for_drained) { + checkForDrained(); + } + } + + client.disarmConnectTimeout(); + + // Note that the order in this function is important. Concretely, we must destroy the connect + // timer before we process a connected idle client, because if this results in an immediate + // drain/destruction event, we key off of the existence of the connect timer above to determine + // whether the client is in the ready list (connected) or the busy list (failed to connect). + if (event == Network::ConnectionEvent::Connected) { + client.recordConnectionSetup(); + processIdleClient(client, false); + } +} + +void ConnPoolImpl::onDownstreamReset(ActiveClient& client) { + // If we get a downstream reset to an attached client, we just blow it away. + client.codec_client_->close(); +} + +void ConnPoolImpl::onResponseComplete(ActiveClient& client) { + ENVOY_CONN_LOG(debug, "response complete", *client.codec_client_); + if (!client.stream_wrapper_->encode_complete_) { + ENVOY_CONN_LOG(debug, "response before request complete", *client.codec_client_); + onDownstreamReset(client); + } else if (client.stream_wrapper_->close_connection_ || client.codec_client_->remoteClosed()) { + ENVOY_CONN_LOG(debug, "saw upstream close connection", *client.codec_client_); + onDownstreamReset(client); + } else if (client.remaining_requests_ > 0 && --client.remaining_requests_ == 0) { + ENVOY_CONN_LOG(debug, "maximum requests per connection", *client.codec_client_); + host_->cluster().stats().upstream_cx_max_requests_.inc(); + onDownstreamReset(client); + } else { + // Upstream connection might be closed right after response is complete. Setting delay=true + // here to attach pending requests in next dispatcher loop to handle that case. 
+ // https://github.com/envoyproxy/envoy/issues/2715 + processIdleClient(client, true); + } +} + +void ConnPoolImpl::onUpstreamReady() { + upstream_ready_enabled_ = false; + while (!pending_requests_.empty() && !ready_clients_.empty()) { + ActiveClient& client = *ready_clients_.front(); + ENVOY_CONN_LOG(debug, "attaching to next request", *client.codec_client_); + // There is work to do so bind a request to the client and move it to the busy list. Pending + // requests are pushed onto the front, so pull from the back. + attachRequestToClient(client, pending_requests_.back()->decoder_, + pending_requests_.back()->callbacks_); + pending_requests_.pop_back(); + client.moveBetweenLists(ready_clients_, busy_clients_); + } +} + +void ConnPoolImpl::processIdleClient(ActiveClient& client, bool delay) { + client.stream_wrapper_.reset(); + if (pending_requests_.empty() || delay) { + // There is nothing to service or delayed processing is requested, so just move the connection + // into the ready list. + ENVOY_CONN_LOG(debug, "moving to ready", *client.codec_client_); + client.moveBetweenLists(busy_clients_, ready_clients_); + } else { + // There is work to do immediately so bind a request to the client and move it to the busy list. + // Pending requests are pushed onto the front, so pull from the back. 
+ ENVOY_CONN_LOG(debug, "attaching to next request", *client.codec_client_); + attachRequestToClient(client, pending_requests_.back()->decoder_, + pending_requests_.back()->callbacks_); + pending_requests_.pop_back(); + } + + if (delay && !pending_requests_.empty() && !upstream_ready_enabled_) { + upstream_ready_enabled_ = true; + upstream_ready_timer_->enableTimer(std::chrono::milliseconds(0)); + } + + checkForDrained(); +} + +ConnPoolImpl::StreamWrapper::StreamWrapper(StreamDecoder& response_decoder, ActiveClient& parent) + : StreamEncoderWrapper(parent.codec_client_->newStream(*this)), + StreamDecoderWrapper(response_decoder), parent_(parent) { + + StreamEncoderWrapper::inner_.getStream().addCallbacks(*this); + parent_.parent_.host_->cluster().stats().upstream_rq_active_.inc(); + parent_.parent_.host_->stats().rq_active_.inc(); + + // TODO (tonya11en): At the time of writing, there is no way to mix different versions of HTTP + // traffic in the same cluster, so incrementing the request count in the per-cluster resource + // manager will not affect circuit breaking in any unexpected ways. Ideally, outstanding requests + // counts would be tracked the same way in all HTTP versions. 
+ // + // See: https://github.com/envoyproxy/envoy/issues/9215 + parent_.parent_.host_->cluster().resourceManager(parent_.parent_.priority_).requests().inc(); +} + +ConnPoolImpl::StreamWrapper::~StreamWrapper() { + parent_.parent_.host_->cluster().stats().upstream_rq_active_.dec(); + parent_.parent_.host_->stats().rq_active_.dec(); + parent_.parent_.host_->cluster().resourceManager(parent_.parent_.priority_).requests().dec(); +} + +void ConnPoolImpl::StreamWrapper::onEncodeComplete() { encode_complete_ = true; } + +void ConnPoolImpl::StreamWrapper::decodeHeaders(HeaderMapPtr&& headers, bool end_stream) { + // If Connection: close OR + // Http/1.0 and not Connection: keep-alive OR + // Proxy-Connection: close + if ((headers->Connection() && + (absl::EqualsIgnoreCase(headers->Connection()->value().getStringView(), + Headers::get().ConnectionValues.Close))) || + (parent_.codec_client_->protocol() == Protocol::Http10 && + (!headers->Connection() || + !absl::EqualsIgnoreCase(headers->Connection()->value().getStringView(), + Headers::get().ConnectionValues.KeepAlive))) || + (headers->ProxyConnection() && + (absl::EqualsIgnoreCase(headers->ProxyConnection()->value().getStringView(), + Headers::get().ConnectionValues.Close)))) { + parent_.parent_.host_->cluster().stats().upstream_cx_close_notify_.inc(); + close_connection_ = true; + } + + StreamDecoderWrapper::decodeHeaders(std::move(headers), end_stream); +} + +void ConnPoolImpl::StreamWrapper::onDecodeComplete() { + decode_complete_ = encode_complete_; + parent_.parent_.onResponseComplete(parent_); +} + +ConnPoolImpl::ActiveClient::ActiveClient(ConnPoolImpl& parent) + : ConnPoolImplBase::ActiveClient(parent.dispatcher_, parent.host_->cluster()), parent_(parent), + remaining_requests_(parent_.host_->cluster().maxRequestsPerConnection()) { + + Upstream::Host::CreateConnectionData data = parent_.host_->createConnection( + parent_.dispatcher_, parent_.socket_options_, parent_.transport_socket_options_); + 
real_host_description_ = data.host_description_; + codec_client_ = parent_.createCodecClient(data); + codec_client_->addConnectionCallbacks(*this); + + parent_.host_->cluster().stats().upstream_cx_total_.inc(); + parent_.host_->cluster().stats().upstream_cx_active_.inc(); + parent_.host_->cluster().stats().upstream_cx_http1_total_.inc(); + parent_.host_->stats().cx_total_.inc(); + parent_.host_->stats().cx_active_.inc(); + parent_.host_->cluster().resourceManager(parent_.priority_).connections().inc(); + + codec_client_->setConnectionStats( + {parent_.host_->cluster().stats().upstream_cx_rx_bytes_total_, + parent_.host_->cluster().stats().upstream_cx_rx_bytes_buffered_, + parent_.host_->cluster().stats().upstream_cx_tx_bytes_total_, + parent_.host_->cluster().stats().upstream_cx_tx_bytes_buffered_, + &parent_.host_->cluster().stats().bind_errors_, nullptr}); +} + +ConnPoolImpl::ActiveClient::~ActiveClient() { + parent_.host_->cluster().stats().upstream_cx_active_.dec(); + parent_.host_->stats().cx_active_.dec(); + parent_.host_->cluster().resourceManager(parent_.priority_).connections().dec(); +} + +void ConnPoolImpl::ActiveClient::onConnectTimeout() { + // We just close the client at this point. This will result in both a timeout and a connect + // failure and will fold into all the normal connect failure logic. 
+ ENVOY_CONN_LOG(debug, "connect timeout", *codec_client_); + parent_.host_->cluster().stats().upstream_cx_connect_timeout_.inc(); + codec_client_->close(); +} + +CodecClientPtr ProdConnPoolImpl::createCodecClient(Upstream::Host::CreateConnectionData& data) { + CodecClientPtr codec{new CodecClientProd(CodecClient::Type::HTTP1, std::move(data.connection_), + data.host_description_, dispatcher_)}; + return codec; +} + +} // namespace Http1 +} // namespace Legacy +} // namespace Http +} // namespace Envoy diff --git a/source/common/http/http1/conn_pool_legacy.h b/source/common/http/http1/conn_pool_legacy.h new file mode 100644 index 0000000000000..e2f9aa0309888 --- /dev/null +++ b/source/common/http/http1/conn_pool_legacy.h @@ -0,0 +1,149 @@ +#pragma once + +#include +#include +#include + +#include "envoy/event/deferred_deletable.h" +#include "envoy/event/timer.h" +#include "envoy/http/codec.h" +#include "envoy/http/conn_pool.h" +#include "envoy/network/connection.h" +#include "envoy/stats/timespan.h" +#include "envoy/upstream/upstream.h" + +#include "common/common/linked_object.h" +#include "common/http/codec_client.h" +#include "common/http/codec_wrappers.h" +#include "common/http/conn_pool_base_legacy.h" + +#include "absl/types/optional.h" + +namespace Envoy { +namespace Http { +namespace Legacy { +namespace Http1 { + +/** + * A connection pool implementation for HTTP/1.1 connections. + * NOTE: The connection pool does NOT do DNS resolution. It assumes it is being given a numeric IP + * address. Higher layer code should handle resolving DNS on error and creating a new pool + * bound to a different IP address. 
+ */ +class ConnPoolImpl : public ConnectionPool::Instance, public Legacy::ConnPoolImplBase { +public: + ConnPoolImpl(Event::Dispatcher& dispatcher, Upstream::HostConstSharedPtr host, + Upstream::ResourcePriority priority, + const Network::ConnectionSocket::OptionsSharedPtr& options, + const Network::TransportSocketOptionsSharedPtr& transport_socket_options); + + ~ConnPoolImpl() override; + + // ConnectionPool::Instance + Http::Protocol protocol() const override { return Http::Protocol::Http11; } + void addDrainedCallback(DrainedCb cb) override; + void drainConnections() override; + bool hasActiveConnections() const override; + ConnectionPool::Cancellable* newStream(StreamDecoder& response_decoder, + ConnectionPool::Callbacks& callbacks) override; + Upstream::HostDescriptionConstSharedPtr host() const override { return host_; }; + + // ConnPoolImplBase + void checkForDrained() override; + +protected: + struct ActiveClient; + + struct StreamWrapper : public StreamEncoderWrapper, + public StreamDecoderWrapper, + public StreamCallbacks { + StreamWrapper(StreamDecoder& response_decoder, ActiveClient& parent); + ~StreamWrapper() override; + + // StreamEncoderWrapper + void onEncodeComplete() override; + + // StreamDecoderWrapper + void decodeHeaders(HeaderMapPtr&& headers, bool end_stream) override; + void onPreDecodeComplete() override {} + void onDecodeComplete() override; + + // Http::StreamCallbacks + void onResetStream(StreamResetReason, absl::string_view) override { + parent_.parent_.onDownstreamReset(parent_); + } + void onAboveWriteBufferHighWatermark() override {} + void onBelowWriteBufferLowWatermark() override {} + + ActiveClient& parent_; + bool encode_complete_{}; + bool close_connection_{}; + bool decode_complete_{}; + }; + + using StreamWrapperPtr = std::unique_ptr; + + struct ActiveClient : ConnPoolImplBase::ActiveClient, + LinkedObject, + public Network::ConnectionCallbacks, + public Event::DeferredDeletable { + ActiveClient(ConnPoolImpl& parent); + 
~ActiveClient() override; + + void onConnectTimeout() override; + + // Network::ConnectionCallbacks + void onEvent(Network::ConnectionEvent event) override { + parent_.onConnectionEvent(*this, event); + } + void onAboveWriteBufferHighWatermark() override {} + void onBelowWriteBufferLowWatermark() override {} + + ConnPoolImpl& parent_; + CodecClientPtr codec_client_; + Upstream::HostDescriptionConstSharedPtr real_host_description_; + StreamWrapperPtr stream_wrapper_; + uint64_t remaining_requests_; + }; + + using ActiveClientPtr = std::unique_ptr; + + void attachRequestToClient(ActiveClient& client, StreamDecoder& response_decoder, + ConnectionPool::Callbacks& callbacks); + virtual CodecClientPtr createCodecClient(Upstream::Host::CreateConnectionData& data) PURE; + void createNewConnection(); + void onConnectionEvent(ActiveClient& client, Network::ConnectionEvent event); + void onDownstreamReset(ActiveClient& client); + void onResponseComplete(ActiveClient& client); + void onUpstreamReady(); + void processIdleClient(ActiveClient& client, bool delay); + + Event::Dispatcher& dispatcher_; + std::list ready_clients_; + std::list busy_clients_; + std::list drained_callbacks_; + const Network::ConnectionSocket::OptionsSharedPtr socket_options_; + const Network::TransportSocketOptionsSharedPtr transport_socket_options_; + Event::TimerPtr upstream_ready_timer_; + bool upstream_ready_enabled_{false}; +}; + +/** + * Production implementation of the ConnPoolImpl. 
+ */ +class ProdConnPoolImpl : public ConnPoolImpl { +public: + ProdConnPoolImpl(Event::Dispatcher& dispatcher, Upstream::HostConstSharedPtr host, + Upstream::ResourcePriority priority, + const Network::ConnectionSocket::OptionsSharedPtr& options, + const Network::TransportSocketOptionsSharedPtr& transport_socket_options) + : ConnPoolImpl(dispatcher, host, priority, options, transport_socket_options) {} + + // ConnPoolImpl + CodecClientPtr createCodecClient(Upstream::Host::CreateConnectionData& data) override; +}; + +} // namespace Http1 +} // namespace Legacy +} // namespace Http +} // namespace Envoy diff --git a/source/common/http/http2/BUILD b/source/common/http/http2/BUILD index 6cc9f0ae451f4..ad8d93119ae51 100644 --- a/source/common/http/http2/BUILD +++ b/source/common/http/http2/BUILD @@ -57,9 +57,9 @@ envoy_cc_library( ) envoy_cc_library( - name = "conn_pool_lib", - srcs = ["conn_pool.cc"], - hdrs = ["conn_pool.h"], + name = "conn_pool_legacy_lib", + srcs = ["conn_pool_legacy.cc"], + hdrs = ["conn_pool_legacy.h"], deps = [ "//include/envoy/event:dispatcher_interface", "//include/envoy/event:timer_interface", @@ -68,13 +68,26 @@ envoy_cc_library( "//include/envoy/stats:timespan_interface", "//include/envoy/upstream:upstream_interface", "//source/common/http:codec_client_lib", - "//source/common/http:conn_pool_base_lib", + "//source/common/http:conn_pool_base_legacy_lib", "//source/common/network:utility_lib", "//source/common/stats:timespan_lib", "//source/common/upstream:upstream_lib", ], ) +envoy_cc_library( + name = "conn_pool_lib", + srcs = ["conn_pool.cc"], + hdrs = ["conn_pool.h"], + deps = [ + ":conn_pool_legacy_lib", + "//include/envoy/event:dispatcher_interface", + "//include/envoy/upstream:upstream_interface", + "//source/common/http:codec_client_lib", + "//source/common/http:conn_pool_base_lib", + ], +) + envoy_cc_library( name = "metadata_encoder_lib", srcs = ["metadata_encoder.cc"], diff --git a/source/common/http/http2/conn_pool.cc 
b/source/common/http/http2/conn_pool.cc index 6e6b249006f87..2fc988a8fe645 100644 --- a/source/common/http/http2/conn_pool.cc +++ b/source/common/http/http2/conn_pool.cc @@ -1,15 +1,12 @@ #include "common/http/http2/conn_pool.h" #include -#include #include "envoy/event/dispatcher.h" -#include "envoy/event/timer.h" #include "envoy/upstream/upstream.h" #include "common/http/http2/codec_impl.h" -#include "common/network/utility.h" -#include "common/upstream/upstream_impl.h" +#include "common/http/http2/conn_pool_legacy.h" namespace Envoy { namespace Http { @@ -19,224 +16,28 @@ ConnPoolImpl::ConnPoolImpl(Event::Dispatcher& dispatcher, Upstream::HostConstSha Upstream::ResourcePriority priority, const Network::ConnectionSocket::OptionsSharedPtr& options, const Network::TransportSocketOptionsSharedPtr& transport_socket_options) - : ConnPoolImplBase(std::move(host), std::move(priority)), dispatcher_(dispatcher), - socket_options_(options), transport_socket_options_(transport_socket_options) {} + : ConnPoolImplBase(std::move(host), std::move(priority), dispatcher, options, + transport_socket_options) {} -ConnPoolImpl::~ConnPoolImpl() { - if (primary_client_) { - primary_client_->client_->close(); - } - - if (draining_client_) { - draining_client_->client_->close(); - } - - // Make sure all clients are destroyed before we are destroyed. 
- dispatcher_.clearDeferredDeleteList(); -} - -void ConnPoolImpl::ConnPoolImpl::drainConnections() { - if (primary_client_ != nullptr) { - movePrimaryClientToDraining(); - } -} - -void ConnPoolImpl::addDrainedCallback(DrainedCb cb) { - drained_callbacks_.push_back(cb); - checkForDrained(); -} - -bool ConnPoolImpl::hasActiveConnections() const { - if (primary_client_ && primary_client_->client_->numActiveRequests() > 0) { - return true; - } - - if (draining_client_ && draining_client_->client_->numActiveRequests() > 0) { - return true; - } - - return !pending_requests_.empty(); -} - -void ConnPoolImpl::checkForDrained() { - if (drained_callbacks_.empty()) { - return; - } - - bool drained = true; - if (primary_client_) { - if (primary_client_->client_->numActiveRequests() == 0) { - primary_client_->client_->close(); - ASSERT(!primary_client_); - } else { - drained = false; - } - } - - ASSERT(!draining_client_ || (draining_client_->client_->numActiveRequests() > 0)); - if (draining_client_ && draining_client_->client_->numActiveRequests() > 0) { - drained = false; - } - - if (drained) { - ENVOY_LOG(debug, "invoking drained callbacks"); - for (const DrainedCb& cb : drained_callbacks_) { - cb(); - } - } -} - -void ConnPoolImpl::newClientStream(Http::StreamDecoder& response_decoder, - ConnectionPool::Callbacks& callbacks) { - if (!host_->cluster().resourceManager(priority_).requests().canCreate()) { - ENVOY_LOG(debug, "max requests overflow"); - callbacks.onPoolFailure(ConnectionPool::PoolFailureReason::Overflow, absl::string_view(), - nullptr); - host_->cluster().stats().upstream_rq_pending_overflow_.inc(); - } else { - ENVOY_CONN_LOG(debug, "creating stream", *primary_client_->client_); - primary_client_->total_streams_++; - host_->stats().rq_total_.inc(); - host_->stats().rq_active_.inc(); - host_->cluster().stats().upstream_rq_total_.inc(); - host_->cluster().stats().upstream_rq_active_.inc(); - host_->cluster().resourceManager(priority_).requests().inc(); - 
callbacks.onPoolReady(primary_client_->client_->newStream(response_decoder), - primary_client_->real_host_description_, - primary_client_->client_->streamInfo()); - } -} - -ConnectionPool::Cancellable* ConnPoolImpl::newStream(Http::StreamDecoder& response_decoder, - ConnectionPool::Callbacks& callbacks) { - ASSERT(drained_callbacks_.empty()); - - // First see if we need to handle max streams rollover. - uint64_t max_streams = host_->cluster().maxRequestsPerConnection(); - if (max_streams == 0) { - max_streams = maxTotalStreams(); - } - - if (primary_client_ && primary_client_->total_streams_ >= max_streams) { - movePrimaryClientToDraining(); - } - - if (!primary_client_) { - primary_client_ = std::make_unique(*this); - } - - // If the primary client is not connected yet, queue up the request. - if (!primary_client_->upstream_ready_) { - // If we're not allowed to enqueue more requests, fail fast. - if (!host_->cluster().resourceManager(priority_).pendingRequests().canCreate()) { - ENVOY_LOG(debug, "max pending requests overflow"); - callbacks.onPoolFailure(ConnectionPool::PoolFailureReason::Overflow, absl::string_view(), - nullptr); - host_->cluster().stats().upstream_rq_pending_overflow_.inc(); - return nullptr; - } - - return newPendingRequest(response_decoder, callbacks); - } - - // We already have an active client that's connected to upstream, so attempt to establish a - // new stream. 
- newClientStream(response_decoder, callbacks); - return nullptr; -} - -void ConnPoolImpl::onConnectionEvent(ActiveClient& client, Network::ConnectionEvent event) { - if (event == Network::ConnectionEvent::RemoteClose || - event == Network::ConnectionEvent::LocalClose) { - ENVOY_CONN_LOG(debug, "client disconnected", *client.client_); - - Envoy::Upstream::reportUpstreamCxDestroy(host_, event); - if (client.closed_with_active_rq_) { - Envoy::Upstream::reportUpstreamCxDestroyActiveRequest(host_, event); - } - - if (client.connectionState() == ConnPoolImplBase::ActiveClient::ConnectionState::Connecting) { - host_->cluster().stats().upstream_cx_connect_fail_.inc(); - host_->stats().cx_connect_fail_.inc(); - - // Raw connect failures should never happen under normal circumstances. If we have an upstream - // that is behaving badly, requests can get stuck here in the pending state. If we see a - // connect failure, we purge all pending requests so that calling code can determine what to - // do with the request. - // NOTE: We move the existing pending requests to a temporary list. This is done so that - // if retry logic submits a new request to the pool, we don't fail it inline. 
- purgePendingRequests(client.real_host_description_, - client.client_->connectionFailureReason()); - } - - if (&client == primary_client_.get()) { - ENVOY_CONN_LOG(debug, "destroying primary client", *client.client_); - dispatcher_.deferredDelete(std::move(primary_client_)); - } else { - ENVOY_CONN_LOG(debug, "destroying draining client", *client.client_); - dispatcher_.deferredDelete(std::move(draining_client_)); - } - - if (client.closed_with_active_rq_) { - checkForDrained(); - } - } - - if (event == Network::ConnectionEvent::Connected) { - client.recordConnectionSetup(); - - client.upstream_ready_ = true; - onUpstreamReady(); - } +ConnPoolImpl::~ConnPoolImpl() { destructAllConnections(); } - client.disarmConnectTimeout(); +ConnPoolImplBase::ActiveClientPtr ConnPoolImpl::instantiateActiveClient() { + return std::make_unique(*this); } - -void ConnPoolImpl::movePrimaryClientToDraining() { - ENVOY_CONN_LOG(debug, "moving primary to draining", *primary_client_->client_); - if (draining_client_) { - // This should pretty much never happen, but is possible if we start draining and then get - // a goaway for example. In this case just kill the current draining connection. It's not - // worth keeping a list. - draining_client_->client_->close(); - } - - ASSERT(!draining_client_); - if (primary_client_->client_->numActiveRequests() == 0) { - // If we are making a new connection and the primary does not have any active requests just - // close it now. 
- primary_client_->client_->close(); - } else { - draining_client_ = std::move(primary_client_); - } - - ASSERT(!primary_client_); -} - -void ConnPoolImpl::onConnectTimeout(ActiveClient& client) { - ENVOY_CONN_LOG(debug, "connect timeout", *client.client_); - host_->cluster().stats().upstream_cx_connect_timeout_.inc(); - client.client_->close(); -} - void ConnPoolImpl::onGoAway(ActiveClient& client) { - ENVOY_CONN_LOG(debug, "remote goaway", *client.client_); + ENVOY_CONN_LOG(debug, "remote goaway", *client.codec_client_); host_->cluster().stats().upstream_cx_close_notify_.inc(); - if (&client == primary_client_.get()) { - movePrimaryClientToDraining(); + if (client.state_ != ActiveClient::State::DRAINING) { + if (client.codec_client_->numActiveRequests() == 0) { + client.codec_client_->close(); + } else { + transitionActiveClientState(client, ActiveClient::State::DRAINING); + } } } void ConnPoolImpl::onStreamDestroy(ActiveClient& client) { - ENVOY_CONN_LOG(debug, "destroying stream: {} remaining", *client.client_, - client.client_->numActiveRequests()); - host_->stats().rq_active_.dec(); - host_->cluster().stats().upstream_rq_active_.dec(); - host_->cluster().resourceManager(priority_).requests().dec(); - if (&client == draining_client_.get() && client.client_->numActiveRequests() == 0) { - // Close out the draining client if we no long have active requests. - client.client_->close(); - } + onRequestClosed(client, false); // If we are destroying this stream because of a disconnect, do not check for drain here. We will // wait until the connection has been fully drained of streams and then check in the connection @@ -258,40 +59,31 @@ void ConnPoolImpl::onStreamReset(ActiveClient& client, Http::StreamResetReason r } } -void ConnPoolImpl::onUpstreamReady() { - // Establishes new codec streams for each pending request. 
- while (!pending_requests_.empty()) { - newClientStream(pending_requests_.back()->decoder_, pending_requests_.back()->callbacks_); - pending_requests_.pop_back(); - } +uint64_t ConnPoolImpl::maxRequestsPerConnection() { + uint64_t max_streams_config = host_->cluster().maxRequestsPerConnection(); + return (max_streams_config != 0) ? max_streams_config : DEFAULT_MAX_STREAMS; } ConnPoolImpl::ActiveClient::ActiveClient(ConnPoolImpl& parent) - : ConnPoolImplBase::ActiveClient(parent.dispatcher_, parent.host_->cluster()), parent_(parent) { - Upstream::Host::CreateConnectionData data = parent_.host_->createConnection( - parent_.dispatcher_, parent_.socket_options_, parent_.transport_socket_options_); - real_host_description_ = data.host_description_; - client_ = parent_.createCodecClient(data); - client_->addConnectionCallbacks(*this); - client_->setCodecClientCallbacks(*this); - client_->setCodecConnectionCallbacks(*this); + : ConnPoolImplBase::ActiveClient( + parent, parent.maxRequestsPerConnection(), + parent.host_->cluster().http2Settings().max_concurrent_streams_) { + codec_client_->setCodecClientCallbacks(*this); + codec_client_->setCodecConnectionCallbacks(*this); + + parent.host_->cluster().stats().upstream_cx_http2_total_.inc(); +} - parent_.host_->stats().cx_total_.inc(); - parent_.host_->stats().cx_active_.inc(); - parent_.host_->cluster().stats().upstream_cx_total_.inc(); - parent_.host_->cluster().stats().upstream_cx_active_.inc(); - parent_.host_->cluster().stats().upstream_cx_http2_total_.inc(); +bool ConnPoolImpl::ActiveClient::hasActiveRequests() const { + return codec_client_->numActiveRequests() > 0; +} - client_->setConnectionStats({parent_.host_->cluster().stats().upstream_cx_rx_bytes_total_, - parent_.host_->cluster().stats().upstream_cx_rx_bytes_buffered_, - parent_.host_->cluster().stats().upstream_cx_tx_bytes_total_, - parent_.host_->cluster().stats().upstream_cx_tx_bytes_buffered_, - &parent_.host_->cluster().stats().bind_errors_, nullptr}); 
+bool ConnPoolImpl::ActiveClient::closingWithIncompleteRequest() const { + return closed_with_active_rq_; } -ConnPoolImpl::ActiveClient::~ActiveClient() { - parent_.host_->stats().cx_active_.dec(); - parent_.host_->cluster().stats().upstream_cx_active_.dec(); +StreamEncoder& ConnPoolImpl::ActiveClient::newStreamEncoder(StreamDecoder& response_decoder) { + return codec_client_->newStream(response_decoder); } CodecClientPtr ProdConnPoolImpl::createCodecClient(Upstream::Host::CreateConnectionData& data) { @@ -300,7 +92,20 @@ CodecClientPtr ProdConnPoolImpl::createCodecClient(Upstream::Host::CreateConnect return codec; } -uint32_t ProdConnPoolImpl::maxTotalStreams() { return MAX_STREAMS; } +ConnectionPool::InstancePtr +allocateConnPool(Event::Dispatcher& dispatcher, Upstream::HostConstSharedPtr host, + Upstream::ResourcePriority priority, + const Network::ConnectionSocket::OptionsSharedPtr& options, + const Network::TransportSocketOptionsSharedPtr& transport_socket_options) { + if (Runtime::runtimeFeatureEnabled( + "envoy.reloadable_features.new_http2_connection_pool_behavior")) { + return std::make_unique(dispatcher, host, priority, options, + transport_socket_options); + } else { + return std::make_unique( + dispatcher, host, priority, options, transport_socket_options); + } +} } // namespace Http2 } // namespace Http diff --git a/source/common/http/http2/conn_pool.h b/source/common/http/http2/conn_pool.h index ab2a000ea01e0..30685c9a37f82 100644 --- a/source/common/http/http2/conn_pool.h +++ b/source/common/http/http2/conn_pool.h @@ -1,13 +1,7 @@ #pragma once #include -#include -#include -#include "envoy/event/timer.h" -#include "envoy/http/conn_pool.h" -#include "envoy/network/connection.h" -#include "envoy/stats/timespan.h" #include "envoy/upstream/upstream.h" #include "common/http/codec_client.h" @@ -22,80 +16,56 @@ namespace Http2 { * shifting to a new connection if we reach max streams on the primary. 
This is a base class * used for both the prod implementation as well as the testing one. */ -class ConnPoolImpl : public ConnectionPool::Instance, public ConnPoolImplBase { +class ConnPoolImpl : public ConnPoolImplBase { public: ConnPoolImpl(Event::Dispatcher& dispatcher, Upstream::HostConstSharedPtr host, Upstream::ResourcePriority priority, const Network::ConnectionSocket::OptionsSharedPtr& options, const Network::TransportSocketOptionsSharedPtr& transport_socket_options); + ~ConnPoolImpl() override; // Http::ConnectionPool::Instance Http::Protocol protocol() const override { return Http::Protocol::Http2; } - void addDrainedCallback(DrainedCb cb) override; - void drainConnections() override; - bool hasActiveConnections() const override; - ConnectionPool::Cancellable* newStream(Http::StreamDecoder& response_decoder, - ConnectionPool::Callbacks& callbacks) override; - Upstream::HostDescriptionConstSharedPtr host() const override { return host_; }; + + // ConnPoolImplBase + ActiveClientPtr instantiateActiveClient() override; protected: - struct ActiveClient : ConnPoolImplBase::ActiveClient, - public Network::ConnectionCallbacks, - public CodecClientCallbacks, - public Event::DeferredDeletable, - public Http::ConnectionCallbacks { + struct ActiveClient : public CodecClientCallbacks, + public Http::ConnectionCallbacks, + public ConnPoolImplBase::ActiveClient { ActiveClient(ConnPoolImpl& parent); - ~ActiveClient() override; + ~ActiveClient() override = default; - void onConnectTimeout() override { parent_.onConnectTimeout(*this); } + ConnPoolImpl& parent() { return static_cast(parent_); } - // Network::ConnectionCallbacks - void onEvent(Network::ConnectionEvent event) override { - parent_.onConnectionEvent(*this, event); - } - void onAboveWriteBufferHighWatermark() override {} - void onBelowWriteBufferLowWatermark() override {} + // ConnPoolImpl::ActiveClient + bool hasActiveRequests() const override; + bool closingWithIncompleteRequest() const override; + 
StreamEncoder& newStreamEncoder(StreamDecoder& response_decoder) override; // CodecClientCallbacks - void onStreamDestroy() override { parent_.onStreamDestroy(*this); } + void onStreamDestroy() override { parent().onStreamDestroy(*this); } void onStreamReset(Http::StreamResetReason reason) override { - parent_.onStreamReset(*this, reason); + parent().onStreamReset(*this, reason); } // Http::ConnectionCallbacks - void onGoAway() override { parent_.onGoAway(*this); } + void onGoAway() override { parent().onGoAway(*this); } - ConnPoolImpl& parent_; - CodecClientPtr client_; - Upstream::HostDescriptionConstSharedPtr real_host_description_; - uint64_t total_streams_{}; - bool upstream_ready_{}; bool closed_with_active_rq_{}; }; - using ActiveClientPtr = std::unique_ptr; - - // Http::ConnPoolImplBase - void checkForDrained() override; - - virtual CodecClientPtr createCodecClient(Upstream::Host::CreateConnectionData& data) PURE; - virtual uint32_t maxTotalStreams() PURE; + uint64_t maxRequestsPerConnection(); void movePrimaryClientToDraining(); - void onConnectionEvent(ActiveClient& client, Network::ConnectionEvent event); - void onConnectTimeout(ActiveClient& client); void onGoAway(ActiveClient& client); void onStreamDestroy(ActiveClient& client); void onStreamReset(ActiveClient& client, Http::StreamResetReason reason); - void newClientStream(Http::StreamDecoder& response_decoder, ConnectionPool::Callbacks& callbacks); - void onUpstreamReady(); - - Event::Dispatcher& dispatcher_; - ActiveClientPtr primary_client_; - ActiveClientPtr draining_client_; - std::list drained_callbacks_; - const Network::ConnectionSocket::OptionsSharedPtr socket_options_; - const Network::TransportSocketOptionsSharedPtr transport_socket_options_; + + // All streams are 2^31. Client streams are half that, minus stream 0. Just to be on the safe + // side we do 2^29. 
+ static const uint64_t DEFAULT_MAX_STREAMS = (1 << 29); }; /** @@ -107,13 +77,14 @@ class ProdConnPoolImpl : public ConnPoolImpl { private: CodecClientPtr createCodecClient(Upstream::Host::CreateConnectionData& data) override; - uint32_t maxTotalStreams() override; - - // All streams are 2^31. Client streams are half that, minus stream 0. Just to be on the safe - // side we do 2^29. - static const uint64_t MAX_STREAMS = (1 << 29); }; +ConnectionPool::InstancePtr +allocateConnPool(Event::Dispatcher& dispatcher, Upstream::HostConstSharedPtr host, + Upstream::ResourcePriority priority, + const Network::ConnectionSocket::OptionsSharedPtr& options, + const Network::TransportSocketOptionsSharedPtr& transport_socket_options); + } // namespace Http2 } // namespace Http } // namespace Envoy diff --git a/source/common/http/http2/conn_pool_legacy.cc b/source/common/http/http2/conn_pool_legacy.cc new file mode 100644 index 0000000000000..9e4c271fd6a5b --- /dev/null +++ b/source/common/http/http2/conn_pool_legacy.cc @@ -0,0 +1,309 @@ +#include "common/http/http2/conn_pool_legacy.h" + +#include +#include + +#include "envoy/event/dispatcher.h" +#include "envoy/event/timer.h" +#include "envoy/upstream/upstream.h" + +#include "common/http/http2/codec_impl.h" +#include "common/network/utility.h" +#include "common/upstream/upstream_impl.h" + +namespace Envoy { +namespace Http { +namespace Legacy { +namespace Http2 { + +ConnPoolImpl::ConnPoolImpl(Event::Dispatcher& dispatcher, Upstream::HostConstSharedPtr host, + Upstream::ResourcePriority priority, + const Network::ConnectionSocket::OptionsSharedPtr& options, + const Network::TransportSocketOptionsSharedPtr& transport_socket_options) + : ConnPoolImplBase(std::move(host), std::move(priority)), dispatcher_(dispatcher), + socket_options_(options), transport_socket_options_(transport_socket_options) {} + +ConnPoolImpl::~ConnPoolImpl() { + if (primary_client_) { + primary_client_->client_->close(); + } + + if (draining_client_) { + 
draining_client_->client_->close(); + } + + // Make sure all clients are destroyed before we are destroyed. + dispatcher_.clearDeferredDeleteList(); +} + +void ConnPoolImpl::ConnPoolImpl::drainConnections() { + if (primary_client_ != nullptr) { + movePrimaryClientToDraining(); + } +} + +void ConnPoolImpl::addDrainedCallback(DrainedCb cb) { + drained_callbacks_.push_back(cb); + checkForDrained(); +} + +bool ConnPoolImpl::hasActiveConnections() const { + if (primary_client_ && primary_client_->client_->numActiveRequests() > 0) { + return true; + } + + if (draining_client_ && draining_client_->client_->numActiveRequests() > 0) { + return true; + } + + return !pending_requests_.empty(); +} + +void ConnPoolImpl::checkForDrained() { + if (drained_callbacks_.empty()) { + return; + } + + bool drained = true; + if (primary_client_) { + if (primary_client_->client_->numActiveRequests() == 0) { + primary_client_->client_->close(); + ASSERT(!primary_client_); + } else { + drained = false; + } + } + + ASSERT(!draining_client_ || (draining_client_->client_->numActiveRequests() > 0)); + if (draining_client_ && draining_client_->client_->numActiveRequests() > 0) { + drained = false; + } + + if (drained) { + ENVOY_LOG(debug, "invoking drained callbacks"); + for (const DrainedCb& cb : drained_callbacks_) { + cb(); + } + } +} + +void ConnPoolImpl::newClientStream(Http::StreamDecoder& response_decoder, + ConnectionPool::Callbacks& callbacks) { + if (!host_->cluster().resourceManager(priority_).requests().canCreate()) { + ENVOY_LOG(debug, "max requests overflow"); + callbacks.onPoolFailure(ConnectionPool::PoolFailureReason::Overflow, absl::string_view(), + nullptr); + host_->cluster().stats().upstream_rq_pending_overflow_.inc(); + } else { + ENVOY_CONN_LOG(debug, "creating stream", *primary_client_->client_); + primary_client_->total_streams_++; + host_->stats().rq_total_.inc(); + host_->stats().rq_active_.inc(); + host_->cluster().stats().upstream_rq_total_.inc(); + 
host_->cluster().stats().upstream_rq_active_.inc(); + host_->cluster().resourceManager(priority_).requests().inc(); + callbacks.onPoolReady(primary_client_->client_->newStream(response_decoder), + primary_client_->real_host_description_, + primary_client_->client_->streamInfo()); + } +} + +ConnectionPool::Cancellable* ConnPoolImpl::newStream(Http::StreamDecoder& response_decoder, + ConnectionPool::Callbacks& callbacks) { + ASSERT(drained_callbacks_.empty()); + + // First see if we need to handle max streams rollover. + uint64_t max_streams = host_->cluster().maxRequestsPerConnection(); + if (max_streams == 0) { + max_streams = maxTotalStreams(); + } + + if (primary_client_ && primary_client_->total_streams_ >= max_streams) { + movePrimaryClientToDraining(); + } + + if (!primary_client_) { + primary_client_ = std::make_unique(*this); + } + + // If the primary client is not connected yet, queue up the request. + if (!primary_client_->upstream_ready_) { + // If we're not allowed to enqueue more requests, fail fast. + if (!host_->cluster().resourceManager(priority_).pendingRequests().canCreate()) { + ENVOY_LOG(debug, "max pending requests overflow"); + callbacks.onPoolFailure(ConnectionPool::PoolFailureReason::Overflow, absl::string_view(), + nullptr); + host_->cluster().stats().upstream_rq_pending_overflow_.inc(); + return nullptr; + } + + return newPendingRequest(response_decoder, callbacks); + } + + // We already have an active client that's connected to upstream, so attempt to establish a + // new stream. 
+ newClientStream(response_decoder, callbacks); + return nullptr; +} + +void ConnPoolImpl::onConnectionEvent(ActiveClient& client, Network::ConnectionEvent event) { + if (event == Network::ConnectionEvent::RemoteClose || + event == Network::ConnectionEvent::LocalClose) { + ENVOY_CONN_LOG(debug, "client disconnected", *client.client_); + + Envoy::Upstream::reportUpstreamCxDestroy(host_, event); + if (client.closed_with_active_rq_) { + Envoy::Upstream::reportUpstreamCxDestroyActiveRequest(host_, event); + } + + if (client.connectionState() == ConnPoolImplBase::ActiveClient::ConnectionState::Connecting) { + host_->cluster().stats().upstream_cx_connect_fail_.inc(); + host_->stats().cx_connect_fail_.inc(); + + // Raw connect failures should never happen under normal circumstances. If we have an upstream + // that is behaving badly, requests can get stuck here in the pending state. If we see a + // connect failure, we purge all pending requests so that calling code can determine what to + // do with the request. + // NOTE: We move the existing pending requests to a temporary list. This is done so that + // if retry logic submits a new request to the pool, we don't fail it inline. 
+ purgePendingRequests(client.real_host_description_, + client.client_->connectionFailureReason()); + } + + if (&client == primary_client_.get()) { + ENVOY_CONN_LOG(debug, "destroying primary client", *client.client_); + dispatcher_.deferredDelete(std::move(primary_client_)); + } else { + ENVOY_CONN_LOG(debug, "destroying draining client", *client.client_); + dispatcher_.deferredDelete(std::move(draining_client_)); + } + + if (client.closed_with_active_rq_) { + checkForDrained(); + } + } + + if (event == Network::ConnectionEvent::Connected) { + client.recordConnectionSetup(); + + client.upstream_ready_ = true; + onUpstreamReady(); + } + + client.disarmConnectTimeout(); +} + +void ConnPoolImpl::movePrimaryClientToDraining() { + ENVOY_CONN_LOG(debug, "moving primary to draining", *primary_client_->client_); + if (draining_client_) { + // This should pretty much never happen, but is possible if we start draining and then get + // a goaway for example. In this case just kill the current draining connection. It's not + // worth keeping a list. + draining_client_->client_->close(); + } + + ASSERT(!draining_client_); + if (primary_client_->client_->numActiveRequests() == 0) { + // If we are making a new connection and the primary does not have any active requests just + // close it now. 
+ primary_client_->client_->close(); + } else { + draining_client_ = std::move(primary_client_); + } + + ASSERT(!primary_client_); +} + +void ConnPoolImpl::onConnectTimeout(ActiveClient& client) { + ENVOY_CONN_LOG(debug, "connect timeout", *client.client_); + host_->cluster().stats().upstream_cx_connect_timeout_.inc(); + client.client_->close(); +} + +void ConnPoolImpl::onGoAway(ActiveClient& client) { + ENVOY_CONN_LOG(debug, "remote goaway", *client.client_); + host_->cluster().stats().upstream_cx_close_notify_.inc(); + if (&client == primary_client_.get()) { + movePrimaryClientToDraining(); + } +} + +void ConnPoolImpl::onStreamDestroy(ActiveClient& client) { + ENVOY_CONN_LOG(debug, "destroying stream: {} remaining", *client.client_, + client.client_->numActiveRequests()); + host_->stats().rq_active_.dec(); + host_->cluster().stats().upstream_rq_active_.dec(); + host_->cluster().resourceManager(priority_).requests().dec(); + if (&client == draining_client_.get() && client.client_->numActiveRequests() == 0) { + // Close out the draining client if we no longer have active requests. + client.client_->close(); + } + + // If we are destroying this stream because of a disconnect, do not check for drain here. We will + // wait until the connection has been fully drained of streams and then check in the connection + // event callback. 
+ if (!client.closed_with_active_rq_) { + checkForDrained(); + } +} + +void ConnPoolImpl::onStreamReset(ActiveClient& client, Http::StreamResetReason reason) { + if (reason == StreamResetReason::ConnectionTermination || + reason == StreamResetReason::ConnectionFailure) { + host_->cluster().stats().upstream_rq_pending_failure_eject_.inc(); + client.closed_with_active_rq_ = true; + } else if (reason == StreamResetReason::LocalReset) { + host_->cluster().stats().upstream_rq_tx_reset_.inc(); + } else if (reason == StreamResetReason::RemoteReset) { + host_->cluster().stats().upstream_rq_rx_reset_.inc(); + } +} + +void ConnPoolImpl::onUpstreamReady() { + // Establishes new codec streams for each pending request. + while (!pending_requests_.empty()) { + newClientStream(pending_requests_.back()->decoder_, pending_requests_.back()->callbacks_); + pending_requests_.pop_back(); + } +} + +ConnPoolImpl::ActiveClient::ActiveClient(ConnPoolImpl& parent) + : ConnPoolImplBase::ActiveClient(parent.dispatcher_, parent.host_->cluster()), parent_(parent) { + Upstream::Host::CreateConnectionData data = parent_.host_->createConnection( + parent_.dispatcher_, parent_.socket_options_, parent_.transport_socket_options_); + real_host_description_ = data.host_description_; + client_ = parent_.createCodecClient(data); + client_->addConnectionCallbacks(*this); + client_->setCodecClientCallbacks(*this); + client_->setCodecConnectionCallbacks(*this); + + parent_.host_->stats().cx_total_.inc(); + parent_.host_->stats().cx_active_.inc(); + parent_.host_->cluster().stats().upstream_cx_total_.inc(); + parent_.host_->cluster().stats().upstream_cx_active_.inc(); + parent_.host_->cluster().stats().upstream_cx_http2_total_.inc(); + + client_->setConnectionStats({parent_.host_->cluster().stats().upstream_cx_rx_bytes_total_, + parent_.host_->cluster().stats().upstream_cx_rx_bytes_buffered_, + parent_.host_->cluster().stats().upstream_cx_tx_bytes_total_, + 
parent_.host_->cluster().stats().upstream_cx_tx_bytes_buffered_, + &parent_.host_->cluster().stats().bind_errors_, nullptr}); +} + +ConnPoolImpl::ActiveClient::~ActiveClient() { + parent_.host_->stats().cx_active_.dec(); + parent_.host_->cluster().stats().upstream_cx_active_.dec(); +} + +CodecClientPtr ProdConnPoolImpl::createCodecClient(Upstream::Host::CreateConnectionData& data) { + CodecClientPtr codec{new CodecClientProd(CodecClient::Type::HTTP2, std::move(data.connection_), + data.host_description_, dispatcher_)}; + return codec; +} + +uint32_t ProdConnPoolImpl::maxTotalStreams() { return MAX_STREAMS; } + +} // namespace Http2 +} // namespace Legacy +} // namespace Http +} // namespace Envoy diff --git a/source/common/http/http2/conn_pool_legacy.h b/source/common/http/http2/conn_pool_legacy.h new file mode 100644 index 0000000000000..6fef4d989de0b --- /dev/null +++ b/source/common/http/http2/conn_pool_legacy.h @@ -0,0 +1,121 @@ +#pragma once + +#include +#include +#include + +#include "envoy/event/timer.h" +#include "envoy/http/conn_pool.h" +#include "envoy/network/connection.h" +#include "envoy/stats/timespan.h" +#include "envoy/upstream/upstream.h" + +#include "common/http/codec_client.h" +#include "common/http/conn_pool_base_legacy.h" + +namespace Envoy { +namespace Http { +namespace Legacy { +namespace Http2 { + +/** + * Implementation of a "connection pool" for HTTP/2. This mainly handles stats as well as + * shifting to a new connection if we reach max streams on the primary. This is a base class + * used for both the prod implementation as well as the testing one. 
+ */ +class ConnPoolImpl : public ConnectionPool::Instance, public Legacy::ConnPoolImplBase { +public: + ConnPoolImpl(Event::Dispatcher& dispatcher, Upstream::HostConstSharedPtr host, + Upstream::ResourcePriority priority, + const Network::ConnectionSocket::OptionsSharedPtr& options, + const Network::TransportSocketOptionsSharedPtr& transport_socket_options); + ~ConnPoolImpl() override; + + // Http::ConnectionPool::Instance + Http::Protocol protocol() const override { return Http::Protocol::Http2; } + void addDrainedCallback(DrainedCb cb) override; + void drainConnections() override; + bool hasActiveConnections() const override; + ConnectionPool::Cancellable* newStream(Http::StreamDecoder& response_decoder, + ConnectionPool::Callbacks& callbacks) override; + Upstream::HostDescriptionConstSharedPtr host() const override { return host_; }; + +protected: + struct ActiveClient : ConnPoolImplBase::ActiveClient, + public Network::ConnectionCallbacks, + public CodecClientCallbacks, + public Event::DeferredDeletable, + public Http::ConnectionCallbacks { + ActiveClient(ConnPoolImpl& parent); + ~ActiveClient() override; + + void onConnectTimeout() override { parent_.onConnectTimeout(*this); } + + // Network::ConnectionCallbacks + void onEvent(Network::ConnectionEvent event) override { + parent_.onConnectionEvent(*this, event); + } + void onAboveWriteBufferHighWatermark() override {} + void onBelowWriteBufferLowWatermark() override {} + + // CodecClientCallbacks + void onStreamDestroy() override { parent_.onStreamDestroy(*this); } + void onStreamReset(Http::StreamResetReason reason) override { + parent_.onStreamReset(*this, reason); + } + + // Http::ConnectionCallbacks + void onGoAway() override { parent_.onGoAway(*this); } + + ConnPoolImpl& parent_; + CodecClientPtr client_; + Upstream::HostDescriptionConstSharedPtr real_host_description_; + uint64_t total_streams_{}; + bool upstream_ready_{}; + bool closed_with_active_rq_{}; + }; + + using ActiveClientPtr = std::unique_ptr; 
+ + // Http::ConnPoolImplBase + void checkForDrained() override; + + virtual CodecClientPtr createCodecClient(Upstream::Host::CreateConnectionData& data) PURE; + virtual uint32_t maxTotalStreams() PURE; + void movePrimaryClientToDraining(); + void onConnectionEvent(ActiveClient& client, Network::ConnectionEvent event); + void onConnectTimeout(ActiveClient& client); + void onGoAway(ActiveClient& client); + void onStreamDestroy(ActiveClient& client); + void onStreamReset(ActiveClient& client, Http::StreamResetReason reason); + void newClientStream(Http::StreamDecoder& response_decoder, ConnectionPool::Callbacks& callbacks); + void onUpstreamReady(); + + Event::Dispatcher& dispatcher_; + ActiveClientPtr primary_client_; + ActiveClientPtr draining_client_; + std::list drained_callbacks_; + const Network::ConnectionSocket::OptionsSharedPtr socket_options_; + const Network::TransportSocketOptionsSharedPtr transport_socket_options_; +}; + +/** + * Production implementation of the HTTP/2 connection pool. + */ +class ProdConnPoolImpl : public ConnPoolImpl { +public: + using ConnPoolImpl::ConnPoolImpl; + +private: + CodecClientPtr createCodecClient(Upstream::Host::CreateConnectionData& data) override; + uint32_t maxTotalStreams() override; + + // All streams are 2^31. Client streams are half that, minus stream 0. Just to be on the safe + // side we do 2^29. 
+ static const uint64_t MAX_STREAMS = (1 << 29); +}; + +} // namespace Http2 +} // namespace Legacy +} // namespace Http +} // namespace Envoy diff --git a/source/common/runtime/runtime_features.cc b/source/common/runtime/runtime_features.cc index 0f2432524e26d..9d88371b309aa 100644 --- a/source/common/runtime/runtime_features.cc +++ b/source/common/runtime/runtime_features.cc @@ -32,6 +32,8 @@ constexpr const char* runtime_features[] = { "envoy.reloadable_features.strict_authority_validation", "envoy.reloadable_features.reject_unsupported_transfer_encodings", "envoy.reloadable_features.strict_method_validation", + "envoy.reloadable_features.new_http1_connection_pool_behavior", + "envoy.reloadable_features.new_http2_connection_pool_behavior", }; // This is a section for officially sanctioned runtime features which are too diff --git a/source/common/upstream/cluster_manager_impl.cc b/source/common/upstream/cluster_manager_impl.cc index dc868e33381ea..c462d26a08b7d 100644 --- a/source/common/upstream/cluster_manager_impl.cc +++ b/source/common/upstream/cluster_manager_impl.cc @@ -1330,15 +1330,14 @@ Http::ConnectionPool::InstancePtr ProdClusterManagerFactory::allocateConnPool( const Network::TransportSocketOptionsSharedPtr& transport_socket_options) { if (protocol == Http::Protocol::Http2 && runtime_.snapshot().featureEnabled("upstream.use_http2", 100)) { - return std::make_unique(dispatcher, host, priority, options, - transport_socket_options); + return Http::Http2::allocateConnPool(dispatcher, host, priority, options, + transport_socket_options); } else if (protocol == Http::Protocol::Http3) { // Quic connection pool is not implemented. 
NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } else { - return std::make_unique(dispatcher, host, priority, options, - host->cluster().http1Settings(), - transport_socket_options); + return Http::Http1::allocateConnPool(dispatcher, host, priority, options, + transport_socket_options); } } diff --git a/test/common/http/http1/BUILD b/test/common/http/http1/BUILD index e117fa725623c..64e734e4620d0 100644 --- a/test/common/http/http1/BUILD +++ b/test/common/http/http1/BUILD @@ -63,3 +63,27 @@ envoy_cc_test( "//test/test_common:utility_lib", ], ) + +envoy_cc_test( + name = "conn_pool_legacy_test", + srcs = ["conn_pool_legacy_test.cc"], + deps = [ + "//source/common/buffer:buffer_lib", + "//source/common/event:dispatcher_lib", + "//source/common/http:codec_client_lib", + "//source/common/http/http1:conn_pool_legacy_lib", + "//source/common/network:utility_lib", + "//source/common/upstream:upstream_includes", + "//source/common/upstream:upstream_lib", + "//test/common/http:common_lib", + "//test/common/upstream:utility_lib", + "//test/mocks/buffer:buffer_mocks", + "//test/mocks/event:event_mocks", + "//test/mocks/http:http_mocks", + "//test/mocks/network:network_mocks", + "//test/mocks/runtime:runtime_mocks", + "//test/mocks/upstream:upstream_mocks", + "//test/test_common:simulated_time_system_lib", + "//test/test_common:utility_lib", + ], +) diff --git a/test/common/http/http1/conn_pool_legacy_test.cc b/test/common/http/http1/conn_pool_legacy_test.cc new file mode 100644 index 0000000000000..adad3d7a83b7d --- /dev/null +++ b/test/common/http/http1/conn_pool_legacy_test.cc @@ -0,0 +1,959 @@ +#include +#include + +#include "envoy/http/codec.h" + +#include "common/buffer/buffer_impl.h" +#include "common/event/dispatcher_impl.h" +#include "common/http/codec_client.h" +#include "common/http/http1/conn_pool_legacy.h" +#include "common/network/utility.h" +#include "common/upstream/upstream_impl.h" + +#include "test/common/http/common.h" +#include "test/common/upstream/utility.h" +#include 
"test/mocks/buffer/mocks.h" +#include "test/mocks/event/mocks.h" +#include "test/mocks/http/mocks.h" +#include "test/mocks/network/mocks.h" +#include "test/mocks/runtime/mocks.h" +#include "test/mocks/upstream/mocks.h" +#include "test/test_common/printers.h" +#include "test/test_common/simulated_time_system.h" +#include "test/test_common/utility.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +using testing::_; +using testing::DoAll; +using testing::InSequence; +using testing::Invoke; +using testing::NiceMock; +using testing::Property; +using testing::Return; +using testing::ReturnRef; +using testing::SaveArg; + +namespace Envoy { +namespace Http { +namespace Legacy { +namespace Http1 { +namespace { + +/** + * A test version of ConnPoolImpl that allows for mocking beneath the codec clients. + */ +class ConnPoolImplForTest : public ConnPoolImpl { +public: + ConnPoolImplForTest(Event::MockDispatcher& dispatcher, + Upstream::ClusterInfoConstSharedPtr cluster, + NiceMock* upstream_ready_timer) + : ConnPoolImpl(dispatcher, Upstream::makeTestHost(cluster, "tcp://127.0.0.1:9000"), + Upstream::ResourcePriority::Default, nullptr, nullptr), + api_(Api::createApiForTest()), mock_dispatcher_(dispatcher), + mock_upstream_ready_timer_(upstream_ready_timer) {} + + ~ConnPoolImplForTest() override { + EXPECT_EQ(0U, ready_clients_.size()); + EXPECT_EQ(0U, busy_clients_.size()); + EXPECT_EQ(0U, pending_requests_.size()); + } + + struct TestCodecClient { + Http::MockClientConnection* codec_; + Network::MockClientConnection* connection_; + CodecClient* codec_client_; + Event::MockTimer* connect_timer_; + Event::DispatcherPtr client_dispatcher_; + }; + + CodecClientPtr createCodecClient(Upstream::Host::CreateConnectionData& data) override { + // We expect to own the connection, but already have it, so just release it to prevent it from + // getting deleted. 
+ data.connection_.release(); + return CodecClientPtr{createCodecClient_()}; + } + + MOCK_METHOD0(createCodecClient_, CodecClient*()); + MOCK_METHOD0(onClientDestroy, void()); + + void expectClientCreate(Protocol protocol = Protocol::Http11) { + test_clients_.emplace_back(); + TestCodecClient& test_client = test_clients_.back(); + test_client.connection_ = new NiceMock(); + test_client.codec_ = new NiceMock(); + test_client.connect_timer_ = new NiceMock(&mock_dispatcher_); + std::shared_ptr cluster{new NiceMock()}; + test_client.client_dispatcher_ = api_->allocateDispatcher(); + Network::ClientConnectionPtr connection{test_client.connection_}; + test_client.codec_client_ = new CodecClientForTest( + CodecClient::Type::HTTP1, std::move(connection), test_client.codec_, + [this](CodecClient* codec_client) -> void { + for (auto i = test_clients_.begin(); i != test_clients_.end(); i++) { + if (i->codec_client_ == codec_client) { + onClientDestroy(); + test_clients_.erase(i); + return; + } + } + }, + Upstream::makeTestHost(cluster, "tcp://127.0.0.1:9000"), *test_client.client_dispatcher_); + EXPECT_CALL(*test_client.connect_timer_, enableTimer(_, _)); + EXPECT_CALL(mock_dispatcher_, createClientConnection_(_, _, _, _)) + .WillOnce(Return(test_client.connection_)); + EXPECT_CALL(*this, createCodecClient_()).WillOnce(Return(test_client.codec_client_)); + ON_CALL(*test_client.codec_, protocol()).WillByDefault(Return(protocol)); + } + + void expectEnableUpstreamReady() { + EXPECT_FALSE(upstream_ready_enabled_); + EXPECT_CALL(*mock_upstream_ready_timer_, enableTimer(_, _)).Times(1).RetiresOnSaturation(); + } + + void expectAndRunUpstreamReady() { + EXPECT_TRUE(upstream_ready_enabled_); + mock_upstream_ready_timer_->invokeCallback(); + EXPECT_FALSE(upstream_ready_enabled_); + } + + Api::ApiPtr api_; + Event::MockDispatcher& mock_dispatcher_; + NiceMock* mock_upstream_ready_timer_; + std::vector test_clients_; +}; + +/** + * Test fixture for all connection pool tests. 
+ */ +class Http1ConnPoolImplLegacyTest : public testing::Test { +public: + Http1ConnPoolImplLegacyTest() + : upstream_ready_timer_(new NiceMock(&dispatcher_)), + conn_pool_(dispatcher_, cluster_, upstream_ready_timer_) {} + + ~Http1ConnPoolImplLegacyTest() override { + EXPECT_TRUE(TestUtility::gaugesZeroed(cluster_->stats_store_.gauges())); + } + + NiceMock dispatcher_; + std::shared_ptr cluster_{new NiceMock()}; + NiceMock* upstream_ready_timer_; + ConnPoolImplForTest conn_pool_; + NiceMock runtime_; +}; + +/** + * Helper for dealing with an active test request. + */ +struct ActiveTestRequest { + enum class Type { Pending, CreateConnection, Immediate }; + + ActiveTestRequest(Http1ConnPoolImplLegacyTest& parent, size_t client_index, Type type) + : parent_(parent), client_index_(client_index) { + uint64_t active_rq_observed = + parent_.cluster_->resourceManager(Upstream::ResourcePriority::Default).requests().count(); + uint64_t current_rq_total = parent_.cluster_->stats_.upstream_rq_total_.value(); + if (type == Type::CreateConnection) { + parent.conn_pool_.expectClientCreate(); + } + + if (type == Type::Immediate) { + expectNewStream(); + } + + handle_ = parent.conn_pool_.newStream(outer_decoder_, callbacks_); + + if (type == Type::Immediate) { + EXPECT_EQ(nullptr, handle_); + } else { + EXPECT_NE(nullptr, handle_); + } + + if (type == Type::CreateConnection) { + EXPECT_CALL(*parent_.conn_pool_.test_clients_[client_index_].connect_timer_, disableTimer()); + expectNewStream(); + parent.conn_pool_.test_clients_[client_index_].connection_->raiseEvent( + Network::ConnectionEvent::Connected); + } + if (type != Type::Pending) { + EXPECT_EQ(current_rq_total + 1, parent_.cluster_->stats_.upstream_rq_total_.value()); + EXPECT_EQ(active_rq_observed + 1, + parent_.cluster_->resourceManager(Upstream::ResourcePriority::Default) + .requests() + .count()); + } + } + + void completeResponse(bool with_body) { + // Test additional metric writes also. 
+ Http::HeaderMapPtr response_headers( + new TestHeaderMapImpl{{":status", "200"}, {"x-envoy-upstream-canary", "true"}}); + + inner_decoder_->decodeHeaders(std::move(response_headers), !with_body); + if (with_body) { + Buffer::OwnedImpl data; + inner_decoder_->decodeData(data, true); + } + } + + void expectNewStream() { + EXPECT_CALL(*parent_.conn_pool_.test_clients_[client_index_].codec_, newStream(_)) + .WillOnce(DoAll(SaveArgAddress(&inner_decoder_), ReturnRef(request_encoder_))); + EXPECT_CALL(callbacks_.pool_ready_, ready()); + } + + void startRequest() { callbacks_.outer_encoder_->encodeHeaders(TestHeaderMapImpl{}, true); } + + Http1ConnPoolImplLegacyTest& parent_; + size_t client_index_; + NiceMock outer_decoder_; + Http::ConnectionPool::Cancellable* handle_{}; + NiceMock request_encoder_; + Http::StreamDecoder* inner_decoder_{}; + ConnPoolCallbacks callbacks_; +}; + +/** + * Verify that the pool's host is a member of the cluster the pool was constructed with. + */ +TEST_F(Http1ConnPoolImplLegacyTest, Host) { + EXPECT_EQ(cluster_.get(), &conn_pool_.host()->cluster()); +} + +/** + * Verify that connections are drained when requested. + */ +TEST_F(Http1ConnPoolImplLegacyTest, DrainConnections) { + cluster_->resetResourceManager(2, 1024, 1024, 1, 1); + InSequence s; + + ActiveTestRequest r1(*this, 0, ActiveTestRequest::Type::CreateConnection); + r1.startRequest(); + + ActiveTestRequest r2(*this, 1, ActiveTestRequest::Type::CreateConnection); + r2.startRequest(); + + r1.completeResponse(false); + + // This will destroy the ready client and set requests remaining to 1 on the busy client. + conn_pool_.drainConnections(); + EXPECT_CALL(conn_pool_, onClientDestroy()); + dispatcher_.clearDeferredDeleteList(); + + // This will destroy the busy client when the response finishes. + r2.completeResponse(false); + EXPECT_CALL(conn_pool_, onClientDestroy()); + dispatcher_.clearDeferredDeleteList(); +} + +/** + * Test all timing stats are set. 
+ */ +TEST_F(Http1ConnPoolImplLegacyTest, VerifyTimingStats) { + EXPECT_CALL(cluster_->stats_store_, + deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_cx_connect_ms"), _)); + EXPECT_CALL(cluster_->stats_store_, + deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_cx_length_ms"), _)); + + ActiveTestRequest r1(*this, 0, ActiveTestRequest::Type::CreateConnection); + r1.startRequest(); + r1.completeResponse(false); + + EXPECT_CALL(conn_pool_, onClientDestroy()); + conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + dispatcher_.clearDeferredDeleteList(); +} + +/** + * Test that buffer limits are set. + */ +TEST_F(Http1ConnPoolImplLegacyTest, VerifyBufferLimits) { + NiceMock outer_decoder; + ConnPoolCallbacks callbacks; + conn_pool_.expectClientCreate(); + EXPECT_CALL(*cluster_, perConnectionBufferLimitBytes()).WillOnce(Return(8192)); + EXPECT_CALL(*conn_pool_.test_clients_.back().connection_, setBufferLimits(8192)); + Http::ConnectionPool::Cancellable* handle = conn_pool_.newStream(outer_decoder, callbacks); + EXPECT_NE(nullptr, handle); + + EXPECT_CALL(conn_pool_, onClientDestroy()); + EXPECT_CALL(callbacks.pool_failure_, ready()); + conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + dispatcher_.clearDeferredDeleteList(); +} + +/** + * Verify that canceling pending connections within the callback works. + */ +TEST_F(Http1ConnPoolImplLegacyTest, VerifyCancelInCallback) { + Http::ConnectionPool::Cancellable* handle1{}; + // In this scenario, all connections must succeed, so when + // one fails, the others are canceled. + // Note: We rely on the fact that the implementation cancels the second request first, + // to simplify the test. 
+ ConnPoolCallbacks callbacks1; + EXPECT_CALL(callbacks1.pool_failure_, ready()).Times(0); + ConnPoolCallbacks callbacks2; + EXPECT_CALL(callbacks2.pool_failure_, ready()).WillOnce(Invoke([&]() -> void { + handle1->cancel(); + })); + + NiceMock outer_decoder; + // Create the first client. + conn_pool_.expectClientCreate(); + handle1 = conn_pool_.newStream(outer_decoder, callbacks1); + ASSERT_NE(nullptr, handle1); + + // Create the second client. + Http::ConnectionPool::Cancellable* handle2 = conn_pool_.newStream(outer_decoder, callbacks2); + ASSERT_NE(nullptr, handle2); + + // Simulate connection failure. + EXPECT_CALL(conn_pool_, onClientDestroy()); + conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + dispatcher_.clearDeferredDeleteList(); +} + +/** + * Tests a request that generates a new connection, completes, and then a second request that uses + * the same connection. + */ +TEST_F(Http1ConnPoolImplLegacyTest, MultipleRequestAndResponse) { + InSequence s; + + // Request 1 should kick off a new connection. + ActiveTestRequest r1(*this, 0, ActiveTestRequest::Type::CreateConnection); + r1.startRequest(); + r1.completeResponse(false); + + // Request 2 should not. + ActiveTestRequest r2(*this, 0, ActiveTestRequest::Type::Immediate); + r2.startRequest(); + r2.completeResponse(true); + + // Cause the connection to go away. + EXPECT_CALL(conn_pool_, onClientDestroy()); + conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + dispatcher_.clearDeferredDeleteList(); +} + +/** + * Test when we overflow max pending requests. 
+ */ +TEST_F(Http1ConnPoolImplLegacyTest, MaxPendingRequests) { + cluster_->resetResourceManager(1, 1, 1024, 1, 1); + + EXPECT_EQ(0U, cluster_->circuit_breakers_stats_.rq_pending_open_.value()); + + NiceMock outer_decoder; + ConnPoolCallbacks callbacks; + conn_pool_.expectClientCreate(); + Http::ConnectionPool::Cancellable* handle = conn_pool_.newStream(outer_decoder, callbacks); + EXPECT_NE(nullptr, handle); + + NiceMock outer_decoder2; + ConnPoolCallbacks callbacks2; + EXPECT_CALL(callbacks2.pool_failure_, ready()); + Http::ConnectionPool::Cancellable* handle2 = conn_pool_.newStream(outer_decoder2, callbacks2); + EXPECT_EQ(nullptr, handle2); + + EXPECT_EQ(1U, cluster_->circuit_breakers_stats_.rq_pending_open_.value()); + + handle->cancel(); + + EXPECT_CALL(conn_pool_, onClientDestroy()); + conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + dispatcher_.clearDeferredDeleteList(); + + EXPECT_EQ(1U, cluster_->stats_.upstream_rq_pending_overflow_.value()); +} + +/** + * Tests a connection failure before a request is bound which should result in the pending request + * getting purged. + */ +TEST_F(Http1ConnPoolImplLegacyTest, ConnectFailure) { + InSequence s; + + // Request 1 should kick off a new connection. 
+ NiceMock outer_decoder; + ConnPoolCallbacks callbacks; + conn_pool_.expectClientCreate(); + Http::ConnectionPool::Cancellable* handle = conn_pool_.newStream(outer_decoder, callbacks); + EXPECT_NE(nullptr, handle); + + EXPECT_CALL(callbacks.pool_failure_, ready()); + EXPECT_CALL(*conn_pool_.test_clients_[0].connect_timer_, disableTimer()); + conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + EXPECT_CALL(conn_pool_, onClientDestroy()); + dispatcher_.clearDeferredDeleteList(); + + EXPECT_EQ(1U, cluster_->stats_.upstream_cx_connect_fail_.value()); + EXPECT_EQ(1U, cluster_->stats_.upstream_rq_pending_failure_eject_.value()); +} + +/** + * Tests that connection creation time is recorded correctly even in cases where + * there are multiple pending connection creation attempts to the same upstream. + */ +TEST_F(Http1ConnPoolImplLegacyTest, MeasureConnectTime) { + constexpr uint64_t sleep1_ms = 20; + constexpr uint64_t sleep2_ms = 10; + constexpr uint64_t sleep3_ms = 5; + Event::SimulatedTimeSystem simulated_time; + + // Allow concurrent creation of 2 upstream connections. + cluster_->resetResourceManager(2, 1024, 1024, 1, 1); + + InSequence s; + + // Start the first connect attempt. + conn_pool_.expectClientCreate(); + ActiveTestRequest r1(*this, 0, ActiveTestRequest::Type::Pending); + + // Move time forward and start the second connect attempt. + simulated_time.sleep(std::chrono::milliseconds(sleep1_ms)); + conn_pool_.expectClientCreate(); + ActiveTestRequest r2(*this, 1, ActiveTestRequest::Type::Pending); + + // Move time forward, signal that the first connect completed and verify the time to connect. 
+ uint64_t upstream_cx_connect_ms1 = 0; + simulated_time.sleep(std::chrono::milliseconds(sleep2_ms)); + EXPECT_CALL(*conn_pool_.test_clients_[0].connect_timer_, disableTimer()); + EXPECT_CALL(cluster_->stats_store_, + deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_cx_connect_ms"), _)) + .WillOnce(SaveArg<1>(&upstream_cx_connect_ms1)); + r1.expectNewStream(); + conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); + EXPECT_EQ(sleep1_ms + sleep2_ms, upstream_cx_connect_ms1); + + // Move time forward, signal that the second connect completed and verify the time to connect. + uint64_t upstream_cx_connect_ms2 = 0; + simulated_time.sleep(std::chrono::milliseconds(sleep3_ms)); + EXPECT_CALL(*conn_pool_.test_clients_[1].connect_timer_, disableTimer()); + EXPECT_CALL(cluster_->stats_store_, + deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_cx_connect_ms"), _)) + .WillOnce(SaveArg<1>(&upstream_cx_connect_ms2)); + r2.expectNewStream(); + conn_pool_.test_clients_[1].connection_->raiseEvent(Network::ConnectionEvent::Connected); + EXPECT_EQ(sleep2_ms + sleep3_ms, upstream_cx_connect_ms2); + + // Cleanup, cause the connections to go away. + for (auto& test_client : conn_pool_.test_clients_) { + EXPECT_CALL(conn_pool_, onClientDestroy()); + EXPECT_CALL( + cluster_->stats_store_, + deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_cx_length_ms"), _)); + test_client.connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + } + dispatcher_.clearDeferredDeleteList(); +} + +/** + * Tests a connect timeout. Also test that we can add a new request during ejection processing. + */ +TEST_F(Http1ConnPoolImplLegacyTest, ConnectTimeout) { + InSequence s; + + // Request 1 should kick off a new connection. 
+ NiceMock outer_decoder1; + ConnPoolCallbacks callbacks1; + conn_pool_.expectClientCreate(); + EXPECT_NE(nullptr, conn_pool_.newStream(outer_decoder1, callbacks1)); + + NiceMock outer_decoder2; + ConnPoolCallbacks callbacks2; + EXPECT_CALL(callbacks1.pool_failure_, ready()).WillOnce(Invoke([&]() -> void { + conn_pool_.expectClientCreate(); + EXPECT_NE(nullptr, conn_pool_.newStream(outer_decoder2, callbacks2)); + })); + + conn_pool_.test_clients_[0].connect_timer_->invokeCallback(); + + EXPECT_CALL(callbacks2.pool_failure_, ready()); + conn_pool_.test_clients_[1].connect_timer_->invokeCallback(); + + EXPECT_CALL(conn_pool_, onClientDestroy()).Times(2); + dispatcher_.clearDeferredDeleteList(); + + EXPECT_EQ(0U, cluster_->stats_.upstream_rq_total_.value()); + EXPECT_EQ(2U, cluster_->stats_.upstream_cx_connect_fail_.value()); + EXPECT_EQ(2U, cluster_->stats_.upstream_cx_connect_timeout_.value()); +} + +/** + * Test cancelling before the request is bound to a connection. + */ +TEST_F(Http1ConnPoolImplLegacyTest, CancelBeforeBound) { + InSequence s; + + // Request 1 should kick off a new connection. + NiceMock outer_decoder; + ConnPoolCallbacks callbacks; + conn_pool_.expectClientCreate(); + Http::ConnectionPool::Cancellable* handle = conn_pool_.newStream(outer_decoder, callbacks); + EXPECT_NE(nullptr, handle); + + handle->cancel(); + conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); + + // Cause the connection to go away. + EXPECT_CALL(conn_pool_, onClientDestroy()); + conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + dispatcher_.clearDeferredDeleteList(); +} + +/** + * Test an upstream disconnection while there is a bound request. + */ +TEST_F(Http1ConnPoolImplLegacyTest, DisconnectWhileBound) { + InSequence s; + + // Request 1 should kick off a new connection. 
+ NiceMock outer_decoder; + ConnPoolCallbacks callbacks; + conn_pool_.expectClientCreate(); + Http::ConnectionPool::Cancellable* handle = conn_pool_.newStream(outer_decoder, callbacks); + EXPECT_NE(nullptr, handle); + + NiceMock request_encoder; + Http::StreamDecoder* inner_decoder; + EXPECT_CALL(*conn_pool_.test_clients_[0].codec_, newStream(_)) + .WillOnce(DoAll(SaveArgAddress(&inner_decoder), ReturnRef(request_encoder))); + EXPECT_CALL(callbacks.pool_ready_, ready()); + + conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); + + // We should get a reset callback when the connection disconnects. + Http::MockStreamCallbacks stream_callbacks; + EXPECT_CALL(stream_callbacks, onResetStream(StreamResetReason::ConnectionTermination, _)); + request_encoder.getStream().addCallbacks(stream_callbacks); + + // Kill the connection while it has an active request. + EXPECT_CALL(conn_pool_, onClientDestroy()); + conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + dispatcher_.clearDeferredDeleteList(); +} + +/** + * Test that we correctly handle reaching max connections. + */ +TEST_F(Http1ConnPoolImplLegacyTest, MaxConnections) { + InSequence s; + + EXPECT_EQ(0U, cluster_->circuit_breakers_stats_.cx_open_.value()); + + // Request 1 should kick off a new connection. + NiceMock outer_decoder1; + ConnPoolCallbacks callbacks; + conn_pool_.expectClientCreate(); + Http::ConnectionPool::Cancellable* handle = conn_pool_.newStream(outer_decoder1, callbacks); + + EXPECT_NE(nullptr, handle); + + // Request 2 should not kick off a new connection. + NiceMock outer_decoder2; + ConnPoolCallbacks callbacks2; + handle = conn_pool_.newStream(outer_decoder2, callbacks2); + EXPECT_EQ(1U, cluster_->stats_.upstream_cx_overflow_.value()); + EXPECT_EQ(1U, cluster_->circuit_breakers_stats_.cx_open_.value()); + + EXPECT_NE(nullptr, handle); + + // Connect event will bind to request 1. 
+ NiceMock request_encoder; + Http::StreamDecoder* inner_decoder; + EXPECT_CALL(*conn_pool_.test_clients_[0].codec_, newStream(_)) + .WillOnce(DoAll(SaveArgAddress(&inner_decoder), ReturnRef(request_encoder))); + EXPECT_CALL(callbacks.pool_ready_, ready()); + + conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); + + // Finishing request 1 will immediately bind to request 2. + conn_pool_.expectEnableUpstreamReady(); + EXPECT_CALL(*conn_pool_.test_clients_[0].codec_, newStream(_)) + .WillOnce(DoAll(SaveArgAddress(&inner_decoder), ReturnRef(request_encoder))); + EXPECT_CALL(callbacks2.pool_ready_, ready()); + + callbacks.outer_encoder_->encodeHeaders(TestHeaderMapImpl{}, true); + Http::HeaderMapPtr response_headers(new TestHeaderMapImpl{{":status", "200"}}); + inner_decoder->decodeHeaders(std::move(response_headers), true); + + conn_pool_.expectAndRunUpstreamReady(); + callbacks2.outer_encoder_->encodeHeaders(TestHeaderMapImpl{}, true); + // N.B. clang_tidy insists that we use std::make_unique which can not infer std::initialize_list. + response_headers = std::make_unique( + std::initializer_list>{{":status", "200"}}); + inner_decoder->decodeHeaders(std::move(response_headers), true); + + // Cause the connection to go away. + EXPECT_CALL(conn_pool_, onClientDestroy()); + conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + dispatcher_.clearDeferredDeleteList(); +} + +/** + * Test when upstream closes connection without 'connection: close' like + * https://github.com/envoyproxy/envoy/pull/2715 + */ +TEST_F(Http1ConnPoolImplLegacyTest, ConnectionCloseWithoutHeader) { + InSequence s; + + // Request 1 should kick off a new connection. 
+ NiceMock outer_decoder1; + ConnPoolCallbacks callbacks; + conn_pool_.expectClientCreate(); + Http::ConnectionPool::Cancellable* handle = conn_pool_.newStream(outer_decoder1, callbacks); + + EXPECT_NE(nullptr, handle); + + // Request 2 should not kick off a new connection. + NiceMock outer_decoder2; + ConnPoolCallbacks callbacks2; + handle = conn_pool_.newStream(outer_decoder2, callbacks2); + EXPECT_EQ(1U, cluster_->stats_.upstream_cx_overflow_.value()); + + EXPECT_NE(nullptr, handle); + + // Connect event will bind to request 1. + NiceMock request_encoder; + Http::StreamDecoder* inner_decoder; + EXPECT_CALL(*conn_pool_.test_clients_[0].codec_, newStream(_)) + .WillOnce(DoAll(SaveArgAddress(&inner_decoder), ReturnRef(request_encoder))); + EXPECT_CALL(callbacks.pool_ready_, ready()); + + conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); + + // Finishing request 1 will schedule binding the connection to request 2. + conn_pool_.expectEnableUpstreamReady(); + + callbacks.outer_encoder_->encodeHeaders(TestHeaderMapImpl{}, true); + Http::HeaderMapPtr response_headers(new TestHeaderMapImpl{{":status", "200"}}); + inner_decoder->decodeHeaders(std::move(response_headers), true); + + // Cause the connection to go away. + conn_pool_.expectClientCreate(); + EXPECT_CALL(conn_pool_, onClientDestroy()); + conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + dispatcher_.clearDeferredDeleteList(); + + conn_pool_.expectAndRunUpstreamReady(); + + EXPECT_CALL(*conn_pool_.test_clients_[0].codec_, newStream(_)) + .WillOnce(DoAll(SaveArgAddress(&inner_decoder), ReturnRef(request_encoder))); + EXPECT_CALL(callbacks2.pool_ready_, ready()); + conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); + + callbacks2.outer_encoder_->encodeHeaders(TestHeaderMapImpl{}, true); + // N.B. clang_tidy insists that we use std::make_unique which can not infer std::initialize_list. 
+ response_headers = std::make_unique( + std::initializer_list>{{":status", "200"}}); + inner_decoder->decodeHeaders(std::move(response_headers), true); + + EXPECT_CALL(conn_pool_, onClientDestroy()); + conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + dispatcher_.clearDeferredDeleteList(); +} + +/** + * Test when upstream sends us 'connection: close' + */ +TEST_F(Http1ConnPoolImplLegacyTest, ConnectionCloseHeader) { + InSequence s; + + // Request 1 should kick off a new connection. + NiceMock outer_decoder; + ConnPoolCallbacks callbacks; + conn_pool_.expectClientCreate(); + Http::ConnectionPool::Cancellable* handle = conn_pool_.newStream(outer_decoder, callbacks); + + EXPECT_NE(nullptr, handle); + + NiceMock request_encoder; + Http::StreamDecoder* inner_decoder; + EXPECT_CALL(*conn_pool_.test_clients_[0].codec_, newStream(_)) + .WillOnce(DoAll(SaveArgAddress(&inner_decoder), ReturnRef(request_encoder))); + EXPECT_CALL(callbacks.pool_ready_, ready()); + + conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); + callbacks.outer_encoder_->encodeHeaders(TestHeaderMapImpl{}, true); + + // Response with 'connection: close' which should cause the connection to go away. + EXPECT_CALL(conn_pool_, onClientDestroy()); + Http::HeaderMapPtr response_headers( + new TestHeaderMapImpl{{":status", "200"}, {"Connection", "Close"}}); + inner_decoder->decodeHeaders(std::move(response_headers), true); + dispatcher_.clearDeferredDeleteList(); + + EXPECT_EQ(0U, cluster_->stats_.upstream_cx_destroy_with_active_rq_.value()); +} + +/** + * Test when upstream sends us 'proxy-connection: close' + */ +TEST_F(Http1ConnPoolImplLegacyTest, ProxyConnectionCloseHeader) { + InSequence s; + + // Request 1 should kick off a new connection. 
+ NiceMock outer_decoder; + ConnPoolCallbacks callbacks; + conn_pool_.expectClientCreate(); + Http::ConnectionPool::Cancellable* handle = conn_pool_.newStream(outer_decoder, callbacks); + + EXPECT_NE(nullptr, handle); + + NiceMock request_encoder; + Http::StreamDecoder* inner_decoder; + EXPECT_CALL(*conn_pool_.test_clients_[0].codec_, newStream(_)) + .WillOnce(DoAll(SaveArgAddress(&inner_decoder), ReturnRef(request_encoder))); + EXPECT_CALL(callbacks.pool_ready_, ready()); + + conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); + callbacks.outer_encoder_->encodeHeaders(TestHeaderMapImpl{}, true); + + // Response with 'proxy-connection: close' which should cause the connection to go away. + EXPECT_CALL(conn_pool_, onClientDestroy()); + Http::HeaderMapPtr response_headers( + new TestHeaderMapImpl{{":status", "200"}, {"Proxy-Connection", "Close"}}); + inner_decoder->decodeHeaders(std::move(response_headers), true); + dispatcher_.clearDeferredDeleteList(); + + EXPECT_EQ(0U, cluster_->stats_.upstream_cx_destroy_with_active_rq_.value()); +} + +/** + * Test when upstream is HTTP/1.0 and does not send 'connection: keep-alive' + */ +TEST_F(Http1ConnPoolImplLegacyTest, Http10NoConnectionKeepAlive) { + InSequence s; + + // Request 1 should kick off a new connection. 
+ NiceMock outer_decoder; + ConnPoolCallbacks callbacks; + conn_pool_.expectClientCreate(Protocol::Http10); + Http::ConnectionPool::Cancellable* handle = conn_pool_.newStream(outer_decoder, callbacks); + + EXPECT_NE(nullptr, handle); + + NiceMock request_encoder; + Http::StreamDecoder* inner_decoder; + EXPECT_CALL(*conn_pool_.test_clients_[0].codec_, newStream(_)) + .WillOnce(DoAll(SaveArgAddress(&inner_decoder), ReturnRef(request_encoder))); + EXPECT_CALL(callbacks.pool_ready_, ready()); + + conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); + callbacks.outer_encoder_->encodeHeaders(TestHeaderMapImpl{}, true); + + // Response without 'connection: keep-alive' which should cause the connection to go away. + EXPECT_CALL(conn_pool_, onClientDestroy()); + Http::HeaderMapPtr response_headers( + new TestHeaderMapImpl{{":protocol", "HTTP/1.0"}, {":status", "200"}}); + inner_decoder->decodeHeaders(std::move(response_headers), true); + dispatcher_.clearDeferredDeleteList(); + + EXPECT_EQ(0U, cluster_->stats_.upstream_cx_destroy_with_active_rq_.value()); +} + +/** + * Test when we reach max requests per connection. + */ +TEST_F(Http1ConnPoolImplLegacyTest, MaxRequestsPerConnection) { + InSequence s; + + cluster_->max_requests_per_connection_ = 1; + + // Request 1 should kick off a new connection. 
+ NiceMock outer_decoder; + ConnPoolCallbacks callbacks; + conn_pool_.expectClientCreate(); + Http::ConnectionPool::Cancellable* handle = conn_pool_.newStream(outer_decoder, callbacks); + + EXPECT_NE(nullptr, handle); + + NiceMock request_encoder; + Http::StreamDecoder* inner_decoder; + EXPECT_CALL(*conn_pool_.test_clients_[0].codec_, newStream(_)) + .WillOnce(DoAll(SaveArgAddress(&inner_decoder), ReturnRef(request_encoder))); + EXPECT_CALL(callbacks.pool_ready_, ready()); + + conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); + callbacks.outer_encoder_->encodeHeaders(TestHeaderMapImpl{}, true); + + // Response with 'connection: close' which should cause the connection to go away. + EXPECT_CALL(conn_pool_, onClientDestroy()); + Http::HeaderMapPtr response_headers(new TestHeaderMapImpl{{":status", "200"}}); + inner_decoder->decodeHeaders(std::move(response_headers), true); + dispatcher_.clearDeferredDeleteList(); + + EXPECT_EQ(0U, cluster_->stats_.upstream_cx_destroy_with_active_rq_.value()); + EXPECT_EQ(1U, cluster_->stats_.upstream_cx_max_requests_.value()); +} + +TEST_F(Http1ConnPoolImplLegacyTest, ConcurrentConnections) { + cluster_->resetResourceManager(2, 1024, 1024, 1, 1); + InSequence s; + + ActiveTestRequest r1(*this, 0, ActiveTestRequest::Type::CreateConnection); + r1.startRequest(); + + ActiveTestRequest r2(*this, 1, ActiveTestRequest::Type::CreateConnection); + r2.startRequest(); + + ActiveTestRequest r3(*this, 0, ActiveTestRequest::Type::Pending); + + // Finish r1, which gets r3 going. + conn_pool_.expectEnableUpstreamReady(); + r3.expectNewStream(); + + r1.completeResponse(false); + conn_pool_.expectAndRunUpstreamReady(); + r3.startRequest(); + EXPECT_EQ(3U, cluster_->stats_.upstream_rq_total_.value()); + + r2.completeResponse(false); + r3.completeResponse(false); + + // Disconnect both clients. 
+ EXPECT_CALL(conn_pool_, onClientDestroy()).Times(2); + conn_pool_.test_clients_[1].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + dispatcher_.clearDeferredDeleteList(); + + EXPECT_EQ(2U, cluster_->stats_.upstream_cx_destroy_.value()); + EXPECT_EQ(2U, cluster_->stats_.upstream_cx_destroy_remote_.value()); +} + +TEST_F(Http1ConnPoolImplLegacyTest, DrainCallback) { + InSequence s; + ReadyWatcher drained; + + EXPECT_CALL(drained, ready()); + conn_pool_.addDrainedCallback([&]() -> void { drained.ready(); }); + + ActiveTestRequest r1(*this, 0, ActiveTestRequest::Type::CreateConnection); + ActiveTestRequest r2(*this, 0, ActiveTestRequest::Type::Pending); + r2.handle_->cancel(); + EXPECT_EQ(1U, cluster_->stats_.upstream_rq_total_.value()); + + EXPECT_CALL(drained, ready()); + r1.startRequest(); + r1.completeResponse(false); + + EXPECT_CALL(conn_pool_, onClientDestroy()); + dispatcher_.clearDeferredDeleteList(); +} + +// Test draining a connection pool that has a pending connection. 
+TEST_F(Http1ConnPoolImplLegacyTest, DrainWhileConnecting) { + InSequence s; + ReadyWatcher drained; + + NiceMock outer_decoder; + ConnPoolCallbacks callbacks; + conn_pool_.expectClientCreate(); + Http::ConnectionPool::Cancellable* handle = conn_pool_.newStream(outer_decoder, callbacks); + EXPECT_NE(nullptr, handle); + + conn_pool_.addDrainedCallback([&]() -> void { drained.ready(); }); + handle->cancel(); + EXPECT_CALL(*conn_pool_.test_clients_[0].connection_, + close(Network::ConnectionCloseType::NoFlush)); + EXPECT_CALL(drained, ready()); + conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); + + EXPECT_CALL(conn_pool_, onClientDestroy()); + dispatcher_.clearDeferredDeleteList(); +} + +TEST_F(Http1ConnPoolImplLegacyTest, RemoteCloseToCompleteResponse) { + InSequence s; + + NiceMock outer_decoder; + ConnPoolCallbacks callbacks; + conn_pool_.expectClientCreate(); + Http::ConnectionPool::Cancellable* handle = conn_pool_.newStream(outer_decoder, callbacks); + EXPECT_NE(nullptr, handle); + + NiceMock request_encoder; + Http::StreamDecoder* inner_decoder; + EXPECT_CALL(*conn_pool_.test_clients_[0].connect_timer_, disableTimer()); + EXPECT_CALL(*conn_pool_.test_clients_[0].codec_, newStream(_)) + .WillOnce(DoAll(SaveArgAddress(&inner_decoder), ReturnRef(request_encoder))); + EXPECT_CALL(callbacks.pool_ready_, ready()); + conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); + + callbacks.outer_encoder_->encodeHeaders(TestHeaderMapImpl{}, true); + + inner_decoder->decodeHeaders(HeaderMapPtr{new HeaderMapImpl{}}, false); + Buffer::OwnedImpl dummy_data("12345"); + inner_decoder->decodeData(dummy_data, false); + + Buffer::OwnedImpl empty_data; + EXPECT_CALL(*conn_pool_.test_clients_[0].codec_, dispatch(BufferEqual(&empty_data))) + .WillOnce(Invoke([&](Buffer::Instance& data) -> void { + // Simulate the onResponseComplete call to decodeData since dispatch is mocked out. 
+ inner_decoder->decodeData(data, true); + })); + + EXPECT_CALL(*conn_pool_.test_clients_[0].connection_, + close(Network::ConnectionCloseType::NoFlush)); + EXPECT_CALL(conn_pool_, onClientDestroy()); + conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + dispatcher_.clearDeferredDeleteList(); + + EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_.value()); + EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_remote_.value()); +} + +TEST_F(Http1ConnPoolImplLegacyTest, NoActiveConnectionsByDefault) { + EXPECT_FALSE(conn_pool_.hasActiveConnections()); +} + +TEST_F(Http1ConnPoolImplLegacyTest, ActiveRequestHasActiveConnectionsTrue) { + ActiveTestRequest r1(*this, 0, ActiveTestRequest::Type::CreateConnection); + r1.startRequest(); + + EXPECT_TRUE(conn_pool_.hasActiveConnections()); + + // cleanup + r1.completeResponse(false); + conn_pool_.drainConnections(); + EXPECT_CALL(conn_pool_, onClientDestroy()); + dispatcher_.clearDeferredDeleteList(); +} + +TEST_F(Http1ConnPoolImplLegacyTest, ResponseCompletedConnectionReadyNoActiveConnections) { + ActiveTestRequest r1(*this, 0, ActiveTestRequest::Type::CreateConnection); + r1.startRequest(); + r1.completeResponse(false); + + EXPECT_FALSE(conn_pool_.hasActiveConnections()); + + conn_pool_.drainConnections(); + EXPECT_CALL(conn_pool_, onClientDestroy()); + dispatcher_.clearDeferredDeleteList(); +} + +TEST_F(Http1ConnPoolImplLegacyTest, PendingRequestIsConsideredActive) { + conn_pool_.expectClientCreate(); + ActiveTestRequest r1(*this, 0, ActiveTestRequest::Type::Pending); + + EXPECT_TRUE(conn_pool_.hasActiveConnections()); + + EXPECT_CALL(conn_pool_, onClientDestroy()); + r1.handle_->cancel(); + EXPECT_EQ(0U, cluster_->stats_.upstream_rq_total_.value()); + conn_pool_.drainConnections(); + conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + dispatcher_.clearDeferredDeleteList(); + + EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_.value()); + 
EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_remote_.value()); +} + +} // namespace +} // namespace Http1 +} // namespace Legacy +} // namespace Http +} // namespace Envoy diff --git a/test/common/http/http1/conn_pool_test.cc b/test/common/http/http1/conn_pool_test.cc index 9e91a494468d6..e06808ca71703 100644 --- a/test/common/http/http1/conn_pool_test.cc +++ b/test/common/http/http1/conn_pool_test.cc @@ -49,7 +49,7 @@ class ConnPoolImplForTest : public ConnPoolImpl { Upstream::ClusterInfoConstSharedPtr cluster, NiceMock* upstream_ready_timer) : ConnPoolImpl(dispatcher, Upstream::makeTestHost(cluster, "tcp://127.0.0.1:9000"), - Upstream::ResourcePriority::Default, nullptr, Http1Settings(), nullptr), + Upstream::ResourcePriority::Default, nullptr, nullptr), api_(Api::createApiForTest()), mock_dispatcher_(dispatcher), mock_upstream_ready_timer_(upstream_ready_timer) {} @@ -98,10 +98,10 @@ class ConnPoolImplForTest : public ConnPoolImpl { } }, Upstream::makeTestHost(cluster, "tcp://127.0.0.1:9000"), *test_client.client_dispatcher_); - EXPECT_CALL(*test_client.connect_timer_, enableTimer(_, _)); EXPECT_CALL(mock_dispatcher_, createClientConnection_(_, _, _, _)) .WillOnce(Return(test_client.connection_)); EXPECT_CALL(*this, createCodecClient_()).WillOnce(Return(test_client.codec_client_)); + EXPECT_CALL(*test_client.connect_timer_, enableTimer(_, _)); ON_CALL(*test_client.codec_, protocol()).WillByDefault(Return(protocol)); } @@ -132,7 +132,7 @@ class Http1ConnPoolImplTest : public testing::Test { conn_pool_(dispatcher_, cluster_, upstream_ready_timer_) {} ~Http1ConnPoolImplTest() override { - EXPECT_TRUE(TestUtility::gaugesZeroed(cluster_->stats_store_.gauges())); + EXPECT_EQ("", TestUtility::nonZeroedGauges(cluster_->stats_store_.gauges())); } NiceMock dispatcher_; @@ -170,8 +170,8 @@ struct ActiveTestRequest { } if (type == Type::CreateConnection) { - EXPECT_CALL(*parent_.conn_pool_.test_clients_[client_index_].connect_timer_, disableTimer()); 
expectNewStream(); + EXPECT_CALL(*parent_.conn_pool_.test_clients_[client_index_].connect_timer_, disableTimer()); parent.conn_pool_.test_clients_[client_index_].connection_->raiseEvent( Network::ConnectionEvent::Connected); } @@ -417,34 +417,34 @@ TEST_F(Http1ConnPoolImplTest, MeasureConnectTime) { // Move time forward, signal that the first connect completed and verify the time to connect. uint64_t upstream_cx_connect_ms1 = 0; simulated_time.sleep(std::chrono::milliseconds(sleep2_ms)); - EXPECT_CALL(*conn_pool_.test_clients_[0].connect_timer_, disableTimer()); EXPECT_CALL(cluster_->stats_store_, deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_cx_connect_ms"), _)) .WillOnce(SaveArg<1>(&upstream_cx_connect_ms1)); r1.expectNewStream(); + EXPECT_CALL(*conn_pool_.test_clients_[0].connect_timer_, disableTimer()); conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); EXPECT_EQ(sleep1_ms + sleep2_ms, upstream_cx_connect_ms1); // Move time forward, signal that the second connect completed and verify the time to connect. uint64_t upstream_cx_connect_ms2 = 0; simulated_time.sleep(std::chrono::milliseconds(sleep3_ms)); - EXPECT_CALL(*conn_pool_.test_clients_[1].connect_timer_, disableTimer()); EXPECT_CALL(cluster_->stats_store_, deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_cx_connect_ms"), _)) .WillOnce(SaveArg<1>(&upstream_cx_connect_ms2)); r2.expectNewStream(); + EXPECT_CALL(*conn_pool_.test_clients_[1].connect_timer_, disableTimer()); conn_pool_.test_clients_[1].connection_->raiseEvent(Network::ConnectionEvent::Connected); EXPECT_EQ(sleep2_ms + sleep3_ms, upstream_cx_connect_ms2); // Cleanup, cause the connections to go away. 
- for (auto& test_client : conn_pool_.test_clients_) { - EXPECT_CALL(conn_pool_, onClientDestroy()); + while (!conn_pool_.test_clients_.empty()) { EXPECT_CALL( cluster_->stats_store_, deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_cx_length_ms"), _)); - test_client.connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + EXPECT_CALL(conn_pool_, onClientDestroy()); + conn_pool_.test_clients_.front().connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + dispatcher_.clearDeferredDeleteList(); } - dispatcher_.clearDeferredDeleteList(); } /** @@ -853,11 +853,10 @@ TEST_F(Http1ConnPoolImplTest, DrainWhileConnecting) { EXPECT_NE(nullptr, handle); conn_pool_.addDrainedCallback([&]() -> void { drained.ready(); }); - handle->cancel(); EXPECT_CALL(*conn_pool_.test_clients_[0].connection_, close(Network::ConnectionCloseType::NoFlush)); EXPECT_CALL(drained, ready()); - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); + handle->cancel(); EXPECT_CALL(conn_pool_, onClientDestroy()); dispatcher_.clearDeferredDeleteList(); @@ -874,10 +873,10 @@ TEST_F(Http1ConnPoolImplTest, RemoteCloseToCompleteResponse) { NiceMock request_encoder; Http::StreamDecoder* inner_decoder; - EXPECT_CALL(*conn_pool_.test_clients_[0].connect_timer_, disableTimer()); EXPECT_CALL(*conn_pool_.test_clients_[0].codec_, newStream(_)) .WillOnce(DoAll(SaveArgAddress(&inner_decoder), ReturnRef(request_encoder))); EXPECT_CALL(callbacks.pool_ready_, ready()); + EXPECT_CALL(*conn_pool_.test_clients_[0].connect_timer_, disableTimer()); conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); callbacks.outer_encoder_->encodeHeaders(TestHeaderMapImpl{}, true); @@ -946,7 +945,7 @@ TEST_F(Http1ConnPoolImplTest, PendingRequestIsConsideredActive) { dispatcher_.clearDeferredDeleteList(); EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_.value()); - EXPECT_EQ(1U, 
cluster_->stats_.upstream_cx_destroy_remote_.value()); + EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_local_.value()); } } // namespace diff --git a/test/common/http/http2/BUILD b/test/common/http/http2/BUILD index ded96525c02b0..6c8387fbc2d4c 100644 --- a/test/common/http/http2/BUILD +++ b/test/common/http/http2/BUILD @@ -64,6 +64,25 @@ envoy_cc_test( ], ) +envoy_cc_test( + name = "conn_pool_legacy_test", + srcs = ["conn_pool_legacy_test.cc"], + deps = [ + "//source/common/event:dispatcher_lib", + "//source/common/http/http2:conn_pool_legacy_lib", + "//source/common/network:utility_lib", + "//source/common/upstream:upstream_includes", + "//source/common/upstream:upstream_lib", + "//test/common/http:common_lib", + "//test/common/upstream:utility_lib", + "//test/mocks/event:event_mocks", + "//test/mocks/http:http_mocks", + "//test/mocks/network:network_mocks", + "//test/mocks/runtime:runtime_mocks", + "//test/mocks/upstream:upstream_mocks", + ], +) + envoy_cc_test_library( name = "http2_frame", srcs = ["http2_frame.cc"], diff --git a/test/common/http/http2/conn_pool_legacy_test.cc b/test/common/http/http2/conn_pool_legacy_test.cc new file mode 100644 index 0000000000000..ffc5339e9d2b5 --- /dev/null +++ b/test/common/http/http2/conn_pool_legacy_test.cc @@ -0,0 +1,774 @@ +#include +#include +#include + +#include "common/event/dispatcher_impl.h" +#include "common/http/http2/conn_pool_legacy.h" +#include "common/network/utility.h" +#include "common/upstream/upstream_impl.h" + +#include "test/common/http/common.h" +#include "test/common/upstream/utility.h" +#include "test/mocks/event/mocks.h" +#include "test/mocks/http/mocks.h" +#include "test/mocks/network/mocks.h" +#include "test/mocks/runtime/mocks.h" +#include "test/mocks/upstream/mocks.h" +#include "test/test_common/printers.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +using testing::_; +using testing::DoAll; +using testing::InSequence; +using testing::Invoke; +using testing::NiceMock; +using 
testing::Property; +using testing::Return; +using testing::ReturnRef; + +namespace Envoy { +namespace Http { +namespace Legacy { +namespace Http2 { + +class TestConnPoolImpl : public ConnPoolImpl { +public: + using ConnPoolImpl::ConnPoolImpl; + + CodecClientPtr createCodecClient(Upstream::Host::CreateConnectionData& data) override { + // We expect to own the connection, but already have it, so just release it to prevent it from + // getting deleted. + data.connection_.release(); + return CodecClientPtr{createCodecClient_(data)}; + } + + MOCK_METHOD1(createCodecClient_, CodecClient*(Upstream::Host::CreateConnectionData& data)); + + uint32_t maxTotalStreams() override { return max_streams_; } + + uint32_t max_streams_{std::numeric_limits<uint32_t>::max()}; +}; + +class ActiveTestRequest; + +class Http2ConnPoolImplLegacyTest : public testing::Test { +public: + struct TestCodecClient { + Http::MockClientConnection* codec_; + Network::MockClientConnection* connection_; + CodecClientForTest* codec_client_; + Event::MockTimer* connect_timer_; + Event::DispatcherPtr client_dispatcher_; + }; + + Http2ConnPoolImplLegacyTest() + : api_(Api::createApiForTest(stats_store_)), + pool_(dispatcher_, host_, Upstream::ResourcePriority::Default, nullptr, nullptr) {} + + ~Http2ConnPoolImplLegacyTest() override { + EXPECT_TRUE(TestUtility::gaugesZeroed(cluster_->stats_store_.gauges())); + } + + // Creates a new test client, expecting a new connection to be created and associated + // with the new client. 
+ void expectClientCreate(absl::optional buffer_limits = {}) { + test_clients_.emplace_back(); + TestCodecClient& test_client = test_clients_.back(); + test_client.connection_ = new NiceMock(); + test_client.codec_ = new NiceMock(); + test_client.connect_timer_ = new NiceMock(&dispatcher_); + test_client.client_dispatcher_ = api_->allocateDispatcher(); + EXPECT_CALL(*test_client.connect_timer_, enableTimer(_, _)); + EXPECT_CALL(dispatcher_, createClientConnection_(_, _, _, _)) + .WillOnce(Return(test_client.connection_)); + auto cluster = std::make_shared>(); + Network::ClientConnectionPtr connection{test_client.connection_}; + test_client.codec_client_ = new CodecClientForTest( + CodecClient::Type::HTTP1, std::move(connection), test_client.codec_, + [this](CodecClient*) -> void { onClientDestroy(); }, + Upstream::makeTestHost(cluster, "tcp://127.0.0.1:9000"), *test_client.client_dispatcher_); + if (buffer_limits) { + EXPECT_CALL(*cluster_, perConnectionBufferLimitBytes()).WillOnce(Return(*buffer_limits)); + EXPECT_CALL(*test_clients_.back().connection_, setBufferLimits(*buffer_limits)); + } + EXPECT_CALL(pool_, createCodecClient_(_)) + .WillOnce(Invoke([this](Upstream::Host::CreateConnectionData&) -> CodecClient* { + return test_clients_.back().codec_client_; + })); + } + + // Connects a pending connection for client with the given index, asserting + // that the provided request receives onPoolReady. + void expectClientConnect(size_t index, ActiveTestRequest& r); + // Asserts that onPoolReady is called on the request. + void expectStreamConnect(size_t index, ActiveTestRequest& r); + + // Resets the connection belonging to the provided index, asserting that the + // provided request receives onPoolFailure. + void expectClientReset(size_t index, ActiveTestRequest& r); + // Asserts that the provided requests receives onPoolFailure. + void expectStreamReset(ActiveTestRequest& r); + + /** + * Closes a test client. 
+ */ + void closeClient(size_t index); + + /** + * Completes an active request. Useful when this flow is not part of the main test assertions. + */ + void completeRequest(ActiveTestRequest& r); + + /** + * Completes an active request and closes the upstream connection. Useful when this flow is + * not part of the main test assertions. + */ + void completeRequestCloseUpstream(size_t index, ActiveTestRequest& r); + + MOCK_METHOD0(onClientDestroy, void()); + + Stats::IsolatedStoreImpl stats_store_; + Api::ApiPtr api_; + NiceMock dispatcher_; + std::shared_ptr cluster_{new NiceMock()}; + Upstream::HostSharedPtr host_{Upstream::makeTestHost(cluster_, "tcp://127.0.0.1:80")}; + TestConnPoolImpl pool_; + std::vector test_clients_; + NiceMock runtime_; +}; + +class ActiveTestRequest { +public: + ActiveTestRequest(Http2ConnPoolImplLegacyTest& test, size_t client_index, bool expect_connected) { + if (expect_connected) { + EXPECT_CALL(*test.test_clients_[client_index].codec_, newStream(_)) + .WillOnce(DoAll(SaveArgAddress(&inner_decoder_), ReturnRef(inner_encoder_))); + EXPECT_CALL(callbacks_.pool_ready_, ready()); + EXPECT_EQ(nullptr, test.pool_.newStream(decoder_, callbacks_)); + } else { + EXPECT_NE(nullptr, test.pool_.newStream(decoder_, callbacks_)); + } + } + + Http::MockStreamDecoder decoder_; + ConnPoolCallbacks callbacks_; + Http::StreamDecoder* inner_decoder_{}; + NiceMock inner_encoder_; +}; + +void Http2ConnPoolImplLegacyTest::expectClientConnect(size_t index, ActiveTestRequest& r) { + expectStreamConnect(index, r); + EXPECT_CALL(*test_clients_[index].connect_timer_, disableTimer()); + test_clients_[index].connection_->raiseEvent(Network::ConnectionEvent::Connected); +} + +void Http2ConnPoolImplLegacyTest::expectStreamConnect(size_t index, ActiveTestRequest& r) { + EXPECT_CALL(*test_clients_[index].codec_, newStream(_)) + .WillOnce(DoAll(SaveArgAddress(&r.inner_decoder_), ReturnRef(r.inner_encoder_))); + EXPECT_CALL(r.callbacks_.pool_ready_, ready()); +} + +void 
Http2ConnPoolImplLegacyTest::expectClientReset(size_t index, ActiveTestRequest& r) { + expectStreamReset(r); + EXPECT_CALL(*test_clients_[0].connect_timer_, disableTimer()); + test_clients_[index].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); +} + +void Http2ConnPoolImplLegacyTest::expectStreamReset(ActiveTestRequest& r) { + EXPECT_CALL(r.callbacks_.pool_failure_, ready()); +} + +void Http2ConnPoolImplLegacyTest::closeClient(size_t index) { + test_clients_[index].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + EXPECT_CALL(*this, onClientDestroy()); + dispatcher_.clearDeferredDeleteList(); +} + +void Http2ConnPoolImplLegacyTest::completeRequest(ActiveTestRequest& r) { + EXPECT_CALL(r.inner_encoder_, encodeHeaders(_, true)); + r.callbacks_.outer_encoder_->encodeHeaders(HeaderMapImpl{}, true); + EXPECT_CALL(r.decoder_, decodeHeaders_(_, true)); + r.inner_decoder_->decodeHeaders(HeaderMapPtr{new HeaderMapImpl{}}, true); +} + +void Http2ConnPoolImplLegacyTest::completeRequestCloseUpstream(size_t index, ActiveTestRequest& r) { + completeRequest(r); + closeClient(index); +} + +/** + * Verify that the pool retains and returns the host it was constructed with. + */ +TEST_F(Http2ConnPoolImplLegacyTest, Host) { EXPECT_EQ(host_, pool_.host()); } + +/** + * Verify that connections are drained when requested. + */ +TEST_F(Http2ConnPoolImplLegacyTest, DrainConnections) { + InSequence s; + pool_.max_streams_ = 1; + + // Test drain connections call prior to any connections being created. 
+ pool_.drainConnections(); + + expectClientCreate(); + ActiveTestRequest r1(*this, 0, false); + expectClientConnect(0, r1); + EXPECT_CALL(r1.inner_encoder_, encodeHeaders(_, true)); + r1.callbacks_.outer_encoder_->encodeHeaders(HeaderMapImpl{}, true); + + expectClientCreate(); + ActiveTestRequest r2(*this, 1, false); + expectClientConnect(1, r2); + EXPECT_CALL(r2.inner_encoder_, encodeHeaders(_, true)); + r2.callbacks_.outer_encoder_->encodeHeaders(HeaderMapImpl{}, true); + + // This will move primary to draining and destroy draining. + pool_.drainConnections(); + EXPECT_CALL(*this, onClientDestroy()); + dispatcher_.clearDeferredDeleteList(); + + // This will destroy draining. + test_clients_[1].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + EXPECT_CALL(*this, onClientDestroy()); + dispatcher_.clearDeferredDeleteList(); + + EXPECT_EQ(2U, cluster_->stats_.upstream_cx_destroy_.value()); + EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_remote_.value()); +} + +// Verifies that requests are queued up in the conn pool until the connection becomes ready. +TEST_F(Http2ConnPoolImplLegacyTest, PendingRequests) { + InSequence s; + + // Create three requests. These should be queued up. + expectClientCreate(); + ActiveTestRequest r1(*this, 0, false); + ActiveTestRequest r2(*this, 0, false); + ActiveTestRequest r3(*this, 0, false); + + // The connection now becomes ready. This should cause all the queued requests to be sent. + expectStreamConnect(0, r1); + expectStreamConnect(0, r2); + expectClientConnect(0, r3); + + // Send a request through each stream. 
+ EXPECT_CALL(r1.inner_encoder_, encodeHeaders(_, true)); + r1.callbacks_.outer_encoder_->encodeHeaders(HeaderMapImpl{}, true); + + EXPECT_CALL(r2.inner_encoder_, encodeHeaders(_, true)); + r2.callbacks_.outer_encoder_->encodeHeaders(HeaderMapImpl{}, true); + + EXPECT_CALL(r3.inner_encoder_, encodeHeaders(_, true)); + r3.callbacks_.outer_encoder_->encodeHeaders(HeaderMapImpl{}, true); + + // Since we now have an active connection, subsequent requests should connect immediately. + ActiveTestRequest r4(*this, 0, true); + + // Clean up everything. + test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + EXPECT_CALL(*this, onClientDestroy()); + dispatcher_.clearDeferredDeleteList(); + + EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_.value()); + EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_remote_.value()); +} + +// Verifies that requests are queued up in the conn pool and fail when the connection +// fails to be established. +TEST_F(Http2ConnPoolImplLegacyTest, PendingRequestsFailure) { + InSequence s; + pool_.max_streams_ = 10; + + // Create three requests. These should be queued up. + expectClientCreate(); + ActiveTestRequest r1(*this, 0, false); + ActiveTestRequest r2(*this, 0, false); + ActiveTestRequest r3(*this, 0, false); + + // The connection now becomes ready. This should cause all the queued requests to be sent. + // Note that these occur in reverse order due to the order we purge pending requests in. + expectStreamReset(r3); + expectStreamReset(r2); + expectClientReset(0, r1); + + expectClientCreate(); + // Since we have no active connection, subsequent requests will queue until + // the new connection is established. + ActiveTestRequest r4(*this, 1, false); + expectClientConnect(1, r4); + + // Clean up everything. 
+ test_clients_[1].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + EXPECT_CALL(*this, onClientDestroy()).Times(2); + dispatcher_.clearDeferredDeleteList(); + + EXPECT_EQ(2U, cluster_->stats_.upstream_cx_destroy_.value()); + EXPECT_EQ(2U, cluster_->stats_.upstream_cx_destroy_remote_.value()); +} + +// Verifies that requests are queued up in the conn pool and respect max request circuit breaking +// when the connection is established. +TEST_F(Http2ConnPoolImplLegacyTest, PendingRequestsRequestOverflow) { + InSequence s; + + // Inflate the resource count to just under the limit. + auto& requests = host_->cluster().resourceManager(Upstream::ResourcePriority::Default).requests(); + for (uint64_t i = 0; i < requests.max() - 1; ++i) { + requests.inc(); + } + + // Create three requests. These should be queued up. + expectClientCreate(); + ActiveTestRequest r1(*this, 0, false); + ActiveTestRequest r2(*this, 0, false); + ActiveTestRequest r3(*this, 0, false); + + // We queued up three requests, but we can only afford one before hitting the circuit + // breaker. Thus, we expect to see 2 resets and one successful connect. + expectStreamConnect(0, r1); + expectStreamReset(r2); + expectStreamReset(r3); + EXPECT_CALL(*test_clients_[0].connect_timer_, disableTimer()); + test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); + + // Clean up everything. + for (uint64_t i = 0; i < requests.max() - 1; ++i) { + requests.dec(); + } + test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + EXPECT_CALL(*this, onClientDestroy()); + dispatcher_.clearDeferredDeleteList(); + + EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_.value()); + EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_remote_.value()); +} + +// Verifies that we honor the max pending requests circuit breaker. 
+TEST_F(Http2ConnPoolImplLegacyTest, PendingRequestsMaxPendingCircuitBreaker) { + InSequence s; + + // Inflate the resource count to just under the limit. + auto& pending_reqs = + host_->cluster().resourceManager(Upstream::ResourcePriority::Default).pendingRequests(); + for (uint64_t i = 0; i < pending_reqs.max() - 1; ++i) { + pending_reqs.inc(); + } + + // Create two requests. The first one should be enqueued, while the second one + // should fail fast due to us being above the max pending requests limit. + expectClientCreate(); + ActiveTestRequest r1(*this, 0, false); + + Http::MockStreamDecoder decoder; + ConnPoolCallbacks callbacks; + EXPECT_CALL(callbacks.pool_failure_, ready()); + EXPECT_EQ(nullptr, pool_.newStream(decoder, callbacks)); + + expectStreamConnect(0, r1); + EXPECT_CALL(*test_clients_[0].connect_timer_, disableTimer()); + test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); + + // Clean up everything. + for (uint64_t i = 0; i < pending_reqs.max() - 1; ++i) { + pending_reqs.dec(); + } + test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + EXPECT_CALL(*this, onClientDestroy()); + dispatcher_.clearDeferredDeleteList(); + + EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_.value()); + EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_remote_.value()); +} + +TEST_F(Http2ConnPoolImplLegacyTest, VerifyConnectionTimingStats) { + InSequence s; + expectClientCreate(); + ActiveTestRequest r1(*this, 0, false); + EXPECT_CALL(cluster_->stats_store_, + deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_cx_connect_ms"), _)); + expectClientConnect(0, r1); + EXPECT_CALL(r1.inner_encoder_, encodeHeaders(_, true)); + r1.callbacks_.outer_encoder_->encodeHeaders(HeaderMapImpl{}, true); + EXPECT_CALL(r1.decoder_, decodeHeaders_(_, true)); + r1.inner_decoder_->decodeHeaders(HeaderMapPtr{new HeaderMapImpl{}}, true); + + test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); 
+ EXPECT_CALL(*this, onClientDestroy()); + EXPECT_CALL(cluster_->stats_store_, + deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_cx_length_ms"), _)); + dispatcher_.clearDeferredDeleteList(); + + EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_.value()); + EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_remote_.value()); +} + +/** + * Test that buffer limits are set. + */ +TEST_F(Http2ConnPoolImplLegacyTest, VerifyBufferLimits) { + InSequence s; + expectClientCreate(8192); + ActiveTestRequest r1(*this, 0, false); + + expectClientConnect(0, r1); + EXPECT_CALL(r1.inner_encoder_, encodeHeaders(_, true)); + r1.callbacks_.outer_encoder_->encodeHeaders(HeaderMapImpl{}, true); + EXPECT_CALL(r1.decoder_, decodeHeaders_(_, true)); + r1.inner_decoder_->decodeHeaders(HeaderMapPtr{new HeaderMapImpl{}}, true); + + test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + EXPECT_CALL(*this, onClientDestroy()); + dispatcher_.clearDeferredDeleteList(); + + EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_.value()); + EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_remote_.value()); +} + +TEST_F(Http2ConnPoolImplLegacyTest, RequestAndResponse) { + InSequence s; + + expectClientCreate(); + ActiveTestRequest r1(*this, 0, false); + expectClientConnect(0, r1); + EXPECT_CALL(r1.inner_encoder_, encodeHeaders(_, true)); + r1.callbacks_.outer_encoder_->encodeHeaders(HeaderMapImpl{}, true); + EXPECT_CALL(r1.decoder_, decodeHeaders_(_, true)); + r1.inner_decoder_->decodeHeaders(HeaderMapPtr{new HeaderMapImpl{}}, true); + + ActiveTestRequest r2(*this, 0, true); + EXPECT_CALL(r2.inner_encoder_, encodeHeaders(_, true)); + r2.callbacks_.outer_encoder_->encodeHeaders(HeaderMapImpl{}, true); + EXPECT_CALL(r2.decoder_, decodeHeaders_(_, true)); + r2.inner_decoder_->decodeHeaders(HeaderMapPtr{new HeaderMapImpl{}}, true); + + test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + EXPECT_CALL(*this, onClientDestroy()); + 
dispatcher_.clearDeferredDeleteList(); + + EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_.value()); + EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_remote_.value()); +} + +TEST_F(Http2ConnPoolImplLegacyTest, LocalReset) { + InSequence s; + + expectClientCreate(); + ActiveTestRequest r1(*this, 0, false); + expectClientConnect(0, r1); + EXPECT_CALL(r1.inner_encoder_, encodeHeaders(_, false)); + r1.callbacks_.outer_encoder_->encodeHeaders(HeaderMapImpl{}, false); + r1.callbacks_.outer_encoder_->getStream().resetStream(Http::StreamResetReason::LocalReset); + + test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + EXPECT_CALL(*this, onClientDestroy()); + dispatcher_.clearDeferredDeleteList(); + EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_.value()); + EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_remote_.value()); + EXPECT_EQ(1U, cluster_->stats_.upstream_rq_tx_reset_.value()); + EXPECT_EQ(0U, cluster_->circuit_breakers_stats_.rq_open_.value()); +} + +TEST_F(Http2ConnPoolImplLegacyTest, RemoteReset) { + InSequence s; + + expectClientCreate(); + ActiveTestRequest r1(*this, 0, false); + expectClientConnect(0, r1); + EXPECT_CALL(r1.inner_encoder_, encodeHeaders(_, false)); + r1.callbacks_.outer_encoder_->encodeHeaders(HeaderMapImpl{}, false); + r1.inner_encoder_.stream_.resetStream(Http::StreamResetReason::RemoteReset); + + test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + EXPECT_CALL(*this, onClientDestroy()); + dispatcher_.clearDeferredDeleteList(); + EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_.value()); + EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_remote_.value()); + EXPECT_EQ(1U, cluster_->stats_.upstream_rq_rx_reset_.value()); + EXPECT_EQ(0U, cluster_->circuit_breakers_stats_.rq_open_.value()); +} + +TEST_F(Http2ConnPoolImplLegacyTest, DrainDisconnectWithActiveRequest) { + InSequence s; + pool_.max_streams_ = 1; + + expectClientCreate(); + ActiveTestRequest r1(*this, 0, 
false); + expectClientConnect(0, r1); + EXPECT_CALL(r1.inner_encoder_, encodeHeaders(_, true)); + r1.callbacks_.outer_encoder_->encodeHeaders(HeaderMapImpl{}, true); + + ReadyWatcher drained; + pool_.addDrainedCallback([&]() -> void { drained.ready(); }); + + EXPECT_CALL(dispatcher_, deferredDelete_(_)); + EXPECT_CALL(drained, ready()); + test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + EXPECT_CALL(*this, onClientDestroy()); + dispatcher_.clearDeferredDeleteList(); + + EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_.value()); + EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_remote_.value()); +} + +TEST_F(Http2ConnPoolImplLegacyTest, DrainDisconnectDrainingWithActiveRequest) { + InSequence s; + pool_.max_streams_ = 1; + + expectClientCreate(); + ActiveTestRequest r1(*this, 0, false); + expectClientConnect(0, r1); + EXPECT_CALL(r1.inner_encoder_, encodeHeaders(_, true)); + r1.callbacks_.outer_encoder_->encodeHeaders(HeaderMapImpl{}, true); + + expectClientCreate(); + ActiveTestRequest r2(*this, 1, false); + expectClientConnect(1, r2); + EXPECT_CALL(r2.inner_encoder_, encodeHeaders(_, true)); + r2.callbacks_.outer_encoder_->encodeHeaders(HeaderMapImpl{}, true); + + ReadyWatcher drained; + pool_.addDrainedCallback([&]() -> void { drained.ready(); }); + + EXPECT_CALL(dispatcher_, deferredDelete_(_)); + EXPECT_CALL(r2.decoder_, decodeHeaders_(_, true)); + r2.inner_decoder_->decodeHeaders(HeaderMapPtr{new HeaderMapImpl{}}, true); + EXPECT_CALL(*this, onClientDestroy()); + dispatcher_.clearDeferredDeleteList(); + + EXPECT_CALL(dispatcher_, deferredDelete_(_)); + EXPECT_CALL(drained, ready()); + test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + EXPECT_CALL(*this, onClientDestroy()); + dispatcher_.clearDeferredDeleteList(); + + EXPECT_EQ(2U, cluster_->stats_.upstream_cx_destroy_.value()); + EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_remote_.value()); +} + +TEST_F(Http2ConnPoolImplLegacyTest, 
DrainPrimary) { + InSequence s; + pool_.max_streams_ = 1; + + expectClientCreate(); + ActiveTestRequest r1(*this, 0, false); + expectClientConnect(0, r1); + EXPECT_CALL(r1.inner_encoder_, encodeHeaders(_, true)); + r1.callbacks_.outer_encoder_->encodeHeaders(HeaderMapImpl{}, true); + + expectClientCreate(); + ActiveTestRequest r2(*this, 1, false); + expectClientConnect(1, r2); + EXPECT_CALL(r2.inner_encoder_, encodeHeaders(_, true)); + r2.callbacks_.outer_encoder_->encodeHeaders(HeaderMapImpl{}, true); + + ReadyWatcher drained; + pool_.addDrainedCallback([&]() -> void { drained.ready(); }); + + EXPECT_CALL(dispatcher_, deferredDelete_(_)); + EXPECT_CALL(r2.decoder_, decodeHeaders_(_, true)); + r2.inner_decoder_->decodeHeaders(HeaderMapPtr{new HeaderMapImpl{}}, true); + EXPECT_CALL(*this, onClientDestroy()); + dispatcher_.clearDeferredDeleteList(); + + EXPECT_CALL(dispatcher_, deferredDelete_(_)); + EXPECT_CALL(drained, ready()); + EXPECT_CALL(r1.decoder_, decodeHeaders_(_, true)); + r1.inner_decoder_->decodeHeaders(HeaderMapPtr{new HeaderMapImpl{}}, true); + + EXPECT_CALL(*this, onClientDestroy()); + dispatcher_.clearDeferredDeleteList(); +} + +TEST_F(Http2ConnPoolImplLegacyTest, DrainPrimaryNoActiveRequest) { + InSequence s; + pool_.max_streams_ = 1; + + expectClientCreate(); + ActiveTestRequest r1(*this, 0, false); + expectClientConnect(0, r1); + EXPECT_CALL(r1.inner_encoder_, encodeHeaders(_, true)); + r1.callbacks_.outer_encoder_->encodeHeaders(HeaderMapImpl{}, true); + EXPECT_CALL(r1.decoder_, decodeHeaders_(_, true)); + r1.inner_decoder_->decodeHeaders(HeaderMapPtr{new HeaderMapImpl{}}, true); + + EXPECT_CALL(dispatcher_, deferredDelete_(_)); + expectClientCreate(); + ActiveTestRequest r2(*this, 1, false); + expectClientConnect(1, r2); + EXPECT_CALL(*this, onClientDestroy()); + dispatcher_.clearDeferredDeleteList(); + EXPECT_CALL(r2.inner_encoder_, encodeHeaders(_, true)); + r2.callbacks_.outer_encoder_->encodeHeaders(HeaderMapImpl{}, true); + 
EXPECT_CALL(r2.decoder_, decodeHeaders_(_, true)); + r2.inner_decoder_->decodeHeaders(HeaderMapPtr{new HeaderMapImpl{}}, true); + + ReadyWatcher drained; + EXPECT_CALL(dispatcher_, deferredDelete_(_)); + EXPECT_CALL(drained, ready()); + pool_.addDrainedCallback([&]() -> void { drained.ready(); }); + + EXPECT_CALL(*this, onClientDestroy()); + dispatcher_.clearDeferredDeleteList(); +} + +TEST_F(Http2ConnPoolImplLegacyTest, ConnectTimeout) { + InSequence s; + + EXPECT_EQ(0U, cluster_->circuit_breakers_stats_.rq_open_.value()); + + expectClientCreate(); + ActiveTestRequest r1(*this, 0, false); + EXPECT_CALL(r1.callbacks_.pool_failure_, ready()); + test_clients_[0].connect_timer_->invokeCallback(); + + EXPECT_CALL(*this, onClientDestroy()); + dispatcher_.clearDeferredDeleteList(); + + EXPECT_EQ(0U, cluster_->circuit_breakers_stats_.rq_open_.value()); + + expectClientCreate(); + ActiveTestRequest r2(*this, 1, false); + expectClientConnect(1, r2); + EXPECT_CALL(r2.inner_encoder_, encodeHeaders(_, true)); + r2.callbacks_.outer_encoder_->encodeHeaders(HeaderMapImpl{}, true); + EXPECT_CALL(r2.decoder_, decodeHeaders_(_, true)); + r2.inner_decoder_->decodeHeaders(HeaderMapPtr{new HeaderMapImpl{}}, true); + + test_clients_[1].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + EXPECT_CALL(*this, onClientDestroy()); + dispatcher_.clearDeferredDeleteList(); + + EXPECT_EQ(1U, cluster_->stats_.upstream_rq_total_.value()); + EXPECT_EQ(1U, cluster_->stats_.upstream_cx_connect_fail_.value()); + EXPECT_EQ(1U, cluster_->stats_.upstream_cx_connect_timeout_.value()); + EXPECT_EQ(1U, cluster_->stats_.upstream_rq_pending_failure_eject_.value()); + EXPECT_EQ(2U, cluster_->stats_.upstream_cx_destroy_.value()); + EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_local_.value()); + EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_remote_.value()); +} + +TEST_F(Http2ConnPoolImplLegacyTest, MaxGlobalRequests) { + cluster_->resetResourceManager(1024, 1024, 1, 1, 1); + InSequence 
s; + + expectClientCreate(); + ActiveTestRequest r1(*this, 0, false); + expectClientConnect(0, r1); + EXPECT_CALL(r1.inner_encoder_, encodeHeaders(_, true)); + r1.callbacks_.outer_encoder_->encodeHeaders(HeaderMapImpl{}, true); + + ConnPoolCallbacks callbacks; + Http::MockStreamDecoder decoder; + EXPECT_CALL(callbacks.pool_failure_, ready()); + EXPECT_EQ(nullptr, pool_.newStream(decoder, callbacks)); + + test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + EXPECT_CALL(*this, onClientDestroy()); + dispatcher_.clearDeferredDeleteList(); + + EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_.value()); + EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_remote_.value()); +} + +TEST_F(Http2ConnPoolImplLegacyTest, GoAway) { + InSequence s; + + expectClientCreate(); + ActiveTestRequest r1(*this, 0, false); + expectClientConnect(0, r1); + EXPECT_CALL(r1.inner_encoder_, encodeHeaders(_, true)); + r1.callbacks_.outer_encoder_->encodeHeaders(HeaderMapImpl{}, true); + EXPECT_CALL(r1.decoder_, decodeHeaders_(_, true)); + r1.inner_decoder_->decodeHeaders(HeaderMapPtr{new HeaderMapImpl{}}, true); + + test_clients_[0].codec_client_->raiseGoAway(); + + expectClientCreate(); + ActiveTestRequest r2(*this, 1, false); + expectClientConnect(1, r2); + EXPECT_CALL(r2.inner_encoder_, encodeHeaders(_, true)); + r2.callbacks_.outer_encoder_->encodeHeaders(HeaderMapImpl{}, true); + EXPECT_CALL(r2.decoder_, decodeHeaders_(_, true)); + r2.inner_decoder_->decodeHeaders(HeaderMapPtr{new HeaderMapImpl{}}, true); + + test_clients_[1].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + EXPECT_CALL(*this, onClientDestroy()).Times(2); + dispatcher_.clearDeferredDeleteList(); + + EXPECT_EQ(1U, cluster_->stats_.upstream_cx_close_notify_.value()); +} + +TEST_F(Http2ConnPoolImplLegacyTest, NoActiveConnectionsByDefault) { + EXPECT_FALSE(pool_.hasActiveConnections()); +} + +// 
Show that an active request on the primary connection is considered active. +TEST_F(Http2ConnPoolImplLegacyTest, ActiveConnectionsHasActiveRequestsTrue) { + expectClientCreate(); + ActiveTestRequest r1(*this, 0, false); + expectClientConnect(0, r1); + + EXPECT_TRUE(pool_.hasActiveConnections()); + + completeRequestCloseUpstream(0, r1); +} + +// Show that pending requests are considered active. +TEST_F(Http2ConnPoolImplLegacyTest, PendingRequestsConsideredActive) { + expectClientCreate(); + ActiveTestRequest r1(*this, 0, false); + + EXPECT_TRUE(pool_.hasActiveConnections()); + + expectClientConnect(0, r1); + completeRequestCloseUpstream(0, r1); +} + +// Show that even if there is a primary client still, if all of its requests have completed, then it +// does not have any active connections. +TEST_F(Http2ConnPoolImplLegacyTest, ResponseCompletedConnectionReadyNoActiveConnections) { + expectClientCreate(); + ActiveTestRequest r1(*this, 0, false); + expectClientConnect(0, r1); + completeRequest(r1); + + EXPECT_FALSE(pool_.hasActiveConnections()); + + closeClient(0); +} + +// Show that if connections are draining, they're still considered active. +TEST_F(Http2ConnPoolImplLegacyTest, DrainingConnectionsConsideredActive) { + pool_.max_streams_ = 1; + expectClientCreate(); + ActiveTestRequest r1(*this, 0, false); + expectClientConnect(0, r1); + pool_.drainConnections(); + + EXPECT_TRUE(pool_.hasActiveConnections()); + + completeRequest(r1); + closeClient(0); +} + +// Show that once we've drained all connections, there are no longer any active. 
+TEST_F(Http2ConnPoolImplLegacyTest, DrainedConnectionsNotActive) { + pool_.max_streams_ = 1; + expectClientCreate(); + ActiveTestRequest r1(*this, 0, false); + expectClientConnect(0, r1); + pool_.drainConnections(); + completeRequest(r1); + + EXPECT_FALSE(pool_.hasActiveConnections()); + + closeClient(0); +} +} // namespace Http2 +} // namespace Legacy +} // namespace Http +} // namespace Envoy diff --git a/test/common/http/http2/conn_pool_test.cc b/test/common/http/http2/conn_pool_test.cc index d7cf066bc4c65..55386371c08c0 100644 --- a/test/common/http/http2/conn_pool_test.cc +++ b/test/common/http/http2/conn_pool_test.cc @@ -44,10 +44,6 @@ class TestConnPoolImpl : public ConnPoolImpl { } MOCK_METHOD(CodecClient*, createCodecClient_, (Upstream::Host::CreateConnectionData & data)); - - uint32_t maxTotalStreams() override { return max_streams_; } - - uint32_t max_streams_{std::numeric_limits::max()}; }; class ActiveTestRequest; @@ -64,10 +60,14 @@ class Http2ConnPoolImplTest : public testing::Test { Http2ConnPoolImplTest() : api_(Api::createApiForTest(stats_store_)), - pool_(dispatcher_, host_, Upstream::ResourcePriority::Default, nullptr, nullptr) {} + pool_(dispatcher_, host_, Upstream::ResourcePriority::Default, nullptr, nullptr) { + // Default connections to 1024 because the tests shouldn't be relying on the + // connection resource limit for most tests. 
+ cluster_->resetResourceManager(1024, 1024, 1024, 1, 1); + } ~Http2ConnPoolImplTest() override { - EXPECT_TRUE(TestUtility::gaugesZeroed(cluster_->stats_store_.gauges())); + EXPECT_EQ("", TestUtility::nonZeroedGauges(cluster_->stats_store_.gauges())); } // Creates a new test client, expecting a new connection to be created and associated @@ -79,7 +79,6 @@ class Http2ConnPoolImplTest : public testing::Test { test_client.codec_ = new NiceMock(); test_client.connect_timer_ = new NiceMock(&dispatcher_); test_client.client_dispatcher_ = api_->allocateDispatcher(); - EXPECT_CALL(*test_client.connect_timer_, enableTimer(_, _)); EXPECT_CALL(dispatcher_, createClientConnection_(_, _, _, _)) .WillOnce(Return(test_client.connection_)); auto cluster = std::make_shared>(); @@ -96,6 +95,7 @@ class Http2ConnPoolImplTest : public testing::Test { .WillOnce(Invoke([this](Upstream::Host::CreateConnectionData&) -> CodecClient* { return test_clients_.back().codec_client_; })); + EXPECT_CALL(*test_client.connect_timer_, enableTimer(_, _)); } // Connects a pending connection for client with the given index, asserting @@ -147,7 +147,8 @@ class ActiveTestRequest { EXPECT_CALL(callbacks_.pool_ready_, ready()); EXPECT_EQ(nullptr, test.pool_.newStream(decoder_, callbacks_)); } else { - EXPECT_NE(nullptr, test.pool_.newStream(decoder_, callbacks_)); + handle_ = test.pool_.newStream(decoder_, callbacks_); + EXPECT_NE(nullptr, handle_); } } @@ -155,6 +156,7 @@ class ActiveTestRequest { ConnPoolCallbacks callbacks_; Http::StreamDecoder* inner_decoder_{}; NiceMock inner_encoder_; + ConnectionPool::Cancellable* handle_{}; }; void Http2ConnPoolImplTest::expectClientConnect(size_t index, ActiveTestRequest& r) { @@ -202,12 +204,90 @@ void Http2ConnPoolImplTest::completeRequestCloseUpstream(size_t index, ActiveTes */ TEST_F(Http2ConnPoolImplTest, Host) { EXPECT_EQ(host_, pool_.host()); } +/** + * Verify that idle connections are closed immediately when draining. 
+ */ +TEST_F(Http2ConnPoolImplTest, DrainConnectionIdle) { + InSequence s; + + expectClientCreate(); + ActiveTestRequest r(*this, 0, false); + expectClientConnect(0, r); + completeRequest(r); + + EXPECT_CALL(*this, onClientDestroy()); + pool_.drainConnections(); +} + +/** + * Verify that a ready connection with a request in progress is moved to + * draining and closes when the request completes. + */ +TEST_F(Http2ConnPoolImplTest, DrainConnectionReadyWithRequest) { + InSequence s; + + expectClientCreate(); + ActiveTestRequest r(*this, 0, false); + expectClientConnect(0, r); + EXPECT_CALL(r.inner_encoder_, encodeHeaders(_, true)); + r.callbacks_.outer_encoder_->encodeHeaders(HeaderMapImpl{}, true); + + pool_.drainConnections(); + + EXPECT_CALL(r.decoder_, decodeHeaders_(_, true)); + EXPECT_CALL(*this, onClientDestroy()); + r.inner_decoder_->decodeHeaders(HeaderMapPtr{new HeaderMapImpl{}}, true); +} + +/** + * Verify that a busy connection is moved to draining and closes when all requests + * complete. + */ +TEST_F(Http2ConnPoolImplTest, DrainConnectionBusy) { + cluster_->http2_settings_.max_concurrent_streams_ = 1; + InSequence s; + + expectClientCreate(); + ActiveTestRequest r(*this, 0, false); + expectClientConnect(0, r); + EXPECT_CALL(r.inner_encoder_, encodeHeaders(_, true)); + r.callbacks_.outer_encoder_->encodeHeaders(HeaderMapImpl{}, true); + + pool_.drainConnections(); + + EXPECT_CALL(r.decoder_, decodeHeaders_(_, true)); + EXPECT_CALL(*this, onClientDestroy()); + r.inner_decoder_->decodeHeaders(HeaderMapPtr{new HeaderMapImpl{}}, true); +} + +/** + * Verify that draining connections with a pending request does not + * close the connection, but draining without a pending request does close + * the connection. 
+ */
+TEST_F(Http2ConnPoolImplTest, DrainConnectionConnecting) {
+  InSequence s;
+
+  expectClientCreate();
+  ActiveTestRequest r(*this, 0, false);
+
+  // Pending request prevents the connection from being drained
+  pool_.drainConnections();
+
+  // Cancel the pending request, and then the connection can be closed.
+  r.handle_->cancel();
+  EXPECT_CALL(*this, onClientDestroy());
+  pool_.drainConnections();
+}
+
 /**
  * Verify that connections are drained when requested.
  */
 TEST_F(Http2ConnPoolImplTest, DrainConnections) {
+  cluster_->resetResourceManager(2, 1024, 1024, 1, 1);
+
   InSequence s;
-  pool_.max_streams_ = 1;
+  cluster_->max_requests_per_connection_ = 1;

   // Test drain connections call prior to any connections being created.
   pool_.drainConnections();
@@ -218,24 +298,78 @@ TEST_F(Http2ConnPoolImplTest, DrainConnections) {
   EXPECT_CALL(r1.inner_encoder_, encodeHeaders(_, true));
   r1.callbacks_.outer_encoder_->encodeHeaders(HeaderMapImpl{}, true);

+  // With max_requests_per_connection == 1, the second request moves the
+  // first connection to draining.
   expectClientCreate();
   ActiveTestRequest r2(*this, 1, false);
   expectClientConnect(1, r2);
   EXPECT_CALL(r2.inner_encoder_, encodeHeaders(_, true));
   r2.callbacks_.outer_encoder_->encodeHeaders(HeaderMapImpl{}, true);

-  // This will move primary to draining and destroy draining.
+  // This will move the second connection to draining.
   pool_.drainConnections();
-  EXPECT_CALL(*this, onClientDestroy());
-  dispatcher_.clearDeferredDeleteList();

-  // This will destroy draining.
+  // This will destroy the 2 draining connections.
+ test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); test_clients_[1].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); - EXPECT_CALL(*this, onClientDestroy()); + EXPECT_CALL(*this, onClientDestroy()).Times(2); dispatcher_.clearDeferredDeleteList(); EXPECT_EQ(2U, cluster_->stats_.upstream_cx_destroy_.value()); - EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_remote_.value()); + EXPECT_EQ(2U, cluster_->stats_.upstream_cx_destroy_remote_.value()); +} + +// Test that cluster.http2_protocol_options.max_concurrent_streams limits +// concurrent requests and causes additional connections to be created. +TEST_F(Http2ConnPoolImplTest, MaxConcurrentRequestsPerStream) { + cluster_->resetResourceManager(2, 1024, 1024, 1, 1); + cluster_->http2_settings_.max_concurrent_streams_ = 1; + + InSequence s; + + { + // Create request and complete it. + expectClientCreate(); + ActiveTestRequest r(*this, 0, false); + expectClientConnect(0, r); + completeRequest(r); + } + + // Previous request completed, so this one will re-use the connection. + { + ActiveTestRequest r(*this, 0, true); + completeRequest(r); + } + + // Two concurrent requests causes one additional connection to be created. + { + ActiveTestRequest r1(*this, 0, true); + expectClientCreate(); + ActiveTestRequest r2(*this, 1, false); + expectClientConnect(1, r2); + + // Complete one of them, and create another, and it will re-use the connection. + completeRequest(r2); + ActiveTestRequest r3(*this, 1, true); + + completeRequest(r1); + completeRequest(r3); + } + + // Create two more requests; both should use existing connections. 
+ { + ActiveTestRequest r1(*this, 1, true); + ActiveTestRequest r2(*this, 0, true); + completeRequest(r1); + completeRequest(r2); + } + + test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + test_clients_[1].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + EXPECT_CALL(*this, onClientDestroy()).Times(2); + dispatcher_.clearDeferredDeleteList(); + + EXPECT_EQ(2U, cluster_->stats_.upstream_cx_total_.value()); } // Verifies that requests are queued up in the conn pool until the connection becomes ready. @@ -275,11 +409,83 @@ TEST_F(Http2ConnPoolImplTest, PendingRequests) { EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_remote_.value()); } +// Verifies that the correct number of CONNECTING connections are created for +// the pending requests, when the total requests per connection is limited +TEST_F(Http2ConnPoolImplTest, PendingRequestsNumberConnectingTotalRequestsPerConnection) { + cluster_->max_requests_per_connection_ = 2; + InSequence s; + + // Create three requests. The 3rd should create a 2nd connection due to the limit + // of 2 requests per connection. + expectClientCreate(); + ActiveTestRequest r1(*this, 0, false); + ActiveTestRequest r2(*this, 0, false); + expectClientCreate(); + ActiveTestRequest r3(*this, 1, false); + + // The connection now becomes ready. This should cause all the queued requests to be sent. + expectStreamConnect(0, r1); + expectClientConnect(0, r2); + expectClientConnect(1, r3); + + // Send a request through each stream. + EXPECT_CALL(r1.inner_encoder_, encodeHeaders(_, true)); + r1.callbacks_.outer_encoder_->encodeHeaders(HeaderMapImpl{}, true); + + EXPECT_CALL(r2.inner_encoder_, encodeHeaders(_, true)); + r2.callbacks_.outer_encoder_->encodeHeaders(HeaderMapImpl{}, true); + + EXPECT_CALL(r3.inner_encoder_, encodeHeaders(_, true)); + r3.callbacks_.outer_encoder_->encodeHeaders(HeaderMapImpl{}, true); + + // Clean up everything. 
+ test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + test_clients_[1].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + EXPECT_CALL(*this, onClientDestroy()).Times(2); + dispatcher_.clearDeferredDeleteList(); +} + +// Verifies that the correct number of CONNECTING connections are created for +// the pending requests, when the concurrent requests per connection is limited +TEST_F(Http2ConnPoolImplTest, PendingRequestsNumberConnectingConcurrentRequestsPerConnection) { + cluster_->http2_settings_.max_concurrent_streams_ = 2; + InSequence s; + + // Create three requests. The 3rd should create a 2nd connection due to the limit + // of 2 requests per connection. + expectClientCreate(); + ActiveTestRequest r1(*this, 0, false); + ActiveTestRequest r2(*this, 0, false); + expectClientCreate(); + ActiveTestRequest r3(*this, 1, false); + + // The connection now becomes ready. This should cause all the queued requests to be sent. + expectStreamConnect(0, r1); + expectClientConnect(0, r2); + expectClientConnect(1, r3); + + // Send a request through each stream. + EXPECT_CALL(r1.inner_encoder_, encodeHeaders(_, true)); + r1.callbacks_.outer_encoder_->encodeHeaders(HeaderMapImpl{}, true); + + EXPECT_CALL(r2.inner_encoder_, encodeHeaders(_, true)); + r2.callbacks_.outer_encoder_->encodeHeaders(HeaderMapImpl{}, true); + + EXPECT_CALL(r3.inner_encoder_, encodeHeaders(_, true)); + r3.callbacks_.outer_encoder_->encodeHeaders(HeaderMapImpl{}, true); + + // Clean up everything. + test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + test_clients_[1].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + EXPECT_CALL(*this, onClientDestroy()).Times(2); + dispatcher_.clearDeferredDeleteList(); +} + // Verifies that requests are queued up in the conn pool and fail when the connection // fails to be established. 
TEST_F(Http2ConnPoolImplTest, PendingRequestsFailure) { InSequence s; - pool_.max_streams_ = 10; + cluster_->max_requests_per_connection_ = 10; // Create three requests. These should be queued up. expectClientCreate(); @@ -394,10 +600,10 @@ TEST_F(Http2ConnPoolImplTest, VerifyConnectionTimingStats) { EXPECT_CALL(r1.decoder_, decodeHeaders_(_, true)); r1.inner_decoder_->decodeHeaders(HeaderMapPtr{new HeaderMapImpl{}}, true); - test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); - EXPECT_CALL(*this, onClientDestroy()); EXPECT_CALL(cluster_->stats_store_, deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_cx_length_ms"), _)); + test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + EXPECT_CALL(*this, onClientDestroy()); dispatcher_.clearDeferredDeleteList(); EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_.value()); @@ -434,6 +640,7 @@ TEST_F(Http2ConnPoolImplTest, RequestAndResponse) { expectClientConnect(0, r1); EXPECT_CALL(r1.inner_encoder_, encodeHeaders(_, true)); r1.callbacks_.outer_encoder_->encodeHeaders(HeaderMapImpl{}, true); + EXPECT_EQ(1U, cluster_->stats_.upstream_cx_active_.value()); EXPECT_CALL(r1.decoder_, decodeHeaders_(_, true)); r1.inner_decoder_->decodeHeaders(HeaderMapPtr{new HeaderMapImpl{}}, true); @@ -447,6 +654,7 @@ TEST_F(Http2ConnPoolImplTest, RequestAndResponse) { EXPECT_CALL(*this, onClientDestroy()); dispatcher_.clearDeferredDeleteList(); + EXPECT_EQ(0U, cluster_->stats_.upstream_cx_active_.value()); EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_.value()); EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_remote_.value()); } @@ -468,6 +676,7 @@ TEST_F(Http2ConnPoolImplTest, LocalReset) { EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_remote_.value()); EXPECT_EQ(1U, cluster_->stats_.upstream_rq_tx_reset_.value()); EXPECT_EQ(0U, cluster_->circuit_breakers_stats_.rq_open_.value()); + EXPECT_EQ(0U, cluster_->stats_.upstream_cx_active_.value()); } 
TEST_F(Http2ConnPoolImplTest, RemoteReset) { @@ -487,11 +696,12 @@ TEST_F(Http2ConnPoolImplTest, RemoteReset) { EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_remote_.value()); EXPECT_EQ(1U, cluster_->stats_.upstream_rq_rx_reset_.value()); EXPECT_EQ(0U, cluster_->circuit_breakers_stats_.rq_open_.value()); + EXPECT_EQ(0U, cluster_->stats_.upstream_cx_active_.value()); } TEST_F(Http2ConnPoolImplTest, DrainDisconnectWithActiveRequest) { InSequence s; - pool_.max_streams_ = 1; + cluster_->max_requests_per_connection_ = 1; expectClientCreate(); ActiveTestRequest r1(*this, 0, false); @@ -513,8 +723,10 @@ TEST_F(Http2ConnPoolImplTest, DrainDisconnectWithActiveRequest) { } TEST_F(Http2ConnPoolImplTest, DrainDisconnectDrainingWithActiveRequest) { + cluster_->resetResourceManager(2, 1024, 1024, 1, 1); + InSequence s; - pool_.max_streams_ = 1; + cluster_->max_requests_per_connection_ = 1; expectClientCreate(); ActiveTestRequest r1(*this, 0, false); @@ -548,8 +760,10 @@ TEST_F(Http2ConnPoolImplTest, DrainDisconnectDrainingWithActiveRequest) { } TEST_F(Http2ConnPoolImplTest, DrainPrimary) { + cluster_->resetResourceManager(2, 1024, 1024, 1, 1); + InSequence s; - pool_.max_streams_ = 1; + cluster_->max_requests_per_connection_ = 1; expectClientCreate(); ActiveTestRequest r1(*this, 0, false); @@ -582,18 +796,20 @@ TEST_F(Http2ConnPoolImplTest, DrainPrimary) { } TEST_F(Http2ConnPoolImplTest, DrainPrimaryNoActiveRequest) { + cluster_->resetResourceManager(2, 1024, 1024, 1, 1); + InSequence s; - pool_.max_streams_ = 1; + cluster_->max_requests_per_connection_ = 1; expectClientCreate(); ActiveTestRequest r1(*this, 0, false); expectClientConnect(0, r1); EXPECT_CALL(r1.inner_encoder_, encodeHeaders(_, true)); r1.callbacks_.outer_encoder_->encodeHeaders(HeaderMapImpl{}, true); + EXPECT_CALL(dispatcher_, deferredDelete_(_)); EXPECT_CALL(r1.decoder_, decodeHeaders_(_, true)); r1.inner_decoder_->decodeHeaders(HeaderMapPtr{new HeaderMapImpl{}}, true); - EXPECT_CALL(dispatcher_, 
deferredDelete_(_)); expectClientCreate(); ActiveTestRequest r2(*this, 1, false); expectClientConnect(1, r2); @@ -601,11 +817,11 @@ TEST_F(Http2ConnPoolImplTest, DrainPrimaryNoActiveRequest) { dispatcher_.clearDeferredDeleteList(); EXPECT_CALL(r2.inner_encoder_, encodeHeaders(_, true)); r2.callbacks_.outer_encoder_->encodeHeaders(HeaderMapImpl{}, true); + EXPECT_CALL(dispatcher_, deferredDelete_(_)); EXPECT_CALL(r2.decoder_, decodeHeaders_(_, true)); r2.inner_decoder_->decodeHeaders(HeaderMapPtr{new HeaderMapImpl{}}, true); ReadyWatcher drained; - EXPECT_CALL(dispatcher_, deferredDelete_(_)); EXPECT_CALL(drained, ready()); pool_.addDrainedCallback([&]() -> void { drained.ready(); }); @@ -742,7 +958,7 @@ TEST_F(Http2ConnPoolImplTest, ResponseCompletedConnectionReadyNoActiveConnection // Show that if connections are draining, they're still considered active. TEST_F(Http2ConnPoolImplTest, DrainingConnectionsConsideredActive) { - pool_.max_streams_ = 1; + cluster_->max_requests_per_connection_ = 1; expectClientCreate(); ActiveTestRequest r1(*this, 0, false); expectClientConnect(0, r1); @@ -756,7 +972,7 @@ TEST_F(Http2ConnPoolImplTest, DrainingConnectionsConsideredActive) { // Show that once we've drained all connections, there are no longer any active. 
TEST_F(Http2ConnPoolImplTest, DrainedConnectionsNotActive) {
-  pool_.max_streams_ = 1;
+  cluster_->max_requests_per_connection_ = 1;
   expectClientCreate();
   ActiveTestRequest r1(*this, 0, false);
   expectClientConnect(0, r1);
diff --git a/test/test_common/utility.cc b/test/test_common/utility.cc
index 37dc8a74277ba..cc211a82d56b0 100644
--- a/test/test_common/utility.cc
+++ b/test/test_common/utility.cc
@@ -309,16 +309,22 @@ std::string TestUtility::convertTime(const std::string& input, const std::string
 }

 // static
-bool TestUtility::gaugesZeroed(const std::vector& gauges) {
-  // Returns true if all gauges are 0 except the circuit_breaker remaining resource
+std::string TestUtility::nonZeroedGauges(const std::vector& gauges) {
+  // Returns all gauges that are not 0, except the circuit_breaker remaining resource
   // gauges which default to the resource max.
   std::regex omitted(".*circuit_breakers\\..*\\.remaining.*");
+  std::string non_zero;
   for (const Stats::GaugeSharedPtr& gauge : gauges) {
     if (!std::regex_match(gauge->name(), omitted) && gauge->value() != 0) {
-      return false;
+      non_zero.append(fmt::format("{}: {}; ", gauge->name(), gauge->value()));
     }
   }
-  return true;
+  return non_zero;
+}
+
+// static
+bool TestUtility::gaugesZeroed(const std::vector& gauges) {
+  return nonZeroedGauges(gauges).empty();
 }

 // static
diff --git a/test/test_common/utility.h b/test/test_common/utility.h
index 433a7b363f8b4..65e7a34f861b0 100644
--- a/test/test_common/utility.h
+++ b/test/test_common/utility.h
@@ -510,6 +510,11 @@ class TestUtility {
   static bool gaugesZeroed(
       const std::vector>& gauges);

+  /**
+   * Returns the members of gauges that are not zero. Uses the same regex filter as gaugesZeroed().
+   */
+  static std::string nonZeroedGauges(const std::vector& gauges);
+
   // Strict variants of Protobuf::MessageUtil
   static void loadFromJson(const std::string& json, Protobuf::Message& message,
                            bool preserve_original_type = false) {