diff --git a/source/common/conn_pool/conn_pool_base.cc b/source/common/conn_pool/conn_pool_base.cc index 76950f3ccde71..e1c94b9e6deaf 100644 --- a/source/common/conn_pool/conn_pool_base.cc +++ b/source/common/conn_pool/conn_pool_base.cc @@ -228,7 +228,7 @@ void ConnPoolImplBase::onStreamClosed(Envoy::ConnectionPool::ActiveClient& clien } } -ConnectionPool::Cancellable* ConnPoolImplBase::newStream(AttachContext& context) { +ConnectionPool::Cancellable* ConnPoolImplBase::newStreamImpl(AttachContext& context) { ASSERT(!deferred_deleting_); ASSERT(static_cast<ssize_t>(connecting_stream_capacity_) == @@ -277,7 +277,7 @@ ConnectionPool::Cancellable* ConnPoolImplBase::newStream(AttachContext& context) } } -bool ConnPoolImplBase::maybePreconnect(float global_preconnect_ratio) { +bool ConnPoolImplBase::maybePreconnectImpl(float global_preconnect_ratio) { ASSERT(!deferred_deleting_); return tryCreateNewConnection(global_preconnect_ratio) == ConnectionResult::CreatedNewConnection; } diff --git a/source/common/conn_pool/conn_pool_base.h b/source/common/conn_pool/conn_pool_base.h index 8e06e8e68ed2b..5ce4d9936feef 100644 --- a/source/common/conn_pool/conn_pool_base.h +++ b/source/common/conn_pool/conn_pool_base.h @@ -162,9 +162,15 @@ class ConnPoolImplBase : protected Logger::Loggable<Logger::Id::pool> { int64_t connecting_and_connected_capacity, float preconnect_ratio, bool anticipate_incoming_stream = false); + // Envoy::ConnectionPool::Instance implementation helpers void addIdleCallbackImpl(Instance::IdleCb cb); + // Returns true if the pool is idle. + bool isIdleImpl() const; void startDrainImpl(); void drainConnectionsImpl(); + const Upstream::HostConstSharedPtr& host() const { return host_; } + // Called if this pool is likely to be picked soon, to determine if it's worth preconnecting. + bool maybePreconnectImpl(float global_preconnect_ratio); // Closes and destroys all connections. 
This must be called in the destructor of // derived classes because the derived ActiveClient will downcast parent_ to a more @@ -196,16 +202,11 @@ class ConnPoolImplBase : protected Logger::Loggable<Logger::Id::pool> { void onConnectionEvent(ActiveClient& client, absl::string_view failure_reason, Network::ConnectionEvent event); - // Returns true if the pool is idle. - bool isIdleImpl() const; - // See if the pool has gone idle. If we're draining, this will also close idle connections. void checkForIdleAndCloseIdleConnsIfDraining(); void scheduleOnUpstreamReady(); - ConnectionPool::Cancellable* newStream(AttachContext& context); - // Called if this pool is likely to be picked soon, to determine if it's worth preconnecting. - bool maybePreconnect(float global_preconnect_ratio); + ConnectionPool::Cancellable* newStreamImpl(AttachContext& context); virtual ConnectionPool::Cancellable* newPendingStream(AttachContext& context) PURE; @@ -220,7 +221,6 @@ class ConnPoolImplBase : protected Logger::Loggable<Logger::Id::pool> { // Called by derived classes any time a stream is completed or destroyed for any reason. 
void onStreamClosed(Envoy::ConnectionPool::ActiveClient& client, bool delay_attaching_stream); - const Upstream::HostConstSharedPtr& host() const { return host_; } Event::Dispatcher& dispatcher() { return dispatcher_; } Upstream::ResourcePriority priority() const { return priority_; } const Network::ConnectionSocket::OptionsSharedPtr& socketOptions() { return socket_options_; } diff --git a/source/common/http/conn_pool_base.cc b/source/common/http/conn_pool_base.cc index 4a67bea9da859..8bc119a38f871 100644 --- a/source/common/http/conn_pool_base.cc +++ b/source/common/http/conn_pool_base.cc @@ -60,7 +60,7 @@ ConnectionPool::Cancellable* HttpConnPoolImplBase::newStream(Http::ResponseDecoder& response_decoder, Http::ConnectionPool::Callbacks& callbacks) { HttpAttachContext context({&response_decoder, &callbacks}); - return Envoy::ConnectionPool::ConnPoolImplBase::newStream(context); + return newStreamImpl(context); } bool HttpConnPoolImplBase::hasActiveConnections() const { diff --git a/source/common/http/conn_pool_base.h b/source/common/http/conn_pool_base.h index e2f7494c0e38b..e14a6ffa2ae3a 100644 --- a/source/common/http/conn_pool_base.h +++ b/source/common/http/conn_pool_base.h @@ -66,9 +66,7 @@ class HttpConnPoolImplBase : public Envoy::ConnectionPool::ConnPoolImplBase, Upstream::HostDescriptionConstSharedPtr host() const override { return host_; } ConnectionPool::Cancellable* newStream(Http::ResponseDecoder& response_decoder, Http::ConnectionPool::Callbacks& callbacks) override; - bool maybePreconnect(float ratio) override { - return Envoy::ConnectionPool::ConnPoolImplBase::maybePreconnect(ratio); - } + bool maybePreconnect(float ratio) override { return maybePreconnectImpl(ratio); } bool hasActiveConnections() const override; // Creates a new PendingStream and enqueues it into the queue. 
diff --git a/source/common/tcp/conn_pool.h b/source/common/tcp/conn_pool.h index 398254b498461..949feeb7ee7a2 100644 --- a/source/common/tcp/conn_pool.h +++ b/source/common/tcp/conn_pool.h @@ -176,10 +176,10 @@ class ConnPoolImpl : public Envoy::ConnectionPool::ConnPoolImplBase, } ConnectionPool::Cancellable* newConnection(Tcp::ConnectionPool::Callbacks& callbacks) override { TcpAttachContext context(&callbacks); - return Envoy::ConnectionPool::ConnPoolImplBase::newStream(context); + return newStreamImpl(context); } bool maybePreconnect(float preconnect_ratio) override { - return Envoy::ConnectionPool::ConnPoolImplBase::maybePreconnect(preconnect_ratio); + return maybePreconnectImpl(preconnect_ratio); } ConnectionPool::Cancellable* diff --git a/source/extensions/clusters/dynamic_forward_proxy/cluster.h b/source/extensions/clusters/dynamic_forward_proxy/cluster.h index dcd1e235607f5..e7266b97692e1 100644 --- a/source/extensions/clusters/dynamic_forward_proxy/cluster.h +++ b/source/extensions/clusters/dynamic_forward_proxy/cluster.h @@ -51,7 +51,8 @@ class Cluster : public Upstream::BaseDynamicClusterImpl, using HostInfoMap = absl::flat_hash_map; - struct LoadBalancer : public Upstream::LoadBalancer { + class LoadBalancer : public Upstream::LoadBalancer { + public: LoadBalancer(const Cluster& cluster) : cluster_(cluster) {} // Upstream::LoadBalancer @@ -61,19 +62,23 @@ class Cluster : public Upstream::BaseDynamicClusterImpl, return nullptr; } + private: const Cluster& cluster_; }; - struct LoadBalancerFactory : public Upstream::LoadBalancerFactory { + class LoadBalancerFactory : public Upstream::LoadBalancerFactory { + public: LoadBalancerFactory(Cluster& cluster) : cluster_(cluster) {} // Upstream::LoadBalancerFactory Upstream::LoadBalancerPtr create() override { return std::make_unique<LoadBalancer>(cluster_); } + private: Cluster& cluster_; }; - struct ThreadAwareLoadBalancer : public 
Upstream::ThreadAwareLoadBalancer { + public: ThreadAwareLoadBalancer(Cluster& cluster) : cluster_(cluster) {} // Upstream::ThreadAwareLoadBalancer @@ -82,6 +87,7 @@ class Cluster : public Upstream::BaseDynamicClusterImpl, } void initialize() override {} + private: Cluster& cluster_; }; diff --git a/test/common/conn_pool/conn_pool_base_test.cc b/test/common/conn_pool/conn_pool_base_test.cc index f1f53f69f9fd8..869dafa68a678 100644 --- a/test/common/conn_pool/conn_pool_base_test.cc +++ b/test/common/conn_pool/conn_pool_base_test.cc @@ -108,7 +108,7 @@ TEST_F(ConnPoolImplBaseTest, BasicPreconnect) { // On new stream, create 2 connections. CHECK_STATE(0 /*active*/, 0 /*pending*/, 0 /*connecting capacity*/); EXPECT_CALL(pool_, instantiateActiveClient).Times(2); - auto cancelable = pool_.newStream(context_); + auto cancelable = pool_.newStreamImpl(context_); CHECK_STATE(0 /*active*/, 1 /*pending*/, 2 /*connecting capacity*/); cancelable->cancel(ConnectionPool::CancelPolicy::CloseExcess); @@ -124,13 +124,13 @@ TEST_F(ConnPoolImplBaseTest, PreconnectOnDisconnect) { // On new stream, create 2 connections. EXPECT_CALL(pool_, instantiateActiveClient).Times(2); - pool_.newStream(context_); + pool_.newStreamImpl(context_); CHECK_STATE(0 /*active*/, 1 /*pending*/, 2 /*connecting capacity*/); // If a connection fails, existing connections are purged. If a retry causes // a new stream, make sure we create the correct number of connections. EXPECT_CALL(pool_, onPoolFailure).WillOnce(InvokeWithoutArgs([&]() -> void { - pool_.newStream(context_); + pool_.newStreamImpl(context_); })); EXPECT_CALL(pool_, instantiateActiveClient); clients_[0]->close(); @@ -149,7 +149,7 @@ TEST_F(ConnPoolImplBaseTest, NoPreconnectIfUnhealthy) { // On new stream, create 1 connection. 
EXPECT_CALL(pool_, instantiateActiveClient); - auto cancelable = pool_.newStream(context_); + auto cancelable = pool_.newStreamImpl(context_); CHECK_STATE(0 /*active*/, 1 /*pending*/, 1 /*connecting capacity*/); cancelable->cancel(ConnectionPool::CancelPolicy::CloseExcess); @@ -166,7 +166,7 @@ TEST_F(ConnPoolImplBaseTest, NoPreconnectIfDegraded) { // On new stream, create 1 connection. EXPECT_CALL(pool_, instantiateActiveClient); - auto cancelable = pool_.newStream(context_); + auto cancelable = pool_.newStreamImpl(context_); cancelable->cancel(ConnectionPool::CancelPolicy::CloseExcess); pool_.destructAllConnections(); @@ -178,17 +178,17 @@ TEST_F(ConnPoolImplBaseTest, ExplicitPreconnect) { EXPECT_CALL(pool_, instantiateActiveClient).Times(AnyNumber()); // With global preconnect off, we won't preconnect. - EXPECT_FALSE(pool_.maybePreconnect(0)); + EXPECT_FALSE(pool_.maybePreconnectImpl(0)); CHECK_STATE(0 /*active*/, 0 /*pending*/, 0 /*connecting capacity*/); // With preconnect ratio of 1.1, we'll preconnect two connections. // Currently, no number of subsequent calls to preconnect will increase that. - EXPECT_TRUE(pool_.maybePreconnect(1.1)); - EXPECT_TRUE(pool_.maybePreconnect(1.1)); - EXPECT_FALSE(pool_.maybePreconnect(1.1)); + EXPECT_TRUE(pool_.maybePreconnectImpl(1.1)); + EXPECT_TRUE(pool_.maybePreconnectImpl(1.1)); + EXPECT_FALSE(pool_.maybePreconnectImpl(1.1)); CHECK_STATE(0 /*active*/, 0 /*pending*/, 2 /*connecting capacity*/); // With a higher preconnect ratio, more connections may be preconnected. - EXPECT_TRUE(pool_.maybePreconnect(3)); + EXPECT_TRUE(pool_.maybePreconnectImpl(3)); pool_.destructAllConnections(); } @@ -199,7 +199,7 @@ TEST_F(ConnPoolImplBaseTest, ExplicitPreconnectNotHealthy) { // Preconnect won't occur if the host is not healthy. 
host_->healthFlagSet(Upstream::Host::HealthFlag::DEGRADED_EDS_HEALTH); - EXPECT_FALSE(pool_.maybePreconnect(1)); + EXPECT_FALSE(pool_.maybePreconnectImpl(1)); } // Remote close simulates the peer closing the connection. @@ -208,7 +208,7 @@ TEST_F(ConnPoolImplBaseTest, PoolIdleCallbackTriggeredRemoteClose) { // Create a new stream using the pool EXPECT_CALL(pool_, instantiateActiveClient); - pool_.newStream(context_); + pool_.newStreamImpl(context_); ASSERT_EQ(1, clients_.size()); // Emulate the new upstream connection establishment @@ -236,7 +236,7 @@ TEST_F(ConnPoolImplBaseTest, PoolIdleCallbackTriggeredLocalClose) { // Create a new stream using the pool EXPECT_CALL(pool_, instantiateActiveClient); - pool_.newStream(context_); + pool_.newStreamImpl(context_); ASSERT_EQ(1, clients_.size()); // Emulate the new upstream connection establishment