diff --git a/source/common/conn_pool/conn_pool_base.cc b/source/common/conn_pool/conn_pool_base.cc index 3573a9d753789..a7f45486b426b 100644 --- a/source/common/conn_pool/conn_pool_base.cc +++ b/source/common/conn_pool/conn_pool_base.cc @@ -106,6 +106,7 @@ float ConnPoolImplBase::perUpstreamPreconnectRatio() const { } ConnPoolImplBase::ConnectionResult ConnPoolImplBase::tryCreateNewConnections() { + ASSERT(!is_draining_for_deletion_); ConnPoolImplBase::ConnectionResult result; // Somewhat arbitrarily cap the number of connections preconnected due to new // incoming connections. The preconnect ratio is capped at 3, so in steady @@ -462,7 +463,9 @@ void ConnPoolImplBase::onConnectionEvent(ActiveClient& client, absl::string_view // if retry logic submits a new stream to the pool, we don't fail it inline. purgePendingStreams(client.real_host_description_, failure_reason, reason); // See if we should preconnect based on active connections. - tryCreateNewConnections(); + if (!is_draining_for_deletion_) { + tryCreateNewConnections(); + } } // We need to release our resourceManager() resources before checking below for diff --git a/test/integration/cds_integration_test.cc b/test/integration/cds_integration_test.cc index df05bec16f24b..857e570219acf 100644 --- a/test/integration/cds_integration_test.cc +++ b/test/integration/cds_integration_test.cc @@ -182,6 +182,32 @@ TEST_P(CdsIntegrationTest, CdsClusterUpDownUp) { cleanupUpstreamAndDownstream(); } +// Make sure that clusters won't create new connections on teardown. +TEST_P(CdsIntegrationTest, CdsClusterTeardownWhileConnecting) { + initialize(); + test_server_->waitForCounterGe("cluster_manager.cluster_added", 1); + + // Make the upstreams stop working, to ensure the connection was not + // established. 
+ fake_upstreams_[1]->dispatcher()->exit(); + fake_upstreams_[2]->dispatcher()->exit(); + codec_client_ = makeHttpConnection(lookupPort("http")); + auto encoder_decoder = codec_client_->startRequest(Http::TestRequestHeaderMapImpl{ + {":method", "GET"}, {":path", "/cluster1"}, {":scheme", "http"}, {":authority", "host"}}); + + // Tell Envoy that cluster_1 is gone. + EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, "55", {}, {}, {})); + sendDiscoveryResponse<envoy::config::cluster::v3::Cluster>(Config::TypeUrl::get().Cluster, {}, {}, + {ClusterName1}, "42"); + // We can continue the test once we're sure that Envoy's ClusterManager has made use of + // the DiscoveryResponse that says cluster_1 is gone. + test_server_->waitForCounterGe("cluster_manager.cluster_removed", 1); + codec_client_->sendReset(encoder_decoder.first); + cleanupUpstreamAndDownstream(); + // Make sure there was only one connection attempt. + EXPECT_LE(test_server_->counter("cluster.cluster_1.upstream_cx_total")->value(), 1); +} + // Test the fast addition and removal of clusters when they use ThreadAwareLb. TEST_P(CdsIntegrationTest, CdsClusterWithThreadAwareLbCycleUpDownUp) { // Calls our initialize(), which includes establishing a listener, route, and cluster. diff --git a/tools/base/envoy_python.bzl b/tools/base/envoy_python.bzl index 3745bc774a5d2..284816da6bd90 100644 --- a/tools/base/envoy_python.bzl +++ b/tools/base/envoy_python.bzl @@ -1,4 +1,4 @@ -load("@rules_python//python:defs.bzl", "py_binary", "py_library") +load("@rules_python//python:defs.bzl", "py_binary") load("@base_pip3//:requirements.bzl", base_entry_point = "entry_point") def envoy_py_test(name, package, visibility, envoy_prefix = "@envoy"):