diff --git a/source/common/common/BUILD b/source/common/common/BUILD index 76ed1b37bc4bf..6b1d9c0209868 100644 --- a/source/common/common/BUILD +++ b/source/common/common/BUILD @@ -21,6 +21,12 @@ envoy_cc_library( deps = [":minimal_logger_lib"], ) +envoy_cc_library( + name = "debug_recursion_checker_lib", + hdrs = ["debug_recursion_checker.h"], + deps = [":assert_lib"], +) + envoy_cc_library( name = "backoff_lib", srcs = ["backoff_strategy.cc"], diff --git a/source/common/common/debug_recursion_checker.h b/source/common/common/debug_recursion_checker.h new file mode 100644 index 0000000000000..e2ab275ca219d --- /dev/null +++ b/source/common/common/debug_recursion_checker.h @@ -0,0 +1,42 @@ +#pragma once + +#include "common/common/assert.h" + +namespace Envoy { +namespace Common { +/** + * A helper class to assert that a call is not recursive. + */ +class DebugRecursionChecker { +public: + void enter() { + ASSERT(!entered_, "A resource should only be entered once"); +#if !defined(NDEBUG) + entered_ = true; +#endif // !defined(NDEBUG) + } + + void exit() { +#if !defined(NDEBUG) + entered_ = false; +#endif // !defined(NDEBUG) + } + +private: + bool entered_ = false; +}; + +class AutoDebugRecursionChecker { +public: + explicit AutoDebugRecursionChecker(DebugRecursionChecker& checker) : checker_(checker) { + checker.enter(); + } + + ~AutoDebugRecursionChecker() { checker_.exit(); } + +private: + DebugRecursionChecker& checker_; +}; + +} // namespace Common +} // namespace Envoy diff --git a/source/common/upstream/BUILD b/source/common/upstream/BUILD index 6a1b53f365bd4..aaad5800e0d1a 100644 --- a/source/common/upstream/BUILD +++ b/source/common/upstream/BUILD @@ -60,12 +60,31 @@ envoy_cc_library( "//source/common/protobuf:utility_lib", "//source/common/router:shadow_writer_lib", "//source/common/tcp:conn_pool_lib", + "//source/common/upstream:conn_pool_map", + "//source/common/upstream:conn_pool_map_impl_lib", "//source/common/upstream:upstream_lib", 
"@envoy_api//envoy/admin/v2alpha:config_dump_cc", "@envoy_api//envoy/api/v2/core:base_cc", ], ) +envoy_cc_library( + name = "conn_pool_map", + hdrs = ["conn_pool_map.h"], + deps = [ + "//include/envoy/event:dispatcher_interface", + "//source/common/common:debug_recursion_checker_lib", + ], +) + +envoy_cc_library( + name = "conn_pool_map_impl_lib", + hdrs = ["conn_pool_map_impl.h"], + deps = [ + ":conn_pool_map", + ], +) + envoy_cc_library( name = "edf_scheduler_lib", hdrs = ["edf_scheduler.h"], diff --git a/source/common/upstream/cluster_manager_impl.cc b/source/common/upstream/cluster_manager_impl.cc index dd11deb034f13..46d96b2320ac9 100644 --- a/source/common/upstream/cluster_manager_impl.cc +++ b/source/common/upstream/cluster_manager_impl.cc @@ -30,6 +30,7 @@ #include "common/router/shadow_writer_impl.h" #include "common/tcp/conn_pool.h" #include "common/upstream/cds_api_impl.h" +#include "common/upstream/conn_pool_map_impl.h" #include "common/upstream/load_balancer_impl.h" #include "common/upstream/maglev_lb.h" #include "common/upstream/original_dst_cluster.h" @@ -817,9 +818,9 @@ ClusterManagerImpl::ThreadLocalClusterManagerImpl::~ThreadLocalClusterManagerImp void ClusterManagerImpl::ThreadLocalClusterManagerImpl::drainConnPools(const HostVector& hosts) { for (const HostSharedPtr& host : hosts) { { - auto container = host_http_conn_pool_map_.find(host); - if (container != host_http_conn_pool_map_.end()) { - drainConnPools(host, container->second); + auto container = getHttpConnPoolsContainer(host); + if (container != nullptr) { + drainConnPools(host, *container); } } { @@ -833,38 +834,49 @@ void ClusterManagerImpl::ThreadLocalClusterManagerImpl::drainConnPools(const Hos void ClusterManagerImpl::ThreadLocalClusterManagerImpl::drainConnPools( HostSharedPtr old_host, ConnPoolsContainer& container) { - container.drains_remaining_ += container.pools_.size(); - - for (const auto& pair : container.pools_) { - pair.second->addDrainedCallback([this, old_host]() -> 
void { - if (destroying_) { - // It is possible for a connection pool to fire drain callbacks during destruction. Instead - // of checking if old_host actually exists in the map, it's clearer and cleaner to keep - // track of destruction as a separate state and check for it here. This also allows us to - // do this check here versus inside every different connection pool implementation. - return; - } + container.drains_remaining_ += container.pools_->size(); + + // Make a copy to protect against erasure in the callback. + std::shared_ptr pools = container.pools_; + pools->addDrainedCallback([this, old_host]() -> void { + if (destroying_) { + // It is possible for a connection pool to fire drain callbacks during destruction. Instead + // of checking if old_host actually exists in the map, it's clearer and cleaner to keep + // track of destruction as a separate state and check for it here. This also allows us to + // do this check here versus inside every different connection pool implementation. + return; + } - ConnPoolsContainer& container = host_http_conn_pool_map_[old_host]; - ASSERT(container.drains_remaining_ > 0); - container.drains_remaining_--; - if (container.drains_remaining_ == 0) { - for (auto& pair : container.pools_) { - thread_local_dispatcher_.deferredDelete(std::move(pair.second)); - } - host_http_conn_pool_map_.erase(old_host); - } - }); + ConnPoolsContainer* to_clear = getHttpConnPoolsContainer(old_host); + if (to_clear == nullptr) { + // This could happen if we have cleaned out the host before iterating through every connection + // pool. Handle it by just continuing. + return; + } - // The above addDrainedCallback() drain completion callback might execute immediately. This can - // then effectively nuke 'container', which means we can't continue to loop on its contents - // (we're done here). 
- if (host_http_conn_pool_map_.count(old_host) == 0) { - break; + ASSERT(to_clear->drains_remaining_ > 0); + to_clear->drains_remaining_--; + if (to_clear->drains_remaining_ == 0 && to_clear->ready_to_drain_) { + clearContainer(old_host, *to_clear); } + }); + + // We need to hold off on actually emptying out the container until we have finished processing + // `addDrainedCallback`. If we do not, then it's possible that the container could be erased in + // the middle of its iteration, which leads to undefined behaviour. We handle that case by + // checking here to see if the drains have completed. + container.ready_to_drain_ = true; + if (container.drains_remaining_ == 0) { + clearContainer(old_host, container); } } +void ClusterManagerImpl::ThreadLocalClusterManagerImpl::clearContainer( + HostSharedPtr old_host, ConnPoolsContainer& container) { + container.pools_->clear(); + host_http_conn_pool_map_.erase(old_host); +} + void ClusterManagerImpl::ThreadLocalClusterManagerImpl::drainTcpConnPools( HostSharedPtr old_host, TcpConnPoolsContainer& container) { container.drains_remaining_ += container.pools_.size(); @@ -946,12 +958,9 @@ void ClusterManagerImpl::ThreadLocalClusterManagerImpl::onHostHealthFailure( // more targeted. 
ThreadLocalClusterManagerImpl& config = tls.getTyped(); { - const auto& container = config.host_http_conn_pool_map_.find(host); - if (container != config.host_http_conn_pool_map_.end()) { - for (const auto& pair : container->second.pools_) { - const Http::ConnectionPool::InstancePtr& pool = pair.second; - pool->drainConnections(); - } + const auto container = config.getHttpConnPoolsContainer(host); + if (container != nullptr) { + container->pools_->drainConnections(); } } { @@ -986,6 +995,21 @@ void ClusterManagerImpl::ThreadLocalClusterManagerImpl::onHostHealthFailure( } } +ClusterManagerImpl::ThreadLocalClusterManagerImpl::ConnPoolsContainer* +ClusterManagerImpl::ThreadLocalClusterManagerImpl::getHttpConnPoolsContainer( + const HostConstSharedPtr& host, bool allocate) { + auto container_iter = host_http_conn_pool_map_.find(host); + if (container_iter == host_http_conn_pool_map_.end()) { + if (!allocate) { + return nullptr; + } + ConnPoolsContainer container{thread_local_dispatcher_}; + container_iter = host_http_conn_pool_map_.emplace(host, std::move(container)).first; + } + + return &container_iter->second; +} + ClusterManagerImpl::ThreadLocalClusterManagerImpl::ClusterEntry::ClusterEntry( ThreadLocalClusterManagerImpl& parent, ClusterInfoConstSharedPtr cluster, const LoadBalancerFactorySharedPtr& lb_factory) @@ -1092,14 +1116,17 @@ ClusterManagerImpl::ThreadLocalClusterManagerImpl::ClusterEntry::connPool( } } - ConnPoolsContainer& container = parent_.host_http_conn_pool_map_[host]; - if (!container.pools_[hash_key]) { - container.pools_[hash_key] = parent_.parent_.factory_.allocateConnPool( + ConnPoolsContainer& container = *parent_.getHttpConnPoolsContainer(host, true); + + // Note: to simplify this, we assume that the factory is only called in the scope of this + // function. Otherwise, we'd need to capture a few of these variables by value. 
+ Http::ConnectionPool::Instance& pool = container.pools_->getPool(hash_key, [&]() { + return parent_.parent_.factory_.allocateConnPool( parent_.thread_local_dispatcher_, host, priority, protocol, have_options ? context->downstreamConnection()->socketOptions() : nullptr); - } + }); - return container.pools_[hash_key].get(); + return &pool; } Tcp::ConnectionPool::Instance* diff --git a/source/common/upstream/cluster_manager_impl.h b/source/common/upstream/cluster_manager_impl.h index f9a3bf5abc870..04431ca14742e 100644 --- a/source/common/upstream/cluster_manager_impl.h +++ b/source/common/upstream/cluster_manager_impl.h @@ -22,6 +22,7 @@ #include "common/config/grpc_mux_impl.h" #include "common/http/async_client_impl.h" +#include "common/upstream/conn_pool_map.h" #include "common/upstream/load_stats_reporter.h" #include "common/upstream/upstream_impl.h" @@ -231,9 +232,14 @@ class ClusterManagerImpl : public ClusterManager, Logger::Loggable, Http::ConnectionPool::InstancePtr> ConnPools; + ConnPoolsContainer(Event::Dispatcher& dispatcher) + : pools_{std::make_shared(dispatcher)} {} - ConnPools pools_; + typedef ConnPoolMap, Http::ConnectionPool::Instance> ConnPools; + + // This is a shared_ptr so we can keep it alive while cleaning up. + std::shared_ptr pools_; + bool ready_to_drain_{false}; uint64_t drains_remaining_{}; }; @@ -307,6 +313,7 @@ class ClusterManagerImpl : public ClusterManager, Logger::Loggable thread_local_clusters_; diff --git a/source/common/upstream/conn_pool_map.h b/source/common/upstream/conn_pool_map.h new file mode 100644 index 0000000000000..c602c5dd96bc2 --- /dev/null +++ b/source/common/upstream/conn_pool_map.h @@ -0,0 +1,61 @@ +#pragma once + +#include + +#include "envoy/event/dispatcher.h" + +#include "common/common/debug_recursion_checker.h" + +#include "absl/container/flat_hash_map.h" + +namespace Envoy { +namespace Upstream { +/** + * A class mapping keys to connection pools, with some recycling logic built in. 
+ */ +template class ConnPoolMap { +public: + using PoolFactory = std::function()>; + using DrainedCb = std::function; + + ConnPoolMap(Event::Dispatcher& dispatcher); + ~ConnPoolMap(); + /** + * Returns an existing pool for `key`, or creates a new one using `factory`. Note that it is + * possible for this to fail if a limit on the number of pools allowed is reached. + * @return a reference to the pool corresponding to `key`. + */ + POOL_TYPE& getPool(KEY_TYPE key, const PoolFactory& factory); + + /** + * @return the number of pools. + */ + size_t size() const; + + /** + * Destroys all mapped pools. + */ + void clear(); + + /** + * Adds a drain callback to all mapped pools. Any future mapped pools will have the callback + * automatically added. Be careful with the callback. If it itself calls into `this`, modifying + * the state of `this`, there is a good chance it will cause corruption due to the callback firing + * immediately. + */ + void addDrainedCallback(const DrainedCb& cb); + + /** + * Instructs each connection pool to drain its connections.
+ */ + void drainConnections(); + +private: + absl::flat_hash_map> active_pools_; + Event::Dispatcher& thread_local_dispatcher_; + std::vector cached_callbacks_; + Common::DebugRecursionChecker recursion_checker_; +}; + +} // namespace Upstream +} // namespace Envoy diff --git a/source/common/upstream/conn_pool_map_impl.h b/source/common/upstream/conn_pool_map_impl.h new file mode 100644 index 0000000000000..518805106ca93 --- /dev/null +++ b/source/common/upstream/conn_pool_map_impl.h @@ -0,0 +1,67 @@ +#pragma once + +#include "common/upstream/conn_pool_map.h" + +namespace Envoy { +namespace Upstream { + +template +ConnPoolMap::ConnPoolMap(Envoy::Event::Dispatcher& dispatcher) + : thread_local_dispatcher_(dispatcher) {} + +template +ConnPoolMap::~ConnPoolMap() = default; + +template +POOL_TYPE& ConnPoolMap::getPool(KEY_TYPE key, const PoolFactory& factory) { + Common::AutoDebugRecursionChecker assert_not_in(recursion_checker_); + // TODO(klarose): Consider how we will change the connection pool's configuration in the future. + // The plan is to change the downstream socket options... We may want to take those as a parameter + // here. Maybe we'll pass them to the factory function? + auto inserted = active_pools_.emplace(key, nullptr); + + // If we inserted a new element, create a pool and assign it to the iterator. Tell it about any + // cached callbacks. 
+ if (inserted.second) { + inserted.first->second = factory(); + for (const auto& cb : cached_callbacks_) { + inserted.first->second->addDrainedCallback(cb); + } + } + + return *inserted.first->second; +} + +template +size_t ConnPoolMap::size() const { + return active_pools_.size(); +} + +template void ConnPoolMap::clear() { + Common::AutoDebugRecursionChecker assert_not_in(recursion_checker_); + for (auto& pool_pair : active_pools_) { + thread_local_dispatcher_.deferredDelete(std::move(pool_pair.second)); + } + + active_pools_.clear(); +} + +template +void ConnPoolMap::addDrainedCallback(const DrainedCb& cb) { + Common::AutoDebugRecursionChecker assert_not_in(recursion_checker_); + for (auto& pool_pair : active_pools_) { + pool_pair.second->addDrainedCallback(cb); + } + + cached_callbacks_.emplace_back(std::move(cb)); +} + +template +void ConnPoolMap::drainConnections() { + Common::AutoDebugRecursionChecker assert_not_in(recursion_checker_); + for (auto& pool_pair : active_pools_) { + pool_pair.second->drainConnections(); + } +} +} // namespace Upstream +} // namespace Envoy diff --git a/test/common/upstream/BUILD b/test/common/upstream/BUILD index cddee4ad412fd..f40a35b0d640b 100644 --- a/test/common/upstream/BUILD +++ b/test/common/upstream/BUILD @@ -60,6 +60,19 @@ envoy_cc_test( ], ) +envoy_cc_test( + name = "conn_pool_map_impl_test", + srcs = ["conn_pool_map_impl_test.cc"], + deps = [ + "//include/envoy/http:conn_pool_interface", + "//source/common/upstream:conn_pool_map_impl_lib", + "//test/mocks:common_lib", + "//test/mocks/event:event_mocks", + "//test/mocks/http:conn_pool_mocks", + "//test/test_common:utility_lib", + ], +) + envoy_cc_test( name = "edf_scheduler_test", srcs = ["edf_scheduler_test.cc"], diff --git a/test/common/upstream/conn_pool_map_impl_test.cc b/test/common/upstream/conn_pool_map_impl_test.cc new file mode 100644 index 0000000000000..11de76b94ea6d --- /dev/null +++ b/test/common/upstream/conn_pool_map_impl_test.cc @@ -0,0 +1,235 @@ 
+#include +#include + +#include "envoy/http/conn_pool.h" + +#include "common/upstream/conn_pool_map_impl.h" + +#include "test/mocks/common.h" +#include "test/mocks/event/mocks.h" +#include "test/mocks/http/conn_pool.h" +#include "test/test_common/test_base.h" +#include "test/test_common/utility.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +using testing::Invoke; +using testing::NiceMock; +using testing::SaveArg; + +namespace Envoy { +namespace Upstream { + +class ConnPoolMapImplTest : public TestBase { +public: + // Note, we could test with Http::ConnectionPool::MockInstance here, which would simplify the + // test. However, it's nice to test against an actual interface we'll be using. + using TestMap = ConnPoolMap; + using TestMapPtr = std::unique_ptr; + + TestMapPtr makeTestMap() { return std::make_unique(dispatcher_); } + + TestMap::PoolFactory getBasicFactory() { + return [&]() { + auto pool = std::make_unique>(); + mock_pools_.push_back(pool.get()); + return pool; + }; + } + + TestMap::PoolFactory getFactoryExpectDrainedCb(Http::ConnectionPool::Instance::DrainedCb* cb) { + return [this, cb]() { + auto pool = std::make_unique>(); + EXPECT_CALL(*pool, addDrainedCallback(_)).WillOnce(SaveArg<0>(cb)); + mock_pools_.push_back(pool.get()); + return pool; + }; + } + +protected: + NiceMock dispatcher_; + std::vector*> mock_pools_; +}; + +TEST_F(ConnPoolMapImplTest, TestMapIsEmptyOnConstruction) { + TestMapPtr test_map = makeTestMap(); + + EXPECT_EQ(test_map->size(), 0); +} + +TEST_F(ConnPoolMapImplTest, TestAddingAConnPoolIncreasesSize) { + TestMapPtr test_map = makeTestMap(); + + test_map->getPool(1, getBasicFactory()); + EXPECT_EQ(test_map->size(), 1); +} + +TEST_F(ConnPoolMapImplTest, TestAddingTwoConnPoolsIncreasesSize) { + TestMapPtr test_map = makeTestMap(); + + test_map->getPool(1, getBasicFactory()); + test_map->getPool(2, getBasicFactory()); + EXPECT_EQ(test_map->size(), 2); +} + +TEST_F(ConnPoolMapImplTest, TestConnPoolReturnedMatchesCreated) { 
+ TestMapPtr test_map = makeTestMap(); + + Http::ConnectionPool::Instance& pool = test_map->getPool(1, getBasicFactory()); + EXPECT_EQ(&pool, mock_pools_[0]); +} + +TEST_F(ConnPoolMapImplTest, TestConnSecondPoolReturnedMatchesCreated) { + TestMapPtr test_map = makeTestMap(); + + test_map->getPool(1, getBasicFactory()); + Http::ConnectionPool::Instance& pool = test_map->getPool(2, getBasicFactory()); + EXPECT_EQ(&pool, mock_pools_[1]); +} + +TEST_F(ConnPoolMapImplTest, TestMultipleOfSameKeyReturnsOriginal) { + TestMapPtr test_map = makeTestMap(); + + Http::ConnectionPool::Instance& pool1 = test_map->getPool(1, getBasicFactory()); + Http::ConnectionPool::Instance& pool2 = test_map->getPool(2, getBasicFactory()); + + EXPECT_EQ(&pool1, &test_map->getPool(1, getBasicFactory())); + EXPECT_EQ(&pool2, &test_map->getPool(2, getBasicFactory())); + EXPECT_EQ(test_map->size(), 2); +} + +TEST_F(ConnPoolMapImplTest, TestEmptyClerWorks) { + TestMapPtr test_map = makeTestMap(); + + test_map->clear(); + EXPECT_EQ(test_map->size(), 0); +} + +TEST_F(ConnPoolMapImplTest, TestClearEmptiesOutMap) { + TestMapPtr test_map = makeTestMap(); + + test_map->getPool(1, getBasicFactory()); + test_map->getPool(2, getBasicFactory()); + + test_map->clear(); + EXPECT_EQ(test_map->size(), 0); +} + +TEST_F(ConnPoolMapImplTest, CallbacksPassedToPools) { + TestMapPtr test_map = makeTestMap(); + + test_map->getPool(1, getBasicFactory()); + test_map->getPool(2, getBasicFactory()); + Http::ConnectionPool::Instance::DrainedCb cb1; + EXPECT_CALL(*mock_pools_[0], addDrainedCallback(_)).WillOnce(SaveArg<0>(&cb1)); + Http::ConnectionPool::Instance::DrainedCb cb2; + EXPECT_CALL(*mock_pools_[1], addDrainedCallback(_)).WillOnce(SaveArg<0>(&cb2)); + + ReadyWatcher watcher; + test_map->addDrainedCallback([&watcher] { watcher.ready(); }); + + EXPECT_CALL(watcher, ready()).Times(2); + cb1(); + cb2(); +} + +// Tests that if we add the callback first, it is passed along when pools are created later. 
+TEST_F(ConnPoolMapImplTest, CallbacksCachedAndPassedOnCreation) { + TestMapPtr test_map = makeTestMap(); + + ReadyWatcher watcher; + test_map->addDrainedCallback([&watcher] { watcher.ready(); }); + + Http::ConnectionPool::Instance::DrainedCb cb1; + test_map->getPool(1, getFactoryExpectDrainedCb(&cb1)); + + Http::ConnectionPool::Instance::DrainedCb cb2; + test_map->getPool(2, getFactoryExpectDrainedCb(&cb2)); + + EXPECT_CALL(watcher, ready()).Times(2); + cb1(); + cb2(); +} + +// Tests that if we drain connections on an empty map, nothing happens. +TEST_F(ConnPoolMapImplTest, EmptyMapDrainConnectionsNop) { + TestMapPtr test_map = makeTestMap(); + test_map->drainConnections(); +} + +// Tests that we forward drainConnections to the pools. +TEST_F(ConnPoolMapImplTest, DrainConnectionsForwarded) { + TestMapPtr test_map = makeTestMap(); + + test_map->getPool(1, getBasicFactory()); + test_map->getPool(2, getBasicFactory()); + EXPECT_CALL(*mock_pools_[0], drainConnections()); + EXPECT_CALL(*mock_pools_[1], drainConnections()); + + test_map->drainConnections(); +} + +TEST_F(ConnPoolMapImplTest, ClearDefersDelete) { + TestMapPtr test_map = makeTestMap(); + + test_map->getPool(1, getBasicFactory()); + test_map->getPool(2, getBasicFactory()); + test_map->clear(); + + EXPECT_EQ(dispatcher_.to_delete_.size(), 2); +} + +// The following tests only die in debug builds, so don't run them if this isn't one. 
+#if !defined(NDEBUG) +class ConnPoolMapImplDeathTest : public ConnPoolMapImplTest {}; + +TEST_F(ConnPoolMapImplDeathTest, ReentryClearTripsAssert) { + TestMapPtr test_map = makeTestMap(); + + test_map->getPool(1, getBasicFactory()); + ON_CALL(*mock_pools_[0], addDrainedCallback(_)) + .WillByDefault(Invoke([](Http::ConnectionPool::Instance::DrainedCb cb) { cb(); })); + + EXPECT_DEATH_LOG_TO_STDERR(test_map->addDrainedCallback([&test_map] { test_map->clear(); }), + ".*Details: A resource should only be entered once"); +} + +TEST_F(ConnPoolMapImplDeathTest, ReentryGetPoolTripsAssert) { + TestMapPtr test_map = makeTestMap(); + + test_map->getPool(1, getBasicFactory()); + ON_CALL(*mock_pools_[0], addDrainedCallback(_)) + .WillByDefault(Invoke([](Http::ConnectionPool::Instance::DrainedCb cb) { cb(); })); + + EXPECT_DEATH_LOG_TO_STDERR( + test_map->addDrainedCallback([&test_map, this] { test_map->getPool(2, getBasicFactory()); }), + ".*Details: A resource should only be entered once"); +} + +TEST_F(ConnPoolMapImplDeathTest, ReentryDrainConnectionsTripsAssert) { + TestMapPtr test_map = makeTestMap(); + + test_map->getPool(1, getBasicFactory()); + ON_CALL(*mock_pools_[0], addDrainedCallback(_)) + .WillByDefault(Invoke([](Http::ConnectionPool::Instance::DrainedCb cb) { cb(); })); + + EXPECT_DEATH_LOG_TO_STDERR( + test_map->addDrainedCallback([&test_map] { test_map->drainConnections(); }), + ".*Details: A resource should only be entered once"); +} + +TEST_F(ConnPoolMapImplDeathTest, ReentryAddDrainedCallbackTripsAssert) { + TestMapPtr test_map = makeTestMap(); + + test_map->getPool(1, getBasicFactory()); + ON_CALL(*mock_pools_[0], addDrainedCallback(_)) + .WillByDefault(Invoke([](Http::ConnectionPool::Instance::DrainedCb cb) { cb(); })); + + EXPECT_DEATH_LOG_TO_STDERR( + test_map->addDrainedCallback([&test_map] { test_map->addDrainedCallback([]() {}); }), + ".*Details: A resource should only be entered once"); +} +#endif // !defined(NDEBUG) +} // namespace Upstream +} // 
namespace Envoy