diff --git a/nano/core_test/peer_container.cpp b/nano/core_test/peer_container.cpp
index de2b8df32a..48bcba2723 100644
--- a/nano/core_test/peer_container.cpp
+++ b/nano/core_test/peer_container.cpp
@@ -174,8 +174,8 @@ TEST (peer_container, DISABLED_list_fanout)
 	nano::test::system system{ 1 };
 	auto node = system.nodes[0];
 	ASSERT_EQ (0, node->network.size ());
-	ASSERT_EQ (0.0, node->network.size_sqrt ());
-	ASSERT_EQ (0, node->network.fanout ());
+	ASSERT_EQ (0.0f, node->network.size_log ());
+	ASSERT_EQ (1, node->network.fanout ());
 	ASSERT_TRUE (node->network.list (node->network.fanout ()).empty ());
 
 	auto add_peer = [&node, &system] () {
@@ -185,26 +185,27 @@ TEST (peer_container, DISABLED_list_fanout)
 
 	add_peer ();
 	ASSERT_TIMELY_EQ (5s, 1, node->network.size ());
-	ASSERT_EQ (1.f, node->network.size_sqrt ());
+	ASSERT_EQ (0.0f, node->network.size_log ());
 	ASSERT_EQ (1, node->network.fanout ());
 	ASSERT_EQ (1, node->network.list (node->network.fanout ()).size ());
 
 	add_peer ();
-	ASSERT_TIMELY_EQ (5s, 2, node->network.size ());
-	ASSERT_EQ (std::sqrt (2.f), node->network.size_sqrt ());
+	add_peer ();
+	ASSERT_TIMELY_EQ (5s, 3, node->network.size ());
+	ASSERT_EQ (std::log (3.0f), node->network.size_log ());
 	ASSERT_EQ (2, node->network.fanout ());
 	ASSERT_EQ (2, node->network.list (node->network.fanout ()).size ());
 
 	unsigned number_of_peers = 10;
-	for (unsigned i = 2; i < number_of_peers; ++i)
+	for (unsigned i = 3; i < number_of_peers; ++i)
 	{
 		add_peer ();
 	}
 
 	ASSERT_TIMELY_EQ (5s, number_of_peers, node->network.size ());
-	ASSERT_EQ (std::sqrt (float (number_of_peers)), node->network.size_sqrt ());
-	ASSERT_EQ (4, node->network.fanout ());
-	ASSERT_EQ (4, node->network.list (node->network.fanout ()).size ());
+	ASSERT_EQ (std::log (float (number_of_peers)), node->network.size_log ());
+	ASSERT_EQ (3, node->network.fanout ());
+	ASSERT_EQ (3, node->network.list (node->network.fanout ()).size ());
 }
 
 // Test to make sure we don't repeatedly send keepalive messages to nodes that aren't responding
diff --git a/nano/node/network.cpp b/nano/node/network.cpp
index 57c429c6b8..3f73d69f90 100644
--- a/nano/node/network.cpp
+++ b/nano/node/network.cpp
@@ -430,7 +430,8 @@ std::deque<std::shared_ptr<nano::transport::channel>> nano::network::list_non_pr
 // Simulating with sqrt_broadcast_simulate shows we only need to broadcast to sqrt(total_peers) random peers in order to successfully publish to everyone with high probability
 std::size_t nano::network::fanout (float scale) const
 {
-	return static_cast<std::size_t> (std::ceil (scale * size_sqrt ()));
+	auto fanout_l = std::max (2.0f, size_log ());
+	return static_cast<std::size_t> (std::ceil (scale * fanout_l));
 }
 
 std::unordered_set<std::shared_ptr<nano::transport::channel>> nano::network::random_set (std::size_t max_count, uint8_t minimum_version) const
@@ -519,9 +520,10 @@ std::size_t nano::network::size () const
 	return tcp_channels.size ();
 }
 
-float nano::network::size_sqrt () const
+float nano::network::size_log () const
 {
-	return static_cast<float> (std::sqrt (size ()));
+	auto size_l = std::max (static_cast<std::size_t> (1u), size ()); // Clamp size to domain of std::log
+	return static_cast<float> (std::log (size_l));
 }
 
 bool nano::network::empty () const
@@ -735,4 +737,4 @@ nano::container_info nano::syn_cookies::container_info () const
 	info.put ("syn_cookies", cookies.size ());
 	info.put ("syn_cookies_per_ip", cookies_per_ip.size ());
 	return info;
-}
\ No newline at end of file
+}
diff --git a/nano/node/network.hpp b/nano/node/network.hpp
index 621e7fdd93..e87c822885 100644
--- a/nano/node/network.hpp
+++ b/nano/node/network.hpp
@@ -134,7 +134,7 @@ class network final
 	nano::tcp_endpoint bootstrap_peer ();
 	void cleanup (std::chrono::steady_clock::time_point const & cutoff);
 	std::size_t size () const;
-	float size_sqrt () const;
+	float size_log () const;
 	bool empty () const;
 	void erase (nano::transport::channel const &);
 	/** Disconnects and adds peer to exclusion list */
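
For reference, the short standalone sketch below (not part of the patch) contrasts the previous sqrt-based fanout with the new log-based fanout, mirroring the clamps introduced above; the helper names and the sample peer counts are made up for illustration only. At around 10,000 peers the default fanout drops from roughly 100 to 10, while very small networks keep a floor of 2.

// Illustrative sketch only (not part of the patch): standalone versions of the old
// sqrt-based fanout and the new log-based fanout, mirroring the clamps added above.
// The helper names and the peer counts in main () are arbitrary examples.
#include <algorithm>
#include <cmath>
#include <cstddef>
#include <cstdio>

std::size_t fanout_sqrt (std::size_t peers, float scale = 1.0f)
{
	return static_cast<std::size_t> (std::ceil (scale * std::sqrt (static_cast<float> (peers))));
}

std::size_t fanout_log (std::size_t peers, float scale = 1.0f)
{
	auto size_l = std::max (static_cast<std::size_t> (1u), peers); // Clamp to the domain of std::log
	auto fanout_l = std::max (2.0f, std::log (static_cast<float> (size_l)));
	return static_cast<std::size_t> (std::ceil (scale * fanout_l));
}

int main ()
{
	for (std::size_t peers : { 10, 100, 1000, 10000 })
	{
		std::printf ("peers=%5zu  sqrt fanout=%3zu  log fanout=%2zu\n", peers, fanout_sqrt (peers), fanout_log (peers));
	}
}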