64 commits
0016a00
Turn peerset priority groups into sets
tomaka Oct 15, 2020
749873f
Wip
tomaka Oct 16, 2020
bc8871c
Err, tabs
tomaka Oct 16, 2020
d86b51f
Merge remote-tracking branch 'upstream/master' into rework-priority-g…
tomaka Oct 16, 2020
375189d
Merge remote-tracking branch 'upstream/master' into rework-priority-g…
tomaka Oct 22, 2020
d6212db
WIP
tomaka Oct 22, 2020
763be4a
Merge remote-tracking branch 'upstream/master' into rework-priority-g…
tomaka Oct 22, 2020
e1128b9
Adjust network config
tomaka Oct 22, 2020
86d6666
Wip
tomaka Oct 22, 2020
6dedc77
Merge remote-tracking branch 'upstream/master' into rework-priority-g…
tomaka Nov 12, 2020
16c84a7
Merge remote-tracking branch 'upstream/master' into rework-priority-g…
tomaka Nov 16, 2020
8d9cbdf
Merge remote-tracking branch 'upstream/master' into rework-priority-g…
tomaka Nov 17, 2020
65d893c
Small doc
tomaka Nov 17, 2020
19438e6
Merge remote-tracking branch 'upstream/master' into rework-priority-g…
tomaka Nov 18, 2020
e6ec01a
WIP
tomaka Nov 20, 2020
9df04ff
Update client/peerset/src/lib.rs
wheresaddie Nov 23, 2020
7cbce24
Update client/peerset/src/lib.rs
wheresaddie Nov 23, 2020
21e1658
Merge branch 'rework-priority-groups' of github.com:tomaka/polkadot i…
tomaka Nov 23, 2020
9c4ad24
Don't use names for sets but indices
tomaka Nov 24, 2020
70a58f7
WIP
tomaka Nov 24, 2020
cd26e2d
Merge remote-tracking branch 'upstream/master' into rework-priority-g…
tomaka Nov 27, 2020
036aba5
Fix some tests
tomaka Nov 27, 2020
28beefc
More test fix
tomaka Nov 27, 2020
dfd06f5
Silence warnings
tomaka Nov 27, 2020
e3524c9
Restore fuzzing test
tomaka Nov 27, 2020
a280dbe
Merge remote-tracking branch 'upstream/master' into rework-priority-g…
tomaka Dec 1, 2020
19f4b2e
WIP
tomaka Dec 1, 2020
2c22349
Merge remote-tracking branch 'upstream/master' into rework-priority-g…
tomaka Dec 1, 2020
0196607
Merge remote-tracking branch 'upstream/master' into rework-priority-g…
tomaka Dec 2, 2020
b1a50b4
Put set_id everywhere
tomaka Dec 2, 2020
009428a
Pass indices rather than names
tomaka Dec 2, 2020
1d1f43f
Divided by two the number of compilation errors
tomaka Dec 2, 2020
15cedb5
Merge remote-tracking branch 'upstream/master' into rework-priority-g…
tomaka Dec 3, 2020
df5412c
Fix merge downfall
tomaka Dec 3, 2020
440dd26
Progress
tomaka Dec 3, 2020
776b3cf
Finish updating handler.rs
tomaka Dec 3, 2020
21322d8
Merge remote-tracking branch 'upstream/master' into rework-priority-g…
tomaka Dec 7, 2020
65b269c
WIP
tomaka Dec 7, 2020
a1c6092
Merge remote-tracking branch 'upstream/master' into rework-priority-g…
tomaka Dec 7, 2020
6fccbec
WIP
tomaka Dec 7, 2020
e73f64b
Merge remote-tracking branch 'upstream/master' into rework-priority-g…
tomaka Dec 7, 2020
b2c8ab7
Tests compiling
tomaka Dec 8, 2020
a06ea4a
sc-network tests passing
tomaka Dec 8, 2020
8788fe7
GrandPa tests compiling
tomaka Dec 8, 2020
b8d214d
Fix protocols in events
tomaka Dec 8, 2020
222fe3e
WIP
tomaka Dec 8, 2020
1a380ed
WIP
tomaka Dec 8, 2020
500b70b
Grandpa tests now passing
tomaka Dec 8, 2020
97e3101
Fix some warnings
tomaka Dec 8, 2020
ec9b55e
Comment out code to fix all warnings and let CI run
tomaka Dec 8, 2020
4fff66a
Merge remote-tracking branch 'upstream/master' into rework-priority-g…
tomaka Dec 8, 2020
653a4bb
Fix warning
tomaka Dec 9, 2020
a50b4b1
Cut down authority-discovery priority group
tomaka Dec 9, 2020
0bf2726
Allow reserved-only per set
tomaka Dec 9, 2020
221d1d8
WIP
tomaka Dec 9, 2020
6503a97
WIP
tomaka Dec 9, 2020
6b14539
Line widths
tomaka Dec 9, 2020
ff920d4
Update set 1 thing
tomaka Dec 9, 2020
aaf4515
Proper reserved-only handling
tomaka Dec 9, 2020
79042b2
Fix Grandpa path
tomaka Dec 9, 2020
b5f89dd
Restore peerset debug info
tomaka Dec 9, 2020
38eae16
I think I'm done 🎉
tomaka Dec 9, 2020
06f1e21
Fix TODO
tomaka Dec 9, 2020
67b2c53
More done than done
tomaka Dec 9, 2020
4 changes: 2 additions & 2 deletions bin/node-template/node/src/service.rs
@@ -86,7 +86,7 @@ pub fn new_full(mut config: Configuration) -> Result<TaskManager, ServiceError>
other: (block_import, grandpa_link),
} = new_partial(&config)?;

config.network.notifications_protocols.push(sc_finality_grandpa::GRANDPA_PROTOCOL_NAME.into());
config.network.extra_sets.push(sc_finality_grandpa::grandpa_peers_set_config());

let (network, network_status_sinks, system_rpc_tx, network_starter) =
sc_service::build_network(sc_service::BuildNetworkParams {
@@ -223,7 +223,7 @@ pub fn new_light(mut config: Configuration) -> Result<TaskManager, ServiceError>
let (client, backend, keystore_container, mut task_manager, on_demand) =
sc_service::new_light_parts::<Block, RuntimeApi, Executor>(&config)?;

config.network.notifications_protocols.push(sc_finality_grandpa::GRANDPA_PROTOCOL_NAME.into());
config.network.extra_sets.push(sc_finality_grandpa::grandpa_peers_set_config());

let select_chain = sc_consensus::LongestChain::new(backend.clone());

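The change above replaces pushing a bare protocol name with registering a complete peers-set definition. As a rough, self-contained sketch of the shapes involved (stand-in types only; the real definitions live in `sc_network::config`, and the protocol name below is a placeholder):

```rust
// Stand-in types mirroring the shape suggested by this PR's diffs (see the
// `network_params.rs` hunk further down for the real `SetConfig` fields).
// Illustrative only; the actual definitions live in `sc_network::config`.
use std::borrow::Cow;

#[derive(Debug, Clone)]
pub enum NonReservedPeerMode { Accept, Deny }

#[derive(Debug, Clone)]
pub struct SetConfig {
    pub in_peers: u32,
    pub out_peers: u32,
    pub reserved_nodes: Vec<String>, // multiaddresses, simplified to strings
    pub non_reserved_mode: NonReservedPeerMode,
}

/// A named notifications protocol bundled with its own peer-slot config.
#[derive(Debug, Clone)]
pub struct NonDefaultSetConfig {
    pub notifications_protocol: Cow<'static, str>,
    pub set_config: SetConfig,
}

/// Hypothetical stand-in for `sc_finality_grandpa::grandpa_peers_set_config()`.
pub fn grandpa_peers_set_config() -> NonDefaultSetConfig {
    NonDefaultSetConfig {
        notifications_protocol: "/example/grandpa/1".into(), // placeholder name
        set_config: SetConfig {
            in_peers: 25,
            out_peers: 25,
            reserved_nodes: Vec::new(),
            non_reserved_mode: NonReservedPeerMode::Accept,
        },
    }
}

fn main() {
    // Equivalent in spirit to `config.network.extra_sets.push(...)` above.
    let mut extra_sets = Vec::new();
    extra_sets.push(grandpa_peers_set_config());
    println!("registered {} extra peers set(s)", extra_sets.len());
}
```

Registering the set up front lets the peerset manager track slots, reserved peers, and reserved-only mode per protocol, which is what this PR replaces the ad-hoc priority groups with.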
4 changes: 2 additions & 2 deletions bin/node/cli/src/service.rs
@@ -178,7 +178,7 @@ pub fn new_full_base(

let shared_voter_state = rpc_setup;

config.network.notifications_protocols.push(grandpa::GRANDPA_PROTOCOL_NAME.into());
config.network.extra_sets.push(sc_finality_grandpa::grandpa_peers_set_config());

let (network, network_status_sinks, system_rpc_tx, network_starter) =
sc_service::build_network(sc_service::BuildNetworkParams {
@@ -346,7 +346,7 @@ pub fn new_light_base(mut config: Configuration) -> Result<(
let (client, backend, keystore_container, mut task_manager, on_demand) =
sc_service::new_light_parts::<Block, RuntimeApi, Executor>(&config)?;

config.network.notifications_protocols.push(grandpa::GRANDPA_PROTOCOL_NAME.into());
config.network.extra_sets.push(sc_finality_grandpa::grandpa_peers_set_config());

let select_chain = sc_consensus::LongestChain::new(backend.clone());

70 changes: 0 additions & 70 deletions client/authority-discovery/src/worker.rs
@@ -55,10 +55,6 @@ pub mod tests;

const LOG_TARGET: &'static str = "sub-authority-discovery";

/// Name of the Substrate peerset priority group for authorities discovered through the authority
/// discovery module.
const AUTHORITIES_PRIORITY_GROUP_NAME: &'static str = "authorities";

/// Maximum number of addresses cached per authority. Additional addresses are discarded.
const MAX_ADDRESSES_PER_AUTHORITY: usize = 10;

@@ -113,9 +109,6 @@ pub struct Worker<Client, Network, Block, DhtEventStream> {
publish_interval: ExpIncInterval,
/// Interval at which to request addresses of authorities, refilling the pending lookups queue.
query_interval: ExpIncInterval,
/// Interval on which to set the peerset priority group to a new random
/// set of addresses.
priority_group_set_interval: ExpIncInterval,

/// Queue of throttled lookups pending to be passed to the network.
pending_lookups: Vec<AuthorityId>,
@@ -164,13 +157,6 @@ where
Duration::from_secs(2),
config.max_query_interval,
);
let priority_group_set_interval = ExpIncInterval::new(
Duration::from_secs(2),
// Trade-off between node connection churn and connectivity. Using half of
// [`crate::WorkerConfig::max_query_interval`] to update priority group once at the
// beginning and once in the middle of each query interval.
config.max_query_interval / 2,
);

let addr_cache = AddrCache::new();

@@ -194,7 +180,6 @@
dht_event_rx,
publish_interval,
query_interval,
priority_group_set_interval,
pending_lookups: Vec::new(),
in_flight_lookups: HashMap::new(),
addr_cache,
@@ -224,15 +209,6 @@ where
msg = self.from_service.select_next_some() => {
self.process_message_from_service(msg);
},
// Set peerset priority group to a new random set of addresses.
_ = self.priority_group_set_interval.next().fuse() => {
if let Err(e) = self.set_priority_group().await {
error!(
target: LOG_TARGET,
"Failed to set priority group: {:?}", e,
);
}
},
// Publish own addresses.
_ = self.publish_interval.next().fuse() => {
if let Err(e) = self.publish_ext_addresses().await {
@@ -580,52 +556,13 @@ where

Ok(intersection)
}

/// Set the peer set 'authority' priority group to a new random set of
/// [`Multiaddr`]s.
async fn set_priority_group(&self) -> Result<()> {
let addresses = self.addr_cache.get_random_subset();

if addresses.is_empty() {
debug!(
target: LOG_TARGET,
"Got no addresses in cache for peerset priority group.",
);
return Ok(());
}

if let Some(metrics) = &self.metrics {
metrics.priority_group_size.set(addresses.len().try_into().unwrap_or(std::u64::MAX));
}

debug!(
target: LOG_TARGET,
"Applying priority group {:?} to peerset.", addresses,
);

self.network
.set_priority_group(
AUTHORITIES_PRIORITY_GROUP_NAME.to_string(),
addresses.into_iter().collect(),
).await
.map_err(Error::SettingPeersetPriorityGroup)?;

Ok(())
}
}

/// NetworkProvider provides [`Worker`] with all necessary hooks into the
/// underlying Substrate networking. Using this trait abstraction instead of [`NetworkService`]
/// directly is necessary to unit test [`Worker`].
#[async_trait]
pub trait NetworkProvider: NetworkStateInfo {
/// Modify a peerset priority group.
async fn set_priority_group(
&self,
group_id: String,
peers: HashSet<libp2p::Multiaddr>,
) -> std::result::Result<(), String>;

/// Start putting a value in the Dht.
fn put_value(&self, key: libp2p::kad::record::Key, value: Vec<u8>);

@@ -639,13 +576,6 @@
B: BlockT + 'static,
H: ExHashT,
{
async fn set_priority_group(
&self,
group_id: String,
peers: HashSet<libp2p::Multiaddr>,
) -> std::result::Result<(), String> {
self.set_priority_group(group_id, peers).await
}
fn put_value(&self, key: libp2p::kad::record::Key, value: Vec<u8>) {
self.put_value(key, value)
}
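With priority groups gone, the `NetworkProvider` abstraction the worker is tested against shrinks to the DHT hooks. A minimal sketch of the remaining surface, using stand-in types (`get_value` is inferred from the surrounding code; only `put_value` is visible in this hunk):

```rust
// Minimal sketch of the slimmed-down trait. `Key` stands in for
// `libp2p::kad::record::Key`; the real trait also extends `NetworkStateInfo`.
pub type Key = Vec<u8>;

pub trait NetworkProvider {
    /// Start putting a value in the DHT.
    fn put_value(&self, key: Key, value: Vec<u8>);

    /// Start getting a value from the DHT (assumed from the surrounding code).
    fn get_value(&self, key: &Key);
}
```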
41 changes: 0 additions & 41 deletions client/authority-discovery/src/worker/addr_cache.rs
@@ -15,17 +15,11 @@
// along with Substrate. If not, see <http://www.gnu.org/licenses/>.

use libp2p::core::multiaddr::{Multiaddr, Protocol};
use rand::seq::SliceRandom;
use std::collections::HashMap;

use sp_authority_discovery::AuthorityId;
use sc_network::PeerId;

/// The maximum number of authority connections initialized through the authority discovery module.
///
/// In other words the maximum size of the `authority` peerset priority group.
const MAX_NUM_AUTHORITY_CONN: usize = 10;

/// Cache for [`AuthorityId`] -> [`Vec<Multiaddr>`] and [`PeerId`] -> [`AuthorityId`] mappings.
pub(super) struct AddrCache {
authority_id_to_addresses: HashMap<AuthorityId, Vec<Multiaddr>>,
@@ -75,30 +69,6 @@ impl AddrCache {
self.peer_id_to_authority_id.get(peer_id)
}

/// Returns a single address for a random subset (maximum of [`MAX_NUM_AUTHORITY_CONN`]) of all
/// known authorities.
pub fn get_random_subset(&self) -> Vec<Multiaddr> {
let mut rng = rand::thread_rng();

let mut addresses = self
.authority_id_to_addresses
.iter()
.filter_map(|(_authority_id, addresses)| {
debug_assert!(!addresses.is_empty());
addresses
.choose(&mut rng)
})
.collect::<Vec<&Multiaddr>>();

addresses.sort_unstable_by(|a, b| a.as_ref().cmp(b.as_ref()));
addresses.dedup();

addresses
.choose_multiple(&mut rng, MAX_NUM_AUTHORITY_CONN)
.map(|a| (**a).clone())
.collect()
}

/// Removes all [`PeerId`]s and [`Multiaddr`]s from the cache that are not related to the given
/// [`AuthorityId`]s.
pub fn retain_ids(&mut self, authority_ids: &Vec<AuthorityId>) {
@@ -190,11 +160,6 @@ mod tests {
cache.insert(second.0.clone(), vec![second.1.clone()]);
cache.insert(third.0.clone(), vec![third.1.clone()]);

let subset = cache.get_random_subset();
assert!(
subset.contains(&first.1) && subset.contains(&second.1) && subset.contains(&third.1),
"Expect initial subset to contain all authorities.",
);
assert_eq!(
Some(&vec![third.1.clone()]),
cache.get_addresses_by_authority_id(&third.0),
@@ -208,12 +173,6 @@

cache.retain_ids(&vec![first.0, second.0]);

let subset = cache.get_random_subset();
assert!(
subset.contains(&first.1) || subset.contains(&second.1),
"Expected both first and second authority."
);
assert!(!subset.contains(&third.1), "Did not expect address from third authority");
assert_eq!(
None, cache.get_addresses_by_authority_id(&third.0),
"Expect `get_addresses_by_authority_id` to not return `None` for third authority."
44 changes: 1 addition & 43 deletions client/authority-discovery/src/worker/tests.rs
@@ -18,7 +18,7 @@

use crate::worker::schema;

use std::{iter::FromIterator, sync::{Arc, Mutex}, task::Poll};
use std::{sync::{Arc, Mutex}, task::Poll};

use async_trait::async_trait;
use futures::channel::mpsc::{self, channel};
@@ -112,10 +112,6 @@ sp_api::mock_impl_runtime_apis! {
pub enum TestNetworkEvent {
GetCalled(kad::record::Key),
PutCalled(kad::record::Key, Vec<u8>),
SetPriorityGroupCalled {
group_id: String,
peers: HashSet<Multiaddr>
},
}

pub struct TestNetwork {
@@ -125,7 +121,6 @@
// vectors below.
pub put_value_call: Arc<Mutex<Vec<(kad::record::Key, Vec<u8>)>>>,
pub get_value_call: Arc<Mutex<Vec<kad::record::Key>>>,
pub set_priority_group_call: Arc<Mutex<Vec<(String, HashSet<Multiaddr>)>>>,
event_sender: mpsc::UnboundedSender<TestNetworkEvent>,
event_receiver: Option<mpsc::UnboundedReceiver<TestNetworkEvent>>,
}
@@ -147,7 +142,6 @@ impl Default for TestNetwork {
],
put_value_call: Default::default(),
get_value_call: Default::default(),
set_priority_group_call: Default::default(),
event_sender: tx,
event_receiver: Some(rx),
}
@@ -156,21 +150,6 @@

#[async_trait]
impl NetworkProvider for TestNetwork {
async fn set_priority_group(
&self,
group_id: String,
peers: HashSet<Multiaddr>,
) -> std::result::Result<(), String> {
self.set_priority_group_call
.lock()
.unwrap()
.push((group_id.clone(), peers.clone()));
self.event_sender.clone().unbounded_send(TestNetworkEvent::SetPriorityGroupCalled {
group_id,
peers,
}).unwrap();
Ok(())
}
fn put_value(&self, key: kad::record::Key, value: Vec<u8>) {
self.put_value_call.lock().unwrap().push((key.clone(), value.clone()));
self.event_sender.clone().unbounded_send(TestNetworkEvent::PutCalled(key, value)).unwrap();
@@ -296,14 +275,6 @@ fn publish_discover_cycle() {
let (_dht_event_tx, dht_event_rx) = channel(1000);

let network: Arc<TestNetwork> = Arc::new(Default::default());
let node_a_multiaddr = {
let peer_id = network.local_peer_id();
let address = network.external_addresses().pop().unwrap();

address.with(multiaddr::Protocol::P2p(
peer_id.into(),
))
};

let key_store = KeyStore::new();

@@ -365,19 +336,6 @@

// Make authority discovery handle the event.
worker.handle_dht_event(dht_event).await;

worker.set_priority_group().await.unwrap();

// Expect authority discovery to set the priority set.
assert_eq!(network.set_priority_group_call.lock().unwrap().len(), 1);

assert_eq!(
network.set_priority_group_call.lock().unwrap()[0],
(
"authorities".to_string(),
HashSet::from_iter(vec![node_a_multiaddr.clone()].into_iter())
)
);
}.boxed_local().into());

pool.run();
20 changes: 11 additions & 9 deletions client/cli/src/params/network_params.rs
@@ -18,7 +18,7 @@

use crate::params::node_key_params::NodeKeyParams;
use sc_network::{
config::{NetworkConfiguration, NodeKeyConfig, NonReservedPeerMode, TransportConfig},
config::{NetworkConfiguration, NodeKeyConfig, NonReservedPeerMode, SetConfig, TransportConfig},
multiaddr::Protocol,
};
use sc_service::{ChainSpec, ChainType, config::{Multiaddr, MultiaddrWithPeerId}};
@@ -150,21 +150,23 @@ impl NetworkParams {
NetworkConfiguration {
boot_nodes,
net_config_path,
reserved_nodes: self.reserved_nodes.clone(),
non_reserved_mode: if self.reserved_only {
NonReservedPeerMode::Deny
} else {
NonReservedPeerMode::Accept
default_peers_set: SetConfig {
in_peers: self.in_peers,
out_peers: self.out_peers,
reserved_nodes: self.reserved_nodes.clone(),
non_reserved_mode: if self.reserved_only {
NonReservedPeerMode::Deny
} else {
NonReservedPeerMode::Accept
},
},
listen_addresses,
public_addresses,
notifications_protocols: Vec::new(),
extra_sets: Vec::new(),
request_response_protocols: Vec::new(),
node_key,
node_name: node_name.to_string(),
client_version: client_id.to_string(),
in_peers: self.in_peers,
out_peers: self.out_peers,
transport: TransportConfig::Normal {
enable_mdns: !is_dev && !self.no_mdns,
allow_private_ipv4: !self.no_private_ipv4,
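At the CLI level this is the same move: `in_peers`, `out_peers`, the reserved nodes, and the reserved-only flag now describe the default peers set rather than the network as a whole. A sketch of that mapping, reusing the stand-in `SetConfig` and `NonReservedPeerMode` types from the node-template example above:

```rust
// Sketch only: maps CLI-style flags onto the default peers set, using the
// stand-in types defined in the earlier example (not the real sc-network ones).
fn default_peers_set(
    in_peers: u32,
    out_peers: u32,
    reserved_nodes: Vec<String>,
    reserved_only: bool,
) -> SetConfig {
    SetConfig {
        in_peers,
        out_peers,
        reserved_nodes,
        non_reserved_mode: if reserved_only {
            NonReservedPeerMode::Deny
        } else {
            NonReservedPeerMode::Accept
        },
    }
}
```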
6 changes: 5 additions & 1 deletion client/finality-grandpa/src/communication/tests.rs
@@ -56,7 +56,11 @@ impl sc_network_gossip::Network<Block> for TestNetwork {
let _ = self.sender.unbounded_send(Event::Report(who, cost_benefit));
}

fn disconnect_peer(&self, _: PeerId) {}
fn add_set_reserved(&self, _: PeerId, _: Cow<'static, str>) {}

fn remove_set_reserved(&self, _: PeerId, _: Cow<'static, str>) {}

fn disconnect_peer(&self, _: PeerId, _: Cow<'static, str>) {}

fn write_notification(&self, who: PeerId, _: Cow<'static, str>, message: Vec<u8>) {
let _ = self.sender.unbounded_send(Event::WriteNotification(who, message));
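The gossip `Network` trait now threads the protocol name through reserved-peer management and disconnects, scoping each call to a single set. A no-op test double in the spirit of the one above (stand-in `PeerId`, hypothetical `NoopNetwork`; not the real `sc_network_gossip` trait):

```rust
use std::borrow::Cow;

// Stand-in PeerId so the sketch is self-contained.
pub struct PeerId;

// Hypothetical no-op double: every peer operation now names the
// protocol/set it targets instead of acting globally.
pub struct NoopNetwork;

impl NoopNetwork {
    fn add_set_reserved(&self, _who: PeerId, _protocol: Cow<'static, str>) {}
    fn remove_set_reserved(&self, _who: PeerId, _protocol: Cow<'static, str>) {}
    fn disconnect_peer(&self, _who: PeerId, _protocol: Cow<'static, str>) {}
}
```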