Skip to content
This repository was archived by the owner on Nov 15, 2023. It is now read-only.
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions bin/node-template/node/src/service.rs
Original file line number Diff line number Diff line change
Expand Up @@ -141,6 +141,7 @@ pub fn new_full(config: Configuration)
justification_period: 512,
name: Some(name),
observer_enabled: false,
favorite_peers: Default::default(),
keystore,
is_authority,
};
Expand Down
13 changes: 12 additions & 1 deletion bin/node/cli/src/service.rs
Original file line number Diff line number Diff line change
Expand Up @@ -125,12 +125,14 @@ macro_rules! new_full {
name,
disable_grandpa,
sentry_nodes,
reserved_nodes,
) = (
$config.roles.is_authority(),
$config.force_authoring,
$config.name.clone(),
$config.disable_grandpa,
$config.network.sentry_nodes.clone(),
$config.network.reserved_nodes.clone(),
);

// sentry nodes announce themselves as authorities to the network
Expand Down Expand Up @@ -190,7 +192,7 @@ macro_rules! new_full {
let authority_discovery = sc_authority_discovery::AuthorityDiscovery::new(
service.client(),
network,
sentry_nodes,
sentry_nodes.clone(),
service.keystore(),
dht_event_stream,
service.prometheus_registry(),
Expand All @@ -207,12 +209,21 @@ macro_rules! new_full {
None
};

// add reserved and sentry nodes as favorite peers to GRANDPA
let favorite_peers = reserved_nodes
.into_iter()
.chain(sentry_nodes.into_iter())
.filter_map(|n| sc_network::config::parse_str_addr(&n).ok())
.map(|(peer_id, _)| peer_id)
.collect();

let config = grandpa::Config {
// FIXME #1578 make this available through chainspec
gossip_duration: std::time::Duration::from_millis(333),
justification_period: 512,
name: Some(name),
observer_enabled: false,
favorite_peers,
keystore,
is_authority,
};
Expand Down
7 changes: 7 additions & 0 deletions client/finality-grandpa/src/communication/gossip.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1110,6 +1110,9 @@ impl<Block: BlockT> Inner<Block> {
let round_duration = self.config.gossip_duration * ROUND_DURATION;
let round_elapsed = self.round_start.elapsed();

if self.config.favorite_peers.contains(who) {
return true;
}

if !self.config.is_authority
&& round_elapsed < round_duration * PROPAGATION_ALL
Expand Down Expand Up @@ -1170,6 +1173,10 @@ impl<Block: BlockT> Inner<Block> {
let round_duration = self.config.gossip_duration * ROUND_DURATION;
let round_elapsed = self.round_start.elapsed();

if self.config.favorite_peers.contains(who) {
return true;
}

if peer.roles.is_authority() {
let authorities = self.peers.authorities();

Expand Down
6 changes: 6 additions & 0 deletions client/finality-grandpa/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -52,6 +52,8 @@
//! or prune any signaled changes based on whether the signaling block is
//! included in the newly-finalized chain.

use std::collections::HashSet;

use futures::prelude::*;
use futures::StreamExt;
use log::{debug, info};
Expand Down Expand Up @@ -204,6 +206,10 @@ pub struct Config {
pub is_authority: bool,
/// Some local identifier of the voter.
pub name: Option<String>,
/// The set of favorite peers which we should treat preferably when gossiping messages
/// (i.e. we prioritize sending any data to them over other peers). The favorite peers
/// are usually reserved nodes and/or sentry nodes.
pub favorite_peers: HashSet<sc_network::PeerId>,
/// The keystore that manages the keys of this node.
pub keystore: Option<sc_keystore::KeyStorePtr>,
}
Expand Down
21 changes: 18 additions & 3 deletions client/network/src/service.rs
Original file line number Diff line number Diff line change
Expand Up @@ -205,15 +205,30 @@ impl<B: BlockT + 'static, H: ExHashT> NetworkWorker<B, H> {
)?;

// Initialize the reserved peers.
for reserved in params.network_config.reserved_nodes.iter() {
if let Ok((peer_id, addr)) = parse_str_addr(reserved) {
let mut add_reserved = |node| {
if let Ok((peer_id, addr)) = parse_str_addr(node) {
reserved_nodes.push(peer_id.clone());
known_addresses.push((peer_id, addr));
// Address parsed successfully and registered: report success.
true
} else {
// Could not parse the address: report failure so the caller warns.
false
}
};

for reserved in params.network_config.reserved_nodes.iter() {
if !add_reserved(reserved) {
warn!(target: "sub-libp2p", "Not a valid reserved node address: {}", reserved);
}
}

// treat sentry nodes as reserved for the peerset, we always want to maintain connections to
// our sentries.
for sentry in params.network_config.sentry_nodes.iter() {
if !add_reserved(sentry) {
warn!(target: "sub-libp2p", "Not a valid sentry node address: {}", sentry);
}
}

let peerset_config = sc_peerset::PeersetConfig {
in_peers: params.network_config.in_peers,
out_peers: params.network_config.out_peers,
Expand Down