This repository was archived by the owner on Nov 15, 2023. It is now read-only.
Merged
21 changes: 20 additions & 1 deletion Cargo.lock

Some generated files are not rendered by default.

6 changes: 3 additions & 3 deletions bin/node-template/src/service.rs
@@ -131,7 +131,7 @@ pub fn new_full<C: Send + Default + 'static>(config: Configuration<C, GenesisCon

// the AURA authoring task is considered essential, i.e. if it
// fails we take down the service with it.
service.spawn_essential_task(aura);
service.spawn_essential_task("aura", aura);
}

// if the node isn't actively participating in consensus then it doesn't
@@ -155,7 +155,7 @@ pub fn new_full<C: Send + Default + 'static>(config: Configuration<C, GenesisCon
match (is_authority, disable_grandpa) {
(false, false) => {
// start the lightweight GRANDPA observer
service.spawn_task(grandpa::run_grandpa_observer(
service.spawn_task("grandpa-observer", grandpa::run_grandpa_observer(
grandpa_config,
grandpa_link,
service.network(),
@@ -178,7 +178,7 @@ pub fn new_full<C: Send + Default + 'static>(config: Configuration<C, GenesisCon

// the GRANDPA voter task is considered infallible, i.e.
// if it fails we take down the service with it.
service.spawn_essential_task(grandpa::run_grandpa_voter(voter_config)?);
service.spawn_essential_task("grandpa", grandpa::run_grandpa_voter(voter_config)?);
},
(_, true) => {
grandpa::setup_disabled_grandpa(
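The node-template changes are purely call-site updates: `spawn_task` and `spawn_essential_task` now take a human-readable name as their first argument. A minimal sketch of what such a named-spawn handle amounts to, modelled on the `(future, Cow<'static, str>)` channel that `builder.rs` introduces below (the struct and method here are illustrative, not the actual `sc-service` types):

```rust
use std::{borrow::Cow, future::Future, pin::Pin};
use futures::channel::mpsc;

type TaskName = Cow<'static, str>;
type BoxedTask = Pin<Box<dyn Future<Output = ()> + Send>>;

/// Illustrative handle: every task is queued together with a diagnostic name.
struct NamedSpawner {
    to_spawn_tx: mpsc::UnboundedSender<(BoxedTask, TaskName)>,
}

impl NamedSpawner {
    fn spawn_task(
        &self,
        name: impl Into<TaskName>,
        task: impl Future<Output = ()> + Send + 'static,
    ) {
        // Pair the boxed future with its name, mirroring the
        // `unbounded_send((Box::pin(future), From::from("...")))` calls below.
        let _ = self.to_spawn_tx.unbounded_send((Box::pin(task), name.into()));
    }
}
```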
7 changes: 4 additions & 3 deletions bin/node/cli/src/service.rs
@@ -172,7 +172,7 @@ macro_rules! new_full {
};

let babe = sc_consensus_babe::start_babe(babe_config)?;
service.spawn_essential_task(babe);
service.spawn_essential_task("babe-proposer", babe);

let network = service.network();
let dht_event_stream = network.event_stream().filter_map(|e| async move { match e {
@@ -187,7 +187,7 @@ macro_rules! new_full {
dht_event_stream,
);

service.spawn_task(authority_discovery);
service.spawn_task("authority-discovery", authority_discovery);
}

// if the node isn't actively participating in consensus then it doesn't
@@ -211,7 +211,7 @@ macro_rules! new_full {
match (is_authority, disable_grandpa) {
(false, false) => {
// start the lightweight GRANDPA observer
service.spawn_task(grandpa::run_grandpa_observer(
service.spawn_task("grandpa-observer", grandpa::run_grandpa_observer(
config,
grandpa_link,
service.network(),
@@ -234,6 +234,7 @@ macro_rules! new_full {
// the GRANDPA voter task is considered infallible, i.e.
// if it fails we take down the service with it.
service.spawn_essential_task(
"grandpa-voter",
grandpa::run_grandpa_voter(grandpa_config)?
);
},
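The full node's service gets the same treatment: BABE, authority discovery and the GRANDPA tasks are all spawned under explicit names. As the surrounding comments note, `spawn_essential_task` differs from `spawn_task` in that a finished or failed essential task takes the whole service down, via the `essential_failed` side-channel set up in `builder.rs`. A rough sketch of that pattern (purely illustrative, assuming an unbounded `()` channel like the one in the builder code):

```rust
use std::future::Future;
use futures::{channel::mpsc, future::{ready, FutureExt}};

/// Illustrative: wrap an "essential" task so that, when it ends for any
/// reason, a side-channel is notified and the service can shut itself down.
fn essentialize(
    task: impl Future<Output = ()> + Send + 'static,
    essential_failed_tx: mpsc::UnboundedSender<()>,
) -> impl Future<Output = ()> + Send + 'static {
    task.then(move |()| {
        // Ignore the send error: if the receiver is gone, the service is already down.
        let _ = essential_failed_tx.unbounded_send(());
        ready(())
    })
}
```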
1 change: 1 addition & 0 deletions client/service/Cargo.toml
@@ -17,6 +17,7 @@ wasmtime = [
derive_more = "0.99.2"
futures01 = { package = "futures", version = "0.1.29" }
futures = "0.3.1"
futures-diagnose = "1.0"
parking_lot = "0.9.0"
lazy_static = "1.4.0"
log = "0.4.8"
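The only new dependency is `futures-diagnose`, which suggests the task names collected below are intended for per-task profiling of the spawned futures. A hedged sketch of how a name could be used to instrument a task; the `diagnose(name, future)` wrapper is an assumption about the crate's API, not something shown in this diff:

```rust
use std::{borrow::Cow, future::Future, pin::Pin};

type BoxedTask = Pin<Box<dyn Future<Output = ()> + Send>>;

/// Hypothetical consumer of a (task, name) pair: wrap the task before handing
/// it to the runtime so its poll timings are reported under the given name.
fn instrument(task: BoxedTask, name: Cow<'static, str>) -> BoxedTask {
    // Assumption: futures-diagnose exposes a `diagnose(name, future)` wrapper
    // returning an instrumented future; check the crate docs for the real API.
    Box::pin(futures_diagnose::diagnose(name, task))
}
```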
62 changes: 42 additions & 20 deletions client/service/src/builder.rs
@@ -44,6 +44,7 @@ use sp_runtime::traits::{
use sp_api::ProvideRuntimeApi;
use sc_executor::{NativeExecutor, NativeExecutionDispatch};
use std::{
borrow::Cow,
io::{Read, Write, Seek},
marker::PhantomData, sync::Arc, time::SystemTime, pin::Pin
};
@@ -791,7 +792,7 @@ ServiceBuilder<

// List of asynchronous tasks to spawn. We collect them, then spawn them all at once.
let (to_spawn_tx, to_spawn_rx) =
mpsc::unbounded::<Pin<Box<dyn Future<Output = ()> + Send>>>();
mpsc::unbounded::<(Pin<Box<dyn Future<Output = ()> + Send>>, Cow<'static, str>)>();

// A side-channel for essential tasks to communicate shutdown.
let (essential_failed_tx, essential_failed_rx) = mpsc::unbounded();
@@ -816,7 +817,7 @@ ServiceBuilder<
imports_external_transactions: !config.roles.is_light(),
pool: transaction_pool.clone(),
client: client.clone(),
executor: Arc::new(SpawnTaskHandle { sender: to_spawn_tx.clone(), on_exit: exit.clone() }),
executor: SpawnTaskHandle { sender: to_spawn_tx.clone(), on_exit: exit.clone() },
});

let protocol_id = {
@@ -840,7 +841,7 @@ ServiceBuilder<
executor: {
let to_spawn_tx = to_spawn_tx.clone();
Some(Box::new(move |fut| {
if let Err(e) = to_spawn_tx.unbounded_send(fut) {
if let Err(e) = to_spawn_tx.unbounded_send((fut, From::from("libp2p-node"))) {
error!("Failed to spawn libp2p background task: {:?}", e);
}
}))
@@ -891,7 +892,10 @@ ServiceBuilder<
&BlockId::hash(notification.hash),
&notification.retracted,
);
let _ = to_spawn_tx_.unbounded_send(Box::pin(future));
let _ = to_spawn_tx_.unbounded_send((
Box::pin(future),
From::from("txpool-maintain")
));
}

let offchain = offchain.as_ref().and_then(|o| o.upgrade());
@@ -901,12 +905,18 @@ ServiceBuilder<
network_state_info.clone(),
is_validator
);
let _ = to_spawn_tx_.unbounded_send(Box::pin(future));
let _ = to_spawn_tx_.unbounded_send((
Box::pin(future),
From::from("offchain-on-block")
));
}

ready(())
});
let _ = to_spawn_tx.unbounded_send(Box::pin(select(events, exit.clone()).map(drop)));
let _ = to_spawn_tx.unbounded_send((
Box::pin(select(events, exit.clone()).map(drop)),
From::from("txpool-and-offchain-notif")
));
}

{
@@ -926,7 +936,10 @@ ServiceBuilder<
ready(())
});

let _ = to_spawn_tx.unbounded_send(Box::pin(select(events, exit.clone()).map(drop)));
let _ = to_spawn_tx.unbounded_send((
Box::pin(select(events, exit.clone()).map(drop)),
From::from("telemetry-on-block")
));
}

// Periodically notify the telemetry.
@@ -990,7 +1003,10 @@ ServiceBuilder<

ready(())
});
let _ = to_spawn_tx.unbounded_send(Box::pin(select(tel_task, exit.clone()).map(drop)));
let _ = to_spawn_tx.unbounded_send((
Box::pin(select(tel_task, exit.clone()).map(drop)),
From::from("telemetry-periodic-send")
));

// Periodically send the network state to the telemetry.
let (netstat_tx, netstat_rx) = mpsc::unbounded::<(NetworkStatus<_>, NetworkState)>();
@@ -1003,7 +1019,10 @@ ServiceBuilder<
);
ready(())
});
let _ = to_spawn_tx.unbounded_send(Box::pin(select(tel_task_2, exit.clone()).map(drop)));
let _ = to_spawn_tx.unbounded_send((
Box::pin(select(tel_task_2, exit.clone()).map(drop)),
From::from("telemetry-periodic-network-state")
));

// RPC
let (system_rpc_tx, system_rpc_rx) = mpsc::unbounded();
@@ -1066,14 +1085,17 @@ ServiceBuilder<
let rpc = start_rpc_servers(&config, gen_handler)?;


let _ = to_spawn_tx.unbounded_send(Box::pin(select(build_network_future(
config.roles,
network_mut,
client.clone(),
network_status_sinks.clone(),
system_rpc_rx,
has_bootnodes,
), exit.clone()).map(drop)));
let _ = to_spawn_tx.unbounded_send((
Box::pin(select(build_network_future(
config.roles,
network_mut,
client.clone(),
network_status_sinks.clone(),
system_rpc_rx,
has_bootnodes,
), exit.clone()).map(drop)),
From::from("network-worker")
));

let telemetry_connection_sinks: Arc<Mutex<Vec<futures::channel::mpsc::UnboundedSender<()>>>> = Default::default();

Expand Down Expand Up @@ -1114,9 +1136,9 @@ ServiceBuilder<
});
ready(())
});
let _ = to_spawn_tx.unbounded_send(Box::pin(select(
let _ = to_spawn_tx.unbounded_send((Box::pin(select(
future, exit.clone()
).map(drop)));
).map(drop)), From::from("telemetry-worker")));
telemetry
});

@@ -1127,7 +1149,7 @@ ServiceBuilder<
exit.clone()
).map(drop);

let _ = to_spawn_tx.unbounded_send(Box::pin(future));
let _ = to_spawn_tx.unbounded_send((Box::pin(future), From::from("grafana-server")));
}

// Instrumentation
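The rest of the `builder.rs` changes are mechanical: every `unbounded_send(Box::pin(fut))` becomes `unbounded_send((Box::pin(fut), From::from("name")))`, so the spawn channel now carries a name alongside each future. The receiving end of that channel is not part of this excerpt; a sketch of what a consumer might look like (the `spawn` callback and the logging are illustrative):

```rust
use std::{borrow::Cow, future::Future, pin::Pin};
use futures::{channel::mpsc, StreamExt};

type BoxedTask = Pin<Box<dyn Future<Output = ()> + Send>>;

/// Illustrative consumer side (not shown in this diff): drain the channel that
/// `builder.rs` fills with (future, name) pairs and hand each task to an
/// executor, keeping the name around for logging or diagnostics.
async fn drain_spawn_queue(
    mut to_spawn_rx: mpsc::UnboundedReceiver<(BoxedTask, Cow<'static, str>)>,
    spawn: impl Fn(BoxedTask),
) {
    while let Some((task, name)) = to_spawn_rx.next().await {
        log::debug!("Spawning background task: {}", name);
        spawn(task);
    }
}
```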