From 7d2c7d54d7109298ab3381e0db8f6338f3cd5496 Mon Sep 17 00:00:00 2001 From: Peter Goodspeed-Niklaus Date: Tue, 10 Nov 2020 11:19:55 +0100 Subject: [PATCH 01/33] drop in tracing to replace log --- Cargo.lock | 39 ++++++++++--------- node/collation-generation/Cargo.toml | 2 +- node/collation-generation/src/lib.rs | 14 +++---- node/core/av-store/Cargo.toml | 2 +- node/core/av-store/src/lib.rs | 30 +++++++------- node/core/av-store/src/tests.rs | 6 +-- node/core/backing/Cargo.toml | 2 +- node/core/backing/src/lib.rs | 4 +- node/core/bitfield-signing/Cargo.toml | 2 +- node/core/bitfield-signing/src/lib.rs | 4 +- node/core/candidate-selection/Cargo.toml | 2 +- node/core/candidate-selection/src/lib.rs | 12 +++--- node/core/candidate-validation/Cargo.toml | 2 +- node/core/candidate-validation/src/lib.rs | 2 +- node/core/proposer/Cargo.toml | 2 +- node/core/proposer/src/lib.rs | 2 +- node/core/provisioner/Cargo.toml | 2 +- node/core/provisioner/src/lib.rs | 4 +- .../availability-distribution/Cargo.toml | 2 +- .../availability-distribution/src/lib.rs | 2 +- .../availability-distribution/src/tests.rs | 20 +++++----- node/network/bitfield-distribution/Cargo.toml | 2 +- node/network/bitfield-distribution/src/lib.rs | 2 +- node/network/bridge/Cargo.toml | 2 +- node/network/bridge/src/lib.rs | 16 ++++---- .../network/bridge/src/validator_discovery.rs | 2 +- node/network/collator-protocol/Cargo.toml | 2 +- .../collator-protocol/src/collator_side.rs | 4 +- node/network/collator-protocol/src/lib.rs | 2 +- .../collator-protocol/src/validator_side.rs | 10 ++--- node/network/pov-distribution/Cargo.toml | 2 +- node/network/pov-distribution/src/lib.rs | 6 +-- .../network/statement-distribution/Cargo.toml | 2 +- .../network/statement-distribution/src/lib.rs | 8 ++-- node/overseer/Cargo.toml | 3 +- node/overseer/examples/minimal-example.rs | 12 +++--- node/overseer/src/lib.rs | 8 ++-- node/service/Cargo.toml | 2 +- node/service/src/lib.rs | 2 +- node/subsystem-test-helpers/Cargo.toml | 2 
+- node/subsystem-util/Cargo.toml | 7 ++-- node/subsystem-util/src/lib.rs | 16 ++++---- .../subsystem-util/src/validator_discovery.rs | 2 +- node/subsystem/Cargo.toml | 2 +- node/test/service/Cargo.toml | 2 +- 45 files changed, 138 insertions(+), 137 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d571ac2fd02a..ad1237822956 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4815,7 +4815,6 @@ dependencies = [ "bitvec", "env_logger 0.7.1", "futures 0.3.5", - "log 0.4.11", "maplit", "parity-scale-codec", "polkadot-node-network-protocol", @@ -4828,6 +4827,7 @@ dependencies = [ "sp-core", "sp-keystore", "tempfile", + "tracing", ] [[package]] @@ -4838,7 +4838,6 @@ dependencies = [ "env_logger 0.7.1", "futures 0.3.5", "futures-timer 3.0.2", - "log 0.4.11", "parity-scale-codec", "polkadot-erasure-coding", "polkadot-node-network-protocol", @@ -4853,6 +4852,7 @@ dependencies = [ "sp-keyring", "sp-keystore", "thiserror", + "tracing", ] [[package]] @@ -4884,7 +4884,6 @@ dependencies = [ "env_logger 0.7.1", "futures 0.3.5", "futures-timer 3.0.2", - "log 0.4.11", "polkadot-node-network-protocol", "polkadot-node-subsystem", "polkadot-node-subsystem-test-helpers", @@ -4894,6 +4893,7 @@ dependencies = [ "sp-core", "sp-keyring", "thiserror", + "tracing", ] [[package]] @@ -4925,7 +4925,6 @@ dependencies = [ "assert_matches", "async-trait", "futures 0.3.5", - "log 0.4.11", "parity-scale-codec", "parking_lot 0.10.2", "polkadot-node-network-protocol", @@ -4937,6 +4936,7 @@ dependencies = [ "sp-core", "sp-keyring", "sp-runtime", + "tracing", ] [[package]] @@ -4944,7 +4944,6 @@ name = "polkadot-node-collation-generation" version = "0.1.0" dependencies = [ "futures 0.3.5", - "log 0.4.11", "polkadot-erasure-coding", "polkadot-node-primitives", "polkadot-node-subsystem", @@ -4953,6 +4952,7 @@ dependencies = [ "polkadot-primitives", "sp-core", "thiserror", + "tracing", ] [[package]] @@ -4966,7 +4966,6 @@ dependencies = [ "kvdb", "kvdb-memorydb", "kvdb-rocksdb", - "log 0.4.11", 
"parity-scale-codec", "polkadot-erasure-coding", "polkadot-node-subsystem", @@ -4978,6 +4977,7 @@ dependencies = [ "smallvec 1.4.2", "sp-core", "thiserror", + "tracing", ] [[package]] @@ -4987,7 +4987,6 @@ dependencies = [ "assert_matches", "bitvec", "futures 0.3.5", - "log 0.4.11", "polkadot-erasure-coding", "polkadot-node-primitives", "polkadot-node-subsystem", @@ -5001,6 +5000,7 @@ dependencies = [ "sp-keyring", "sp-keystore", "thiserror", + "tracing", ] [[package]] @@ -5009,12 +5009,12 @@ version = "0.1.0" dependencies = [ "derive_more", "futures 0.3.5", - "log 0.4.11", "polkadot-node-subsystem", "polkadot-node-subsystem-util", "polkadot-primitives", "sp-keystore", "thiserror", + "tracing", "wasm-timer", ] @@ -5023,12 +5023,12 @@ name = "polkadot-node-core-candidate-selection" version = "0.1.0" dependencies = [ "futures 0.3.5", - "log 0.4.11", "polkadot-node-subsystem", "polkadot-node-subsystem-util", "polkadot-primitives", "sp-core", "thiserror", + "tracing", ] [[package]] @@ -5037,7 +5037,6 @@ version = "0.1.0" dependencies = [ "assert_matches", "futures 0.3.5", - "log 0.4.11", "parity-scale-codec", "polkadot-node-primitives", "polkadot-node-subsystem", @@ -5047,6 +5046,7 @@ dependencies = [ "polkadot-primitives", "sp-core", "sp-keyring", + "tracing", ] [[package]] @@ -5069,7 +5069,6 @@ version = "0.1.0" dependencies = [ "futures 0.3.5", "futures-timer 3.0.2", - "log 0.4.11", "polkadot-node-subsystem", "polkadot-overseer", "polkadot-primitives", @@ -5084,6 +5083,7 @@ dependencies = [ "sp-runtime", "sp-transaction-pool", "substrate-prometheus-endpoint", + "tracing", ] [[package]] @@ -5093,13 +5093,13 @@ dependencies = [ "bitvec", "futures 0.3.5", "futures-timer 3.0.2", - "log 0.4.11", "polkadot-node-subsystem", "polkadot-node-subsystem-util", "polkadot-primitives", "sp-application-crypto", "sp-keystore", "thiserror", + "tracing", ] [[package]] @@ -5146,7 +5146,6 @@ dependencies = [ "derive_more", "futures 0.3.5", "futures-timer 3.0.2", - "log 0.4.11", 
"parity-scale-codec", "parking_lot 0.10.2", "pin-project 0.4.23", @@ -5160,6 +5159,7 @@ dependencies = [ "sp-core", "substrate-prometheus-endpoint", "thiserror", + "tracing", ] [[package]] @@ -5169,7 +5169,6 @@ dependencies = [ "async-trait", "futures 0.3.5", "futures-timer 3.0.2", - "log 0.4.11", "parity-scale-codec", "parking_lot 0.10.2", "pin-project 0.4.23", @@ -5182,6 +5181,7 @@ dependencies = [ "sc-network", "smallvec 1.4.2", "sp-core", + "tracing", ] [[package]] @@ -5208,6 +5208,7 @@ dependencies = [ "streamunordered", "substrate-prometheus-endpoint", "thiserror", + "tracing", ] [[package]] @@ -5219,7 +5220,6 @@ dependencies = [ "futures 0.3.5", "futures-timer 3.0.2", "kv-log-macro", - "log 0.4.11", "polkadot-node-network-protocol", "polkadot-node-primitives", "polkadot-node-subsystem", @@ -5228,6 +5228,7 @@ dependencies = [ "sc-client-api", "sp-core", "streamunordered", + "tracing", ] [[package]] @@ -5258,13 +5259,13 @@ version = "0.1.0" dependencies = [ "assert_matches", "futures 0.3.5", - "log 0.4.11", "polkadot-node-network-protocol", "polkadot-node-subsystem", "polkadot-node-subsystem-test-helpers", "polkadot-node-subsystem-util", "polkadot-primitives", "sp-core", + "tracing", ] [[package]] @@ -5499,7 +5500,6 @@ dependencies = [ "hex-literal 0.2.1", "kusama-runtime", "lazy_static", - "log 0.4.11", "pallet-babe", "pallet-im-online", "pallet-staking", @@ -5565,6 +5565,7 @@ dependencies = [ "sp-transaction-pool", "sp-trie", "substrate-prometheus-endpoint", + "tracing", "westend-runtime", ] @@ -5576,7 +5577,6 @@ dependencies = [ "assert_matches", "futures 0.3.5", "indexmap", - "log 0.4.11", "polkadot-node-network-protocol", "polkadot-node-primitives", "polkadot-node-subsystem", @@ -5589,6 +5589,7 @@ dependencies = [ "sp-keyring", "sp-keystore", "sp-staking", + "tracing", ] [[package]] @@ -5692,7 +5693,6 @@ dependencies = [ "futures 0.1.29", "futures 0.3.5", "hex", - "log 0.4.11", "pallet-balances", "pallet-staking", "pallet-transaction-payment", @@ -5734,6 
+5734,7 @@ dependencies = [ "substrate-test-utils", "tempfile", "tokio 0.2.21", + "tracing", ] [[package]] diff --git a/node/collation-generation/Cargo.toml b/node/collation-generation/Cargo.toml index 0e302ae6884f..abc77727ab56 100644 --- a/node/collation-generation/Cargo.toml +++ b/node/collation-generation/Cargo.toml @@ -6,7 +6,7 @@ edition = "2018" [dependencies] futures = "0.3.5" -log = "0.4.8" +tracing = "0.1.21" polkadot-erasure-coding = { path = "../../erasure-coding" } polkadot-node-primitives = { path = "../primitives" } polkadot-node-subsystem = { path = "../subsystem" } diff --git a/node/collation-generation/src/lib.rs b/node/collation-generation/src/lib.rs index 9fe91b140f49..620c0069f449 100644 --- a/node/collation-generation/src/lib.rs +++ b/node/collation-generation/src/lib.rs @@ -95,7 +95,7 @@ impl CollationGenerationSubsystem { msg = receiver.next().fuse() => { if let Some(msg) = msg { if let Err(err) = ctx.send_message(msg).await { - log::warn!(target: LOG_TARGET, "failed to forward message to overseer: {:?}", err); + tracing::warn!(target: LOG_TARGET, "failed to forward message to overseer: {:?}", err); break; } } @@ -129,7 +129,7 @@ impl CollationGenerationSubsystem { if let Err(err) = handle_new_activations(config.clone(), &activated, ctx, metrics, sender).await { - log::warn!(target: LOG_TARGET, "failed to handle new activations: {}", err); + tracing::warn!(target: LOG_TARGET, "failed to handle new activations: {}", err); }; } false @@ -139,7 +139,7 @@ impl CollationGenerationSubsystem { msg: CollationGenerationMessage::Initialize(config), }) => { if self.config.is_some() { - log::error!(target: LOG_TARGET, "double initialization"); + tracing::error!(target: LOG_TARGET, "double initialization"); } else { self.config = Some(Arc::new(config)); } @@ -147,7 +147,7 @@ impl CollationGenerationSubsystem { } Ok(Signal(BlockFinalized(_))) => false, Err(err) => { - log::error!( + tracing::error!( target: LOG_TARGET, "error receiving message from 
subsystem context: {:?}", err @@ -237,7 +237,7 @@ async fn handle_new_activations( let collation = match (task_config.collator)(relay_parent, &validation_data).await { Some(collation) => collation, None => { - log::debug!( + tracing::debug!( target: LOG_TARGET, "collator returned no collation on collate for para_id {}.", scheduled_core.para_id, @@ -262,7 +262,7 @@ async fn handle_new_activations( ) { Ok(erasure_root) => erasure_root, Err(err) => { - log::error!( + tracing::error!( target: LOG_TARGET, "failed to calculate erasure root for para_id {}: {:?}", scheduled_core.para_id, @@ -299,7 +299,7 @@ async fn handle_new_activations( if let Err(err) = task_sender.send(AllMessages::CollatorProtocol( CollatorProtocolMessage::DistributeCollation(ccr, collation.proof_of_validity) )).await { - log::warn!( + tracing::warn!( target: LOG_TARGET, "failed to send collation result for para_id {}: {:?}", scheduled_core.para_id, diff --git a/node/core/av-store/Cargo.toml b/node/core/av-store/Cargo.toml index 8c22eab5a75b..2be581f45086 100644 --- a/node/core/av-store/Cargo.toml +++ b/node/core/av-store/Cargo.toml @@ -9,8 +9,8 @@ futures = "0.3.5" futures-timer = "3.0.2" kvdb = "0.7.0" kvdb-rocksdb = "0.9.1" -log = "0.4.11" thiserror = "1.0.21" +tracing = "0.1.21" codec = { package = "parity-scale-codec", version = "1.3.1", features = ["derive"] } erasure = { package = "polkadot-erasure-coding", path = "../../../erasure-coding" } diff --git a/node/core/av-store/src/lib.rs b/node/core/av-store/src/lib.rs index 57bd6d804c68..38d30cfd51bc 100644 --- a/node/core/av-store/src/lib.rs +++ b/node/core/av-store/src/lib.rs @@ -73,13 +73,13 @@ enum Error { } impl Error { - fn severity(&self) -> log::Level { + fn trace(&self) { match self { // don't spam the log with spurious errors Self::RuntimeApi(_) | - Self::Oneshot(_) => log::Level::Debug, + Self::Oneshot(_) => tracing::debug!(target: LOG_TARGET, "{:?}", self), // it's worth reporting otherwise - _ => log::Level::Warn, + _ => 
tracing::warn!(target: LOG_TARGET, "{:?}", self), } } } @@ -316,13 +316,13 @@ impl AvailabilityStoreSubsystem { let mut pov_pruning = pov_pruning(&self.inner).unwrap_or_default(); let now = PruningDelay::now()?; - log::trace!(target: LOG_TARGET, "Pruning PoVs"); + tracing::trace!(target: LOG_TARGET, "Pruning PoVs"); let outdated_records_count = pov_pruning.iter() .take_while(|r| r.prune_at <= now) .count(); for record in pov_pruning.drain(..outdated_records_count) { - log::trace!(target: LOG_TARGET, "Removing record {:?}", record); + tracing::trace!(target: LOG_TARGET, "Removing record {:?}", record); tx.delete( columns::DATA, available_data_key(&record.candidate_hash).as_slice(), @@ -340,13 +340,13 @@ impl AvailabilityStoreSubsystem { let mut chunk_pruning = chunk_pruning(&self.inner).unwrap_or_default(); let now = PruningDelay::now()?; - log::trace!(target: LOG_TARGET, "Pruning Chunks"); + tracing::trace!(target: LOG_TARGET, "Pruning Chunks"); let outdated_records_count = chunk_pruning.iter() .take_while(|r| r.prune_at <= now) .count(); for record in chunk_pruning.drain(..outdated_records_count) { - log::trace!(target: LOG_TARGET, "Removing record {:?}", record); + tracing::trace!(target: LOG_TARGET, "Removing record {:?}", record); tx.delete( columns::DATA, erasure_chunk_key(&record.candidate_hash, record.chunk_index).as_slice(), @@ -481,10 +481,10 @@ where let res = run_iteration(&mut subsystem, &mut ctx).await; match res { Err(e) => { - log::log!(target: LOG_TARGET, e.severity(), "{}", e); + e.trace(); } Ok(true) => { - log::info!(target: LOG_TARGET, "received `Conclude` signal, exiting"); + tracing::info!(target: LOG_TARGET, "received `Conclude` signal, exiting"); break; }, Ok(false) => continue, @@ -561,7 +561,7 @@ where // numbers we have to iterate through the whole collection here. 
for record in pov_pruning.iter_mut() { if record.block_number <= block_number { - log::trace!( + tracing::trace!( target: LOG_TARGET, "Updating pruning record for finalized block {}", record.block_number, @@ -580,7 +580,7 @@ where if let Some(mut chunk_pruning) = chunk_pruning(db) { for record in chunk_pruning.iter_mut() { if record.block_number <= block_number { - log::trace!( + tracing::trace!( target: LOG_TARGET, "Updating chunk pruning record for finalized block {}", record.block_number, @@ -610,17 +610,17 @@ where let events = match request_candidate_events(ctx, hash).await { Ok(events) => events, Err(err) => { - log::debug!(target: LOG_TARGET, "requesting candidate events failed due to {}", err); + tracing::debug!(target: LOG_TARGET, "requesting candidate events failed due to {}", err); return Ok(()); } }; - log::trace!(target: LOG_TARGET, "block activated {}", hash); + tracing::trace!(target: LOG_TARGET, "block activated {}", hash); let mut included = HashSet::new(); for event in events.into_iter() { if let CandidateEvent::CandidateIncluded(receipt, _) = event { - log::trace!(target: LOG_TARGET, "Candidate {:?} was included", receipt.hash()); + tracing::trace!(target: LOG_TARGET, "Candidate {:?} was included", receipt.hash()); included.insert(receipt.hash()); } } @@ -996,7 +996,7 @@ fn query_inner( } Ok(None) => None, Err(e) => { - log::warn!(target: LOG_TARGET, "Error reading from the availability store: {:?}", e); + tracing::warn!(target: LOG_TARGET, "Error reading from the availability store: {:?}", e); None } } diff --git a/node/core/av-store/src/tests.rs b/node/core/av-store/src/tests.rs index 9493e910db17..3d2e5d07d533 100644 --- a/node/core/av-store/src/tests.rs +++ b/node/core/av-store/src/tests.rs @@ -127,7 +127,7 @@ async fn overseer_send( overseer: &mut test_helpers::TestSubsystemContextHandle, msg: AvailabilityStoreMessage, ) { - log::trace!("Sending message:\n{:?}", &msg); + tracing::trace!("Sending message:\n{:?}", &msg); overseer 
.send(FromOverseer::Communication { msg }) .timeout(TIMEOUT) @@ -142,7 +142,7 @@ async fn overseer_recv( .await .expect(&format!("{:?} is more than enough to receive messages", TIMEOUT)); - log::trace!("Received message:\n{:?}", &msg); + tracing::trace!("Received message:\n{:?}", &msg); msg } @@ -151,7 +151,7 @@ async fn overseer_recv_with_timeout( overseer: &mut test_helpers::TestSubsystemContextHandle, timeout: Duration, ) -> Option { - log::trace!("Waiting for message..."); + tracing::trace!("Waiting for message..."); overseer .recv() .timeout(timeout) diff --git a/node/core/backing/Cargo.toml b/node/core/backing/Cargo.toml index 8892e201503c..e3eba3db58f5 100644 --- a/node/core/backing/Cargo.toml +++ b/node/core/backing/Cargo.toml @@ -14,7 +14,7 @@ polkadot-node-subsystem-util = { path = "../../subsystem-util" } erasure-coding = { package = "polkadot-erasure-coding", path = "../../../erasure-coding" } statement-table = { package = "polkadot-statement-table", path = "../../../statement-table" } bitvec = { version = "0.17.4", default-features = false, features = ["alloc"] } -log = "0.4.11" +tracing = "0.1.21" thiserror = "1.0.21" [dev-dependencies] diff --git a/node/core/backing/src/lib.rs b/node/core/backing/src/lib.rs index c3ab45c98025..acc133759e12 100644 --- a/node/core/backing/src/lib.rs +++ b/node/core/backing/src/lib.rs @@ -780,7 +780,7 @@ impl util::JobTrait for CandidateBackingJob { match $x { Ok(x) => x, Err(e) => { - log::warn!( + tracing::warn!( target: LOG_TARGET, "Failed to fetch runtime API data for job: {:?}", e, @@ -820,7 +820,7 @@ impl util::JobTrait for CandidateBackingJob { Ok(v) => v, Err(util::Error::NotAValidator) => { return Ok(()) }, Err(e) => { - log::warn!( + tracing::warn!( target: LOG_TARGET, "Cannot participate in candidate backing: {:?}", e diff --git a/node/core/bitfield-signing/Cargo.toml b/node/core/bitfield-signing/Cargo.toml index ef778186a473..8c9ffef15f3d 100644 --- a/node/core/bitfield-signing/Cargo.toml +++ 
b/node/core/bitfield-signing/Cargo.toml @@ -6,7 +6,7 @@ edition = "2018" [dependencies] futures = "0.3.5" -log = "0.4.11" +tracing = "0.1.21" polkadot-primitives = { path = "../../../primitives" } polkadot-node-subsystem = { path = "../../subsystem" } polkadot-node-subsystem-util = { path = "../../subsystem-util" } diff --git a/node/core/bitfield-signing/src/lib.rs b/node/core/bitfield-signing/src/lib.rs index c43915b74888..92ce13ac1616 100644 --- a/node/core/bitfield-signing/src/lib.rs +++ b/node/core/bitfield-signing/src/lib.rs @@ -164,7 +164,7 @@ async fn get_core_availability( Ok(None) => return Ok(false), Err(e) => { // Don't take down the node on runtime API errors. - log::warn!(target: LOG_TARGET, "Encountered a runtime API error: {:?}", e); + tracing::warn!(target: LOG_TARGET, "Encountered a runtime API error: {:?}", e); return Ok(false); } }; @@ -293,7 +293,7 @@ impl JobTrait for BitfieldSigningJob { { Err(Error::Runtime(runtime_err)) => { // Don't take down the node on runtime API errors. 
- log::warn!(target: LOG_TARGET, "Encountered a runtime API error: {:?}", runtime_err); + tracing::warn!(target: LOG_TARGET, "Encountered a runtime API error: {:?}", runtime_err); return Ok(()); } Err(err) => return Err(err), diff --git a/node/core/candidate-selection/Cargo.toml b/node/core/candidate-selection/Cargo.toml index 5a15550ddb4b..be631c217329 100644 --- a/node/core/candidate-selection/Cargo.toml +++ b/node/core/candidate-selection/Cargo.toml @@ -6,7 +6,7 @@ edition = "2018" [dependencies] futures = "0.3.5" -log = "0.4.11" +tracing = "0.1.21" thiserror = "1.0.21" polkadot-primitives = { path = "../../../primitives" } polkadot-node-subsystem = { path = "../../subsystem" } diff --git a/node/core/candidate-selection/src/lib.rs b/node/core/candidate-selection/src/lib.rs index f35478ba9c13..736d57a7fac1 100644 --- a/node/core/candidate-selection/src/lib.rs +++ b/node/core/candidate-selection/src/lib.rs @@ -212,7 +212,7 @@ impl CandidateSelectionJob { ).await { Ok(response) => response, Err(err) => { - log::warn!( + tracing::warn!( target: TARGET, "failed to get collation from collator protocol subsystem: {:?}", err @@ -230,7 +230,7 @@ impl CandidateSelectionJob { ) .await { - Err(err) => log::warn!(target: TARGET, "failed to second a candidate: {:?}", err), + Err(err) => tracing::warn!(target: TARGET, "failed to second a candidate: {:?}", err), Ok(()) => self.seconded_candidate = Some(collator_id), } } @@ -240,14 +240,14 @@ impl CandidateSelectionJob { let received_from = match &self.seconded_candidate { Some(peer) => peer, None => { - log::warn!( + tracing::warn!( target: TARGET, "received invalidity notice for a candidate we don't remember seconding" ); return; } }; - log::info!( + tracing::info!( target: TARGET, "received invalidity note for candidate {:?}", candidate_receipt @@ -255,7 +255,7 @@ impl CandidateSelectionJob { let result = if let Err(err) = forward_invalidity_note(received_from, &mut self.sender).await { - log::warn!( + tracing::warn!( target: 
TARGET, "failed to forward invalidity note: {:?}", err @@ -305,7 +305,7 @@ async fn second_candidate( .await { Err(err) => { - log::warn!(target: TARGET, "failed to send a seconding message"); + tracing::warn!(target: TARGET, "failed to send a seconding message"); metrics.on_second(Err(())); Err(err.into()) } diff --git a/node/core/candidate-validation/Cargo.toml b/node/core/candidate-validation/Cargo.toml index 87cb639a3601..c38eefc01298 100644 --- a/node/core/candidate-validation/Cargo.toml +++ b/node/core/candidate-validation/Cargo.toml @@ -6,7 +6,7 @@ edition = "2018" [dependencies] futures = "0.3.5" -log = "0.4.11" +tracing = "0.1.21" sp-core = { package = "sp-core", git = "https://github.com/paritytech/substrate", branch = "master" } parity-scale-codec = { version = "1.3.0", default-features = false, features = ["bit-vec", "derive"] } diff --git a/node/core/candidate-validation/src/lib.rs b/node/core/candidate-validation/src/lib.rs index 2a03a0facf05..f74b36112b81 100644 --- a/node/core/candidate-validation/src/lib.rs +++ b/node/core/candidate-validation/src/lib.rs @@ -139,7 +139,7 @@ async fn run( Ok(x) => { metrics.on_validation_event(&x); if let Err(_e) = response_sender.send(x) { - log::warn!( + tracing::warn!( target: LOG_TARGET, "Requester of candidate validation dropped", ) diff --git a/node/core/proposer/Cargo.toml b/node/core/proposer/Cargo.toml index 6028d931c12f..15fc3d097aa6 100644 --- a/node/core/proposer/Cargo.toml +++ b/node/core/proposer/Cargo.toml @@ -7,7 +7,7 @@ edition = "2018" [dependencies] futures = "0.3.4" futures-timer = "3.0.2" -log = "0.4.8" +tracing = "0.1.21" polkadot-node-subsystem = { path = "../../subsystem" } polkadot-overseer = { path = "../../overseer" } polkadot-primitives = { path = "../../../primitives" } diff --git a/node/core/proposer/src/lib.rs b/node/core/proposer/src/lib.rs index a4cd2eb5c01a..958ea2c8abd6 100644 --- a/node/core/proposer/src/lib.rs +++ b/node/core/proposer/src/lib.rs @@ -193,7 +193,7 @@ where let 
provisioner_data = match self.get_provisioner_data().await { Ok(pd) => pd, Err(err) => { - log::warn!("could not get provisioner inherent data; injecting default data: {}", err); + tracing::warn!("could not get provisioner inherent data; injecting default data: {}", err); Default::default() } }; diff --git a/node/core/provisioner/Cargo.toml b/node/core/provisioner/Cargo.toml index 0260b0c4386e..833d94826a34 100644 --- a/node/core/provisioner/Cargo.toml +++ b/node/core/provisioner/Cargo.toml @@ -7,7 +7,7 @@ edition = "2018" [dependencies] bitvec = { version = "0.17.4", default-features = false, features = ["alloc"] } futures = "0.3.5" -log = "0.4.11" +tracing = "0.1.21" thiserror = "1.0.21" polkadot-primitives = { path = "../../../primitives" } polkadot-node-subsystem = { path = "../../subsystem" } diff --git a/node/core/provisioner/src/lib.rs b/node/core/provisioner/src/lib.rs index 58695d61d998..ed76287ac2ec 100644 --- a/node/core/provisioner/src/lib.rs +++ b/node/core/provisioner/src/lib.rs @@ -205,7 +205,7 @@ impl ProvisioningJob { ) .await { - log::warn!(target: LOG_TARGET, "failed to assemble or send inherent data: {:?}", err); + tracing::warn!(target: LOG_TARGET, "failed to assemble or send inherent data: {:?}", err); self.metrics.on_inherent_data_request(Err(())); } else { self.metrics.on_inherent_data_request(Ok(())); @@ -460,7 +460,7 @@ fn bitfields_indicate_availability( // in principle, this function might return a `Result` so that we can more clearly express this error condition // however, in practice, that would just push off an error-handling routine which would look a whole lot like this one. // simpler to just handle the error internally here. 
- log::warn!( + tracing::warn!( target: LOG_TARGET, "attempted to set a transverse bit at idx {} which is greater than bitfield size {}", validator_idx, diff --git a/node/network/availability-distribution/Cargo.toml b/node/network/availability-distribution/Cargo.toml index 1f032b7f054b..047050ccb812 100644 --- a/node/network/availability-distribution/Cargo.toml +++ b/node/network/availability-distribution/Cargo.toml @@ -6,7 +6,7 @@ edition = "2018" [dependencies] futures = "0.3.5" -log = "0.4.11" +tracing = "0.1.21" codec = { package="parity-scale-codec", version = "1.3.4", features = ["std"] } polkadot-primitives = { path = "../../../primitives" } polkadot-erasure-coding = { path = "../../../erasure-coding" } diff --git a/node/network/availability-distribution/src/lib.rs b/node/network/availability-distribution/src/lib.rs index 3c86911b2578..ecda2c49f5df 100644 --- a/node/network/availability-distribution/src/lib.rs +++ b/node/network/availability-distribution/src/lib.rs @@ -30,7 +30,7 @@ use futures::{channel::oneshot, FutureExt, TryFutureExt}; use sp_core::crypto::Public; use sp_keystore::{CryptoStore, SyncCryptoStorePtr}; -use log::{trace, warn}; +use tracing::{trace, warn}; use polkadot_erasure_coding::branch_hash; use polkadot_node_network_protocol::{ v1 as protocol_v1, NetworkBridgeEvent, PeerId, ReputationChange as Rep, View, diff --git a/node/network/availability-distribution/src/tests.rs b/node/network/availability-distribution/src/tests.rs index ba3988d58318..6186dd0cee78 100644 --- a/node/network/availability-distribution/src/tests.rs +++ b/node/network/availability-distribution/src/tests.rs @@ -103,7 +103,7 @@ async fn overseer_send( overseer: &mut test_helpers::TestSubsystemContextHandle, msg: AvailabilityDistributionMessage, ) { - log::trace!("Sending message:\n{:?}", &msg); + tracing::trace!("Sending message:\n{:?}", &msg); overseer .send(FromOverseer::Communication { msg }) .timeout(TIMEOUT) @@ -114,13 +114,13 @@ async fn overseer_send( async fn 
overseer_recv( overseer: &mut test_helpers::TestSubsystemContextHandle, ) -> AllMessages { - log::trace!("Waiting for message ..."); + tracing::trace!("Waiting for message ..."); let msg = overseer .recv() .timeout(TIMEOUT) .await .expect("TIMEOUT is enough to recv."); - log::trace!("Received message:\n{:?}", &msg); + tracing::trace!("Received message:\n{:?}", &msg); msg } @@ -438,11 +438,11 @@ fn reputation_verification() { let peer_b = PeerId::random(); assert_ne!(&peer_a, &peer_b); - log::trace!("peer A: {:?}", peer_a); - log::trace!("peer B: {:?}", peer_b); + tracing::trace!("peer A: {:?}", peer_a); + tracing::trace!("peer B: {:?}", peer_b); - log::trace!("candidate A: {:?}", candidates[0].hash()); - log::trace!("candidate B: {:?}", candidates[1].hash()); + tracing::trace!("candidate A: {:?}", candidates[0].hash()); + tracing::trace!("candidate B: {:?}", candidates[1].hash()); overseer_signal( &mut virtual_overseer, @@ -626,7 +626,7 @@ fn reputation_verification() { let mut candidates2 = candidates.clone(); // check if the availability store can provide the desired erasure chunks for i in 0usize..2 { - log::trace!("0000"); + tracing::trace!("0000"); let avail_data = make_available_data(&test_state, pov_block_a.clone()); let chunks = derive_erasure_chunks_with_proofs(test_state.validators.len(), &avail_data); @@ -651,10 +651,10 @@ fn reputation_verification() { assert_eq!(chunks.len(), test_state.validators.len()); - log::trace!("xxxx"); + tracing::trace!("xxxx"); // retrieve a stored chunk for (j, chunk) in chunks.into_iter().enumerate() { - log::trace!("yyyy i={}, j={}", i, j); + tracing::trace!("yyyy i={}, j={}", i, j); if i != 0 { // not a validator, so this never happens break; diff --git a/node/network/bitfield-distribution/Cargo.toml b/node/network/bitfield-distribution/Cargo.toml index 41e34cb2f7cf..a47cf93dd0cd 100644 --- a/node/network/bitfield-distribution/Cargo.toml +++ b/node/network/bitfield-distribution/Cargo.toml @@ -6,7 +6,7 @@ edition = "2018" 
[dependencies] futures = "0.3.5" -log = "0.4.11" +tracing = "0.1.21" codec = { package="parity-scale-codec", version = "1.3.4" } polkadot-primitives = { path = "../../../primitives" } polkadot-subsystem = { package = "polkadot-node-subsystem", path = "../../subsystem" } diff --git a/node/network/bitfield-distribution/src/lib.rs b/node/network/bitfield-distribution/src/lib.rs index 7e94adee2b31..a4229ce563ab 100644 --- a/node/network/bitfield-distribution/src/lib.rs +++ b/node/network/bitfield-distribution/src/lib.rs @@ -25,7 +25,7 @@ use codec::{Decode, Encode}; use futures::{channel::oneshot, FutureExt}; -use log::{debug, trace, warn}; +use tracing::{debug, trace, warn}; use polkadot_subsystem::messages::*; use polkadot_subsystem::{ ActiveLeavesUpdate, FromOverseer, OverseerSignal, SpawnedSubsystem, Subsystem, SubsystemContext, SubsystemResult, diff --git a/node/network/bridge/Cargo.toml b/node/network/bridge/Cargo.toml index 32b41133574c..ed5d10f7eabb 100644 --- a/node/network/bridge/Cargo.toml +++ b/node/network/bridge/Cargo.toml @@ -7,7 +7,7 @@ edition = "2018" [dependencies] async-trait = "0.1" futures = "0.3.5" -log = "0.4.11" +tracing = "0.1.21" polkadot-primitives = { path = "../../../primitives" } parity-scale-codec = "1.3.4" sc-authority-discovery = { git = "https://github.com/paritytech/substrate", branch = "master" } diff --git a/node/network/bridge/src/lib.rs b/node/network/bridge/src/lib.rs index 9ce841f0838f..c15c50fedcb7 100644 --- a/node/network/bridge/src/lib.rs +++ b/node/network/bridge/src/lib.rs @@ -291,7 +291,7 @@ fn action_from_overseer_message( Ok(FromOverseer::Signal(OverseerSignal::BlockFinalized(_))) => Action::Nop, Err(e) => { - log::warn!(target: TARGET, "Shutting down Network Bridge due to error {:?}", e); + tracing::warn!(target: TARGET, "Shutting down Network Bridge due to error {:?}", e); Action::Abort } } @@ -300,7 +300,7 @@ fn action_from_overseer_message( fn action_from_network_message(event: Option) -> Action { match event { 
None => { - log::info!(target: TARGET, "Shutting down Network Bridge: underlying event stream concluded"); + tracing::info!(target: TARGET, "Shutting down Network Bridge: underlying event stream concluded"); Action::Abort } Some(NetworkEvent::Dht(_)) => Action::Nop, @@ -384,7 +384,7 @@ async fn update_view( NetworkBridgeEvent::OurViewChange(new_view.clone()), ctx, ).await { - log::warn!(target: TARGET, "Aborting - Failure to dispatch messages to overseer"); + tracing::warn!(target: TARGET, "Aborting - Failure to dispatch messages to overseer"); return Err(e) } @@ -392,7 +392,7 @@ async fn update_view( NetworkBridgeEvent::OurViewChange(new_view.clone()), ctx, ).await { - log::warn!(target: TARGET, "Aborting - Failure to dispatch messages to overseer"); + tracing::warn!(target: TARGET, "Aborting - Failure to dispatch messages to overseer"); return Err(e) } @@ -691,7 +691,7 @@ where }; if let Err(e) = res { - log::warn!("Aborting - Failure to dispatch messages to overseer"); + tracing::warn!("Aborting - Failure to dispatch messages to overseer"); return Err(e); } } @@ -718,7 +718,7 @@ where }; if let Err(e) = res { - log::warn!( + tracing::warn!( target: TARGET, "Aborting - Failure to dispatch messages to overseer", ); @@ -739,7 +739,7 @@ where events, &mut ctx, ).await { - log::warn!( + tracing::warn!( target: TARGET, "Aborting - Failure to dispatch messages to overseer", ); @@ -759,7 +759,7 @@ where events, &mut ctx, ).await { - log::warn!( + tracing::warn!( target: TARGET, "Aborting - Failure to dispatch messages to overseer", ); diff --git a/node/network/bridge/src/validator_discovery.rs b/node/network/bridge/src/validator_discovery.rs index da5491bf6d6e..28d127721494 100644 --- a/node/network/bridge/src/validator_discovery.rs +++ b/node/network/bridge/src/validator_discovery.rs @@ -283,7 +283,7 @@ impl Service { PRIORITY_GROUP.to_owned(), multiaddr_to_add, ).await { - log::warn!(target: super::TARGET, "AuthorityDiscoveryService returned an invalid multiaddress: 
{}", e); + tracing::warn!(target: super::TARGET, "AuthorityDiscoveryService returned an invalid multiaddress: {}", e); } // the addresses are known to be valid let _ = network_service.remove_from_priority_group(PRIORITY_GROUP.to_owned(), multiaddr_to_remove).await; diff --git a/node/network/collator-protocol/Cargo.toml b/node/network/collator-protocol/Cargo.toml index 3b10015e1545..7044349b2052 100644 --- a/node/network/collator-protocol/Cargo.toml +++ b/node/network/collator-protocol/Cargo.toml @@ -6,7 +6,7 @@ edition = "2018" [dependencies] futures = "0.3.5" -log = "0.4.11" +tracing = "0.1.21" thiserror = "1.0.21" diff --git a/node/network/collator-protocol/src/collator_side.rs b/node/network/collator-protocol/src/collator_side.rs index f0b974c82293..ecbc42ece402 100644 --- a/node/network/collator-protocol/src/collator_side.rs +++ b/node/network/collator-protocol/src/collator_side.rs @@ -19,7 +19,7 @@ use std::collections::HashMap; use super::{LOG_TARGET, Result}; use futures::{StreamExt, task::Poll}; -use log::warn; +use tracing::warn; use polkadot_primitives::v1::{ CollatorId, CoreIndex, CoreState, Hash, Id as ParaId, CandidateReceipt, @@ -682,7 +682,7 @@ mod tests { use assert_matches::assert_matches; use futures::{executor, future, Future}; - use log::trace; + use tracing::trace; use smallvec::smallvec; use sp_core::crypto::Pair; diff --git a/node/network/collator-protocol/src/lib.rs b/node/network/collator-protocol/src/lib.rs index 2d4f51aa0484..877d94ad1d4d 100644 --- a/node/network/collator-protocol/src/lib.rs +++ b/node/network/collator-protocol/src/lib.rs @@ -21,7 +21,7 @@ use std::time::Duration; use futures::{channel::oneshot, FutureExt, TryFutureExt}; -use log::trace; +use tracing::trace; use thiserror::Error; use polkadot_subsystem::{ diff --git a/node/network/collator-protocol/src/validator_side.rs b/node/network/collator-protocol/src/validator_side.rs index 9b95c0ee4641..86347d3698e9 100644 --- a/node/network/collator-protocol/src/validator_side.rs 
+++ b/node/network/collator-protocol/src/validator_side.rs @@ -25,7 +25,7 @@ use futures::{ future::BoxFuture, stream::FuturesUnordered, }; -use log::{trace, warn}; +use tracing::{trace, warn}; use polkadot_primitives::v1::{ Id as ParaId, CandidateReceipt, CollatorId, Hash, PoV, @@ -784,7 +784,7 @@ mod tests { overseer: &mut test_helpers::TestSubsystemContextHandle, msg: CollatorProtocolMessage, ) { - log::trace!("Sending message:\n{:?}", &msg); + tracing::trace!("Sending message:\n{:?}", &msg); overseer .send(FromOverseer::Communication { msg }) .timeout(TIMEOUT) @@ -799,7 +799,7 @@ mod tests { .await .expect(&format!("{:?} is enough to receive messages.", TIMEOUT)); - log::trace!("Received message:\n{:?}", &msg); + tracing::trace!("Received message:\n{:?}", &msg); msg } @@ -808,7 +808,7 @@ mod tests { overseer: &mut test_helpers::TestSubsystemContextHandle, timeout: Duration, ) -> Option { - log::trace!("Waiting for message..."); + tracing::trace!("Waiting for message..."); overseer .recv() .timeout(timeout) @@ -826,7 +826,7 @@ mod tests { } = test_harness; let pair = CollatorPair::generate().0; - log::trace!("activating"); + tracing::trace!("activating"); overseer_send( &mut virtual_overseer, diff --git a/node/network/pov-distribution/Cargo.toml b/node/network/pov-distribution/Cargo.toml index 2449a63016f7..4bcf0eab12a2 100644 --- a/node/network/pov-distribution/Cargo.toml +++ b/node/network/pov-distribution/Cargo.toml @@ -6,7 +6,7 @@ edition = "2018" [dependencies] futures = "0.3.5" -log = "0.4.11" +tracing = "0.1.21" polkadot-primitives = { path = "../../../primitives" } polkadot-subsystem = { package = "polkadot-node-subsystem", path = "../../subsystem" } polkadot-node-subsystem-util = { path = "../../subsystem-util" } diff --git a/node/network/pov-distribution/src/lib.rs b/node/network/pov-distribution/src/lib.rs index e28013a06a4d..8096cf549f26 100644 --- a/node/network/pov-distribution/src/lib.rs +++ b/node/network/pov-distribution/src/lib.rs @@ -134,7 
+134,7 @@ async fn handle_signal( let n_validators = match vals_rx.await? { Ok(v) => v.len(), Err(e) => { - log::warn!( + tracing::warn!( target: LOG_TARGET, "Error fetching validators from runtime API for active leaf: {:?}", e @@ -286,7 +286,7 @@ async fn handle_fetch( } if relay_parent_state.fetching.len() > 2 * relay_parent_state.n_validators { - log::warn!("Other subsystems have requested PoV distribution to \ + tracing::warn!("Other subsystems have requested PoV distribution to \ fetch more PoVs than reasonably expected: {}", relay_parent_state.fetching.len()); return Ok(()); } @@ -359,7 +359,7 @@ async fn handle_awaiting( let relay_parent_state = match state.relay_parent_state.get_mut(&relay_parent) { None => { - log::warn!("PoV Distribution relay parent state out-of-sync with our view"); + tracing::warn!("PoV Distribution relay parent state out-of-sync with our view"); return Ok(()); } Some(s) => s, diff --git a/node/network/statement-distribution/Cargo.toml b/node/network/statement-distribution/Cargo.toml index fa0b3786d014..8a83e0681a6f 100644 --- a/node/network/statement-distribution/Cargo.toml +++ b/node/network/statement-distribution/Cargo.toml @@ -7,7 +7,7 @@ edition = "2018" [dependencies] futures = "0.3.5" -log = "0.4.11" +tracing = "0.1.21" polkadot-primitives = { path = "../../../primitives" } node-primitives = { package = "polkadot-node-primitives", path = "../../primitives" } sp-staking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } diff --git a/node/network/statement-distribution/src/lib.rs b/node/network/statement-distribution/src/lib.rs index bb4c192c4ff4..41dc16841615 100644 --- a/node/network/statement-distribution/src/lib.rs +++ b/node/network/statement-distribution/src/lib.rs @@ -111,7 +111,7 @@ impl VcPerPeerTracker { /// based on a message that we have sent it from our local pool. 
fn note_local(&mut self, h: CandidateHash) { if !note_hash(&mut self.local_observed, h) { - log::warn!("Statement distribution is erroneously attempting to distribute more \ + tracing::warn!("Statement distribution is erroneously attempting to distribute more \ than {} candidate(s) per validator index. Ignoring", VC_THRESHOLD); } } @@ -688,7 +688,7 @@ async fn handle_incoming_message<'a>( None => { // This should never be out-of-sync with our view if the view updates // correspond to actual `StartWork` messages. So we just log and ignore. - log::warn!("Our view out-of-sync with active heads. Head {} not found", relay_parent); + tracing::warn!("Our view out-of-sync with active heads. Head {} not found", relay_parent); return Ok(None); } }; @@ -847,7 +847,7 @@ async fn handle_network_update( for new in our_view.difference(&old_view) { if !active_heads.contains_key(&new) { - log::warn!(target: LOG_TARGET, "Our network bridge view update \ + tracing::warn!(target: LOG_TARGET, "Our network bridge view update \ inconsistent with `StartWork` messages we have received from overseer. \ Contains unknown hash {}", new); } @@ -899,7 +899,7 @@ impl StatementDistribution { match (val_rx.await?, session_rx.await?) 
{ (Ok(v), Ok(s)) => (v, s), (Err(e), _) | (_, Err(e)) => { - log::warn!( + tracing::warn!( target: LOG_TARGET, "Failed to fetch runtime API data for active leaf: {:?}", e, diff --git a/node/overseer/Cargo.toml b/node/overseer/Cargo.toml index 5341cb42e014..16e0318eee65 100644 --- a/node/overseer/Cargo.toml +++ b/node/overseer/Cargo.toml @@ -6,7 +6,7 @@ edition = "2018" [dependencies] futures = "0.3.5" -log = "0.4.11" +tracing = "0.1.21" futures-timer = "3.0.2" streamunordered = "0.5.1" polkadot-primitives = { path = "../../primitives" } @@ -22,5 +22,4 @@ polkadot-node-network-protocol = { path = "../network/protocol" } futures = { version = "0.3.5", features = ["thread-pool"] } futures-timer = "3.0.2" femme = "2.1.1" -log = "0.4.11" kv-log-macro = "1.0.7" diff --git a/node/overseer/examples/minimal-example.rs b/node/overseer/examples/minimal-example.rs index dd923f5df86b..03d810da2f0a 100644 --- a/node/overseer/examples/minimal-example.rs +++ b/node/overseer/examples/minimal-example.rs @@ -43,13 +43,13 @@ impl Subsystem1 { match ctx.try_recv().await { Ok(Some(msg)) => { if let FromOverseer::Communication { msg } = msg { - log::info!("msg {:?}", msg); + tracing::info!("msg {:?}", msg); } continue; } Ok(None) => (), Err(_) => { - log::info!("exiting"); + tracing::info!("exiting"); return; } } @@ -94,7 +94,7 @@ impl Subsystem2 { "subsystem-2-job", Box::pin(async { loop { - log::info!("Job tick"); + tracing::info!("Job tick"); Delay::new(Duration::from_secs(1)).await; } }), @@ -103,12 +103,12 @@ impl Subsystem2 { loop { match ctx.try_recv().await { Ok(Some(msg)) => { - log::info!("Subsystem2 received message {:?}", msg); + tracing::info!("Subsystem2 received message {:?}", msg); continue; } Ok(None) => { pending!(); } Err(_) => { - log::info!("exiting"); + tracing::info!("exiting"); return; }, } @@ -159,7 +159,7 @@ fn main() { select! 
{ _ = overseer_fut => break, _ = timer_stream.next() => { - log::info!("tick"); + tracing::info!("tick"); } complete => break, } diff --git a/node/overseer/src/lib.rs b/node/overseer/src/lib.rs index 01e19bc65b5e..c63f6b44184f 100644 --- a/node/overseer/src/lib.rs +++ b/node/overseer/src/lib.rs @@ -1337,7 +1337,7 @@ where // Some subsystem exited? It's time to panic. if let Poll::Ready(Some(finished)) = poll!(self.running_subsystems.next()) { - log::error!(target: LOG_TARGET, "Subsystem finished unexpectedly {:?}", finished); + tracing::error!(target: LOG_TARGET, "Subsystem finished unexpectedly {:?}", finished); self.stop().await; return finished; } @@ -1607,9 +1607,9 @@ fn spawn( let fut = Box::pin(async move { if let Err(e) = future.await { - log::error!("Subsystem {} exited with error {:?}", name, e); + tracing::error!("Subsystem {} exited with error {:?}", name, e); } else { - log::debug!("Subsystem {} exited without an error", name); + tracing::debug!("Subsystem {} exited without an error", name); } let _ = tx.send(()); }); @@ -1617,7 +1617,7 @@ fn spawn( spawner.spawn(name, fut); let _ = streams.push(from_rx); - futures.push(Box::pin(rx.map(|e| { log::warn!("Dropping error {:?}", e); Ok(()) }))); + futures.push(Box::pin(rx.map(|e| { tracing::warn!("Dropping error {:?}", e); Ok(()) }))); let instance = Some(SubsystemInstance { tx: to_tx, diff --git a/node/service/Cargo.toml b/node/service/Cargo.toml index c7ac6fe2daa8..afb04a80547e 100644 --- a/node/service/Cargo.toml +++ b/node/service/Cargo.toml @@ -55,7 +55,7 @@ codec = { package = "parity-scale-codec", version = "1.3.4" } futures = "0.3.4" hex-literal = "0.2.1" lazy_static = "1.4.0" -log = "0.4.8" +tracing = "0.1.21" parking_lot = "0.9.0" serde = { version = "1.0.102", features = ["derive"] } slog = "2.5.2" diff --git a/node/service/src/lib.rs b/node/service/src/lib.rs index 0c351d1aab61..ed62a58ae928 100644 --- a/node/service/src/lib.rs +++ b/node/service/src/lib.rs @@ -28,7 +28,7 @@ use { 
std::convert::TryInto, std::time::Duration, - log::info, + tracing::info, polkadot_node_core_av_store::Config as AvailabilityConfig, polkadot_node_core_proposer::ProposerFactory, polkadot_overseer::{AllSubsystems, BlockInfo, Overseer, OverseerHandler}, diff --git a/node/subsystem-test-helpers/Cargo.toml b/node/subsystem-test-helpers/Cargo.toml index 24f49ec3a1c7..2b9f506d4624 100644 --- a/node/subsystem-test-helpers/Cargo.toml +++ b/node/subsystem-test-helpers/Cargo.toml @@ -9,7 +9,7 @@ description = "Subsystem traits and message definitions" async-trait = "0.1" futures = "0.3.5" futures-timer = "3.0.2" -log = "0.4.8" +tracing = "0.1.21" parity-scale-codec = "1.3.4" parking_lot = "0.10.0" pin-project = "0.4.23" diff --git a/node/subsystem-util/Cargo.toml b/node/subsystem-util/Cargo.toml index f03cea134827..907f3a5bedd6 100644 --- a/node/subsystem-util/Cargo.toml +++ b/node/subsystem-util/Cargo.toml @@ -9,12 +9,12 @@ description = "Subsystem traits and message definitions" async-trait = "0.1" futures = "0.3.5" futures-timer = "3.0.2" -log = "0.4.11" -thiserror = "1.0.21" parity-scale-codec = "1.3.4" parking_lot = { version = "0.10.0", optional = true } pin-project = "0.4.22" streamunordered = "0.5.1" +thiserror = "1.0.21" +tracing = "0.1.21" polkadot-node-primitives = { path = "../primitives" } polkadot-node-subsystem = { path = "../subsystem" } @@ -29,7 +29,8 @@ substrate-prometheus-endpoint = { git = "https://github.com/paritytech/substrate [dev-dependencies] assert_matches = "1.3.0" async-trait = "0.1" +env_logger = "0.7.1" futures = { version = "0.3.5", features = ["thread-pool"] } +log = "0.4.11" parking_lot = "0.10.0" polkadot-node-subsystem-test-helpers = { path = "../subsystem-test-helpers" } -env_logger = "0.7.1" diff --git a/node/subsystem-util/src/lib.rs b/node/subsystem-util/src/lib.rs index bb84ef1171de..cdbf4b956981 100644 --- a/node/subsystem-util/src/lib.rs +++ b/node/subsystem-util/src/lib.rs @@ -579,7 +579,7 @@ impl Jobs { let (future, 
abort_handle) = future::abortable(async move { if let Err(e) = Job::run(parent_hash, run_args, metrics, to_job_rx, from_job_tx).await { - log::error!( + tracing::error!( "{}({}) finished with an error {:?}", Job::NAME, parent_hash, @@ -591,7 +591,7 @@ impl Jobs { // there's no point trying to propagate this error onto the channel too // all we can do is warn that error propagation has failed if let Err(e) = err_tx.send((Some(parent_hash), JobsError::Job(e))).await { - log::warn!("failed to forward error: {:?}", e); + tracing::warn!("failed to forward error: {:?}", e); } } } @@ -632,7 +632,7 @@ impl Jobs { async fn send_msg(&mut self, parent_hash: Hash, msg: Job::ToJob) { if let Entry::Occupied(mut job) = self.running.entry(parent_hash) { if job.get_mut().send_msg(msg).await.is_err() { - log::debug!("failed to send message to job ({}), will remove it", Job::NAME); + tracing::debug!("failed to send message to job ({}), will remove it", Job::NAME); job.remove(); } } @@ -767,7 +767,7 @@ where // if we can't send on the error transmission channel, we can't do anything useful about it // still, we can at least log the failure if let Err(e) = err_tx.send((hash, err)).await { - log::warn!("failed to forward error: {:?}", e); + tracing::warn!("failed to forward error: {:?}", e); } } } @@ -792,7 +792,7 @@ where for hash in activated { let metrics = metrics.clone(); if let Err(e) = jobs.spawn_job(hash, run_args.clone(), metrics) { - log::error!("Failed to spawn a job({}): {:?}", Job::NAME, e); + tracing::error!("Failed to spawn a job({}): {:?}", Job::NAME, e); Self::fwd_err(Some(hash), JobsError::Utility(e), err_tx).await; return true; } @@ -821,7 +821,7 @@ where .forward(drain()) .await { - log::error!("failed to stop all jobs ({}) on conclude signal: {:?}", Job::NAME, e); + tracing::error!("failed to stop all jobs ({}) on conclude signal: {:?}", Job::NAME, e); let e = Error::from(e); Self::fwd_err(None, JobsError::Utility(e), err_tx).await; } @@ -832,7 +832,7 @@ where if 
let Ok(to_job) = ::try_from(msg) { match to_job.relay_parent() { Some(hash) => jobs.send_msg(hash, to_job).await, - None => log::debug!( + None => tracing::debug!( "Trying to send a message to a job ({}) without specifying a relay parent.", Job::NAME, ), @@ -841,7 +841,7 @@ where } Ok(Signal(BlockFinalized(_))) => {} Err(err) => { - log::error!("error receiving message from subsystem context for job ({}): {:?}", Job::NAME, err); + tracing::error!("error receiving message from subsystem context for job ({}): {:?}", Job::NAME, err); Self::fwd_err(None, JobsError::Utility(Error::from(err)), err_tx).await; return true; } diff --git a/node/subsystem-util/src/validator_discovery.rs b/node/subsystem-util/src/validator_discovery.rs index ac5bf1d47066..a5bd4b0fc7c0 100644 --- a/node/subsystem-util/src/validator_discovery.rs +++ b/node/subsystem-util/src/validator_discovery.rs @@ -150,7 +150,7 @@ impl ConnectionRequest { /// This can be done either by calling this function or dropping the request. 
pub fn revoke(self) { if let Err(_) = self.revoke.send(()) { - log::warn!( + tracing::warn!( "Failed to revoke a validator connection request", ); } diff --git a/node/subsystem/Cargo.toml b/node/subsystem/Cargo.toml index f578d98633b6..1d2aabe9ad91 100644 --- a/node/subsystem/Cargo.toml +++ b/node/subsystem/Cargo.toml @@ -10,7 +10,7 @@ async-trait = "0.1" derive_more = "0.99.11" futures = "0.3.5" futures-timer = "3.0.2" -log = "0.4.8" +tracing = "0.1.21" parity-scale-codec = "1.3.4" parking_lot = { version = "0.10.0", optional = true } pin-project = "0.4.22" diff --git a/node/test/service/Cargo.toml b/node/test/service/Cargo.toml index 0dbffe78e166..8faf5da4e511 100644 --- a/node/test/service/Cargo.toml +++ b/node/test/service/Cargo.toml @@ -8,7 +8,7 @@ edition = "2018" futures = "0.3.4" futures01 = { package = "futures", version = "0.1.29" } hex = "0.4" -log = "0.4.8" +tracing = "0.1.21" rand = "0.7.3" tempfile = "3.1.0" From cb7bd56c457940c69623cbcdb27fa59d68cc3621 Mon Sep 17 00:00:00 2001 From: Peter Goodspeed-Niklaus Date: Tue, 10 Nov 2020 12:05:34 +0100 Subject: [PATCH 02/33] add structured logging to trace messages --- node/core/av-store/src/lib.rs | 14 +++++++++---- node/network/bitfield-distribution/src/lib.rs | 21 +++++++++++-------- node/network/collator-protocol/src/lib.rs | 2 ++ .../collator-protocol/src/validator_side.rs | 15 +++++++++---- 4 files changed, 35 insertions(+), 17 deletions(-) diff --git a/node/core/av-store/src/lib.rs b/node/core/av-store/src/lib.rs index 38d30cfd51bc..5c158ceeca46 100644 --- a/node/core/av-store/src/lib.rs +++ b/node/core/av-store/src/lib.rs @@ -322,7 +322,7 @@ impl AvailabilityStoreSubsystem { .count(); for record in pov_pruning.drain(..outdated_records_count) { - tracing::trace!(target: LOG_TARGET, "Removing record {:?}", record); + tracing::trace!(target: LOG_TARGET, record = ?record, "Removing record {:?}", record); tx.delete( columns::DATA, available_data_key(&record.candidate_hash).as_slice(), @@ -346,7 +346,7 @@ 
impl AvailabilityStoreSubsystem { .count(); for record in chunk_pruning.drain(..outdated_records_count) { - tracing::trace!(target: LOG_TARGET, "Removing record {:?}", record); + tracing::trace!(target: LOG_TARGET, record = ?record, "Removing record {:?}", record); tx.delete( columns::DATA, erasure_chunk_key(&record.candidate_hash, record.chunk_index).as_slice(), @@ -563,6 +563,7 @@ where if record.block_number <= block_number { tracing::trace!( target: LOG_TARGET, + block_number = %record.block_number, "Updating pruning record for finalized block {}", record.block_number, ); @@ -582,6 +583,7 @@ where if record.block_number <= block_number { tracing::trace!( target: LOG_TARGET, + block_number = %record.block_number, "Updating chunk pruning record for finalized block {}", record.block_number, ); @@ -615,12 +617,16 @@ where } }; - tracing::trace!(target: LOG_TARGET, "block activated {}", hash); + tracing::trace!(target: LOG_TARGET, hash = ?hash, "block activated: {:?}", hash); let mut included = HashSet::new(); for event in events.into_iter() { if let CandidateEvent::CandidateIncluded(receipt, _) = event { - tracing::trace!(target: LOG_TARGET, "Candidate {:?} was included", receipt.hash()); + tracing::trace!( + target: LOG_TARGET, + hash = ?receipt.hash(), + "Candidate {:?} was included", receipt.hash(), + ); included.insert(receipt.hash()); } } diff --git a/node/network/bitfield-distribution/src/lib.rs b/node/network/bitfield-distribution/src/lib.rs index a4229ce563ab..5bf78c6a71ac 100644 --- a/node/network/bitfield-distribution/src/lib.rs +++ b/node/network/bitfield-distribution/src/lib.rs @@ -167,7 +167,7 @@ impl BitfieldDistribution { &mut ctx, &mut state, &self.metrics, - hash, + hash, signed_availability, ).await { warn!(target: LOG_TARGET, "Failed to reply to `DistributeBitfield` message: {}", err); @@ -184,7 +184,7 @@ impl BitfieldDistribution { } FromOverseer::Signal(OverseerSignal::ActiveLeaves(ActiveLeavesUpdate { activated, deactivated })) => { for 
relay_parent in activated { - trace!(target: LOG_TARGET, "Start {:?}", relay_parent); + trace!(target: LOG_TARGET, relay_parent = ?relay_parent, "Start {:?}", relay_parent); // query basic system parameters once match query_basics(&mut ctx, relay_parent).await { Ok(Some((validator_set, signing_context))) => { @@ -210,12 +210,12 @@ impl BitfieldDistribution { } for relay_parent in deactivated { - trace!(target: LOG_TARGET, "Stop {:?}", relay_parent); + trace!(target: LOG_TARGET, relay_parent=?relay_parent, "Stop {:?}", relay_parent); // defer the cleanup to the view change } } FromOverseer::Signal(OverseerSignal::BlockFinalized(hash)) => { - trace!(target: LOG_TARGET, "Block finalized {:?}", hash); + trace!(target: LOG_TARGET, hash=?hash, "Block finalized {:?}", hash); } FromOverseer::Signal(OverseerSignal::Conclude) => { trace!(target: LOG_TARGET, "Conclude"); @@ -235,7 +235,7 @@ async fn modify_reputation( where Context: SubsystemContext, { - trace!(target: LOG_TARGET, "Reputation change of {:?} for peer {:?}", rep, peer); + trace!(target: LOG_TARGET, rep=?rep, peer=?peer, "Reputation change of {:?} for peer {:?}", rep, peer); ctx.send_message(AllMessages::NetworkBridge( NetworkBridgeMessage::ReportPeer(peer, rep), )) @@ -262,15 +262,15 @@ where } else { trace!( target: LOG_TARGET, - "Not supposed to work on relay parent {} related data", - relay_parent + relay_parent=?relay_parent, + "Not supposed to work on relay parent related data", ); return Ok(()); }; let validator_set = &job_data.validator_set; if validator_set.is_empty() { - trace!(target: LOG_TARGET, "Validator set for {:?} is empty", relay_parent); + trace!(target: LOG_TARGET, relay_parent=?relay_parent, "Validator set for {:?} is empty", relay_parent); return Ok(()); } @@ -344,6 +344,7 @@ where if interested_peers.is_empty() { trace!( target: LOG_TARGET, + relay_parent = ?message.relay_parent, "No peers are interested in gossip for relay parent {:?}", message.relay_parent ); @@ -387,6 +388,7 @@ where if 
validator_set.is_empty() { trace!( target: LOG_TARGET, + relay_parent = ?message.relay_parent, "Validator set for relay parent {:?} is empty", &message.relay_parent ); @@ -429,6 +431,7 @@ where if one_per_validator.get(&validator).is_some() { trace!( target: LOG_TARGET, + validator_index, "Already received a message for validator at index {}", validator_index ); @@ -474,7 +477,7 @@ where NetworkBridgeEvent::PeerMessage(remote, message) => { match message { protocol_v1::BitfieldDistributionMessage::Bitfield(relay_parent, bitfield) => { - trace!(target: LOG_TARGET, "Received bitfield gossip from peer {:?}", &remote); + trace!(target: LOG_TARGET, peer=?remote, "Received bitfield gossip from peer {:?}", &remote); let gossiped_bitfield = BitfieldGossipMessage { relay_parent, signed_availability: bitfield, diff --git a/node/network/collator-protocol/src/lib.rs b/node/network/collator-protocol/src/lib.rs index 877d94ad1d4d..ae7f55c1aa11 100644 --- a/node/network/collator-protocol/src/lib.rs +++ b/node/network/collator-protocol/src/lib.rs @@ -141,6 +141,8 @@ where { trace!( target: LOG_TARGET, + rep=?rep, + peer=?peer, "Reputation change of {:?} for peer {:?}", rep, peer, ); diff --git a/node/network/collator-protocol/src/validator_side.rs b/node/network/collator-protocol/src/validator_side.rs index 86347d3698e9..3c582887df09 100644 --- a/node/network/collator-protocol/src/validator_side.rs +++ b/node/network/collator-protocol/src/validator_side.rs @@ -208,6 +208,7 @@ where // may have closed the results channel for some reason. 
trace!( target: LOG_TARGET, + err=?e, "Failed to send collation: {:?}", e, ); } @@ -382,7 +383,10 @@ where if !state.view.contains(&relay_parent) { trace!( target: LOG_TARGET, - "Collation by {} on {} on relay parent {} is no longer in view", + peer_id=%peer_id, + para_id=%para_id, + relay_parent=?relay_parent, + "Collation by {} on {} on relay parent {:?} is no longer in view", peer_id, para_id, relay_parent, ); return Ok(()); @@ -391,7 +395,10 @@ where if state.requested_collations.contains_key(&(relay_parent, para_id.clone(), peer_id.clone())) { trace!( target: LOG_TARGET, - "Collation by {} on {} on relay parent {} has already been requested", + peer_id=%peer_id, + para_id=%para_id, + relay_parent=?relay_parent, + "Collation by {} on {} on relay parent {:?} has already been requested", peer_id, para_id, relay_parent, ); return Ok(()); @@ -671,7 +678,7 @@ where loop { if let Poll::Ready(msg) = futures::poll!(ctx.recv()) { let msg = msg?; - trace!(target: LOG_TARGET, "Received a message {:?}", msg); + trace!(target: LOG_TARGET, msg=?msg, "Received a message {:?}", msg); match msg { Communication { msg } => process_msg(&mut ctx, msg, &mut state).await?, @@ -687,7 +694,7 @@ where // if the chain has not moved on yet. 
match request { CollationRequestResult::Timeout(id) => { - trace!(target: LOG_TARGET, "Request timed out {}", id); + trace!(target: LOG_TARGET, id, "Request timed out {}", id); request_timed_out(&mut ctx, &mut state, id).await?; } CollationRequestResult::Received(id) => { From 9596ca75e28417a552f4f4db2fe98cfd232ccb9a Mon Sep 17 00:00:00 2001 From: Peter Goodspeed-Niklaus Date: Tue, 10 Nov 2020 12:42:02 +0100 Subject: [PATCH 03/33] add structured logging to debug messages --- node/collation-generation/src/lib.rs | 1 + node/core/av-store/src/lib.rs | 2 +- node/network/bitfield-distribution/src/lib.rs | 2 +- node/overseer/src/lib.rs | 4 ++-- node/subsystem-util/src/lib.rs | 3 ++- 5 files changed, 7 insertions(+), 5 deletions(-) diff --git a/node/collation-generation/src/lib.rs b/node/collation-generation/src/lib.rs index 620c0069f449..48ae6b1059aa 100644 --- a/node/collation-generation/src/lib.rs +++ b/node/collation-generation/src/lib.rs @@ -239,6 +239,7 @@ async fn handle_new_activations( None => { tracing::debug!( target: LOG_TARGET, + para_id = %scheduled_core.para_id, "collator returned no collation on collate for para_id {}.", scheduled_core.para_id, ); diff --git a/node/core/av-store/src/lib.rs b/node/core/av-store/src/lib.rs index 5c158ceeca46..17a361ff6c25 100644 --- a/node/core/av-store/src/lib.rs +++ b/node/core/av-store/src/lib.rs @@ -612,7 +612,7 @@ where let events = match request_candidate_events(ctx, hash).await { Ok(events) => events, Err(err) => { - tracing::debug!(target: LOG_TARGET, "requesting candidate events failed due to {}", err); + tracing::debug!(target: LOG_TARGET, err=?err, "requesting candidate events failed due to {}", err); return Ok(()); } }; diff --git a/node/network/bitfield-distribution/src/lib.rs b/node/network/bitfield-distribution/src/lib.rs index 5bf78c6a71ac..5ca0551c4d5a 100644 --- a/node/network/bitfield-distribution/src/lib.rs +++ b/node/network/bitfield-distribution/src/lib.rs @@ -154,7 +154,7 @@ impl BitfieldDistribution { 
let message = match ctx.recv().await { Ok(message) => message, Err(e) => { - debug!(target: LOG_TARGET, "Failed to receive a message from Overseer: {}, exiting", e); + debug!(target: LOG_TARGET, err=?e, "Failed to receive a message from Overseer: {}, exiting", e); return; }, }; diff --git a/node/overseer/src/lib.rs b/node/overseer/src/lib.rs index c63f6b44184f..cded8cfe2b2e 100644 --- a/node/overseer/src/lib.rs +++ b/node/overseer/src/lib.rs @@ -1607,9 +1607,9 @@ fn spawn( let fut = Box::pin(async move { if let Err(e) = future.await { - tracing::error!("Subsystem {} exited with error {:?}", name, e); + tracing::error!(subsystem=name, err=?e, "Subsystem {} exited with error {:?}", name, e); } else { - tracing::debug!("Subsystem {} exited without an error", name); + tracing::debug!(subsystem=name, "Subsystem {} exited without an error", name); } let _ = tx.send(()); }); diff --git a/node/subsystem-util/src/lib.rs b/node/subsystem-util/src/lib.rs index cdbf4b956981..f0740c5021ac 100644 --- a/node/subsystem-util/src/lib.rs +++ b/node/subsystem-util/src/lib.rs @@ -632,7 +632,7 @@ impl Jobs { async fn send_msg(&mut self, parent_hash: Hash, msg: Job::ToJob) { if let Entry::Occupied(mut job) = self.running.entry(parent_hash) { if job.get_mut().send_msg(msg).await.is_err() { - tracing::debug!("failed to send message to job ({}), will remove it", Job::NAME); + tracing::debug!(job=Job::NAME, "failed to send message to job ({}), will remove it", Job::NAME); job.remove(); } } @@ -833,6 +833,7 @@ where match to_job.relay_parent() { Some(hash) => jobs.send_msg(hash, to_job).await, None => tracing::debug!( + job=Job::NAME, "Trying to send a message to a job ({}) without specifying a relay parent.", Job::NAME, ), From 718c3bc3979328534743992580c59a9608167f34 Mon Sep 17 00:00:00 2001 From: Peter Goodspeed-Niklaus Date: Tue, 10 Nov 2020 12:43:28 +0100 Subject: [PATCH 04/33] add structured logging to info messages --- node/core/candidate-selection/src/lib.rs | 1 + 
node/service/src/lib.rs | 5 ++++- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/node/core/candidate-selection/src/lib.rs b/node/core/candidate-selection/src/lib.rs index 736d57a7fac1..4e30e8acc302 100644 --- a/node/core/candidate-selection/src/lib.rs +++ b/node/core/candidate-selection/src/lib.rs @@ -249,6 +249,7 @@ impl CandidateSelectionJob { }; tracing::info!( target: TARGET, + candidate_receipt=?candidate_receipt, "received invalidity note for candidate {:?}", candidate_receipt ); diff --git a/node/service/src/lib.rs b/node/service/src/lib.rs index ed62a58ae928..dce61304a57d 100644 --- a/node/service/src/lib.rs +++ b/node/service/src/lib.rs @@ -701,7 +701,10 @@ pub fn new_full( // given delay. let voting_rule = match grandpa_pause { Some((block, delay)) => { - info!("GRANDPA scheduled voting pause set for block #{} with a duration of {} blocks.", + info!( + block=%block, + delay=%delay, + "GRANDPA scheduled voting pause set for block #{} with a duration of {} blocks.", block, delay, ); From edb43e50e148b2ce72472bf6cb9abc9753348f2a Mon Sep 17 00:00:00 2001 From: Peter Goodspeed-Niklaus Date: Tue, 10 Nov 2020 13:18:58 +0100 Subject: [PATCH 05/33] add structured logging to warn messages --- node/collation-generation/src/lib.rs | 6 ++++-- node/core/av-store/src/lib.rs | 2 +- node/core/backing/src/lib.rs | 2 ++ node/core/bitfield-signing/src/lib.rs | 4 ++-- node/core/candidate-selection/src/lib.rs | 6 ++++-- node/core/proposer/src/lib.rs | 2 +- node/core/provisioner/src/lib.rs | 4 +++- .../network/availability-distribution/src/lib.rs | 1 + node/network/bitfield-distribution/src/lib.rs | 9 +++++---- node/network/bridge/src/lib.rs | 11 +++++++---- node/network/bridge/src/validator_discovery.rs | 2 +- .../collator-protocol/src/collator_side.rs | 14 +++++++++++++- .../collator-protocol/src/validator_side.rs | 2 ++ node/network/pov-distribution/src/lib.rs | 8 ++++++-- node/network/statement-distribution/src/lib.rs | 16 +++++++++++++--- 
node/overseer/src/lib.rs | 2 +- node/subsystem-util/src/lib.rs | 4 ++-- 17 files changed, 68 insertions(+), 27 deletions(-) diff --git a/node/collation-generation/src/lib.rs b/node/collation-generation/src/lib.rs index 48ae6b1059aa..4e5ae1d1e014 100644 --- a/node/collation-generation/src/lib.rs +++ b/node/collation-generation/src/lib.rs @@ -95,7 +95,7 @@ impl CollationGenerationSubsystem { msg = receiver.next().fuse() => { if let Some(msg) = msg { if let Err(err) = ctx.send_message(msg).await { - tracing::warn!(target: LOG_TARGET, "failed to forward message to overseer: {:?}", err); + tracing::warn!(target: LOG_TARGET, err=?err, "failed to forward message to overseer: {:?}", err); break; } } @@ -129,7 +129,7 @@ impl CollationGenerationSubsystem { if let Err(err) = handle_new_activations(config.clone(), &activated, ctx, metrics, sender).await { - tracing::warn!(target: LOG_TARGET, "failed to handle new activations: {}", err); + tracing::warn!(target: LOG_TARGET, err=?err, "failed to handle new activations: {}", err); }; } false @@ -302,6 +302,8 @@ async fn handle_new_activations( )).await { tracing::warn!( target: LOG_TARGET, + para_id= %scheduled_core.para_id, + err=?err, "failed to send collation result for para_id {}: {:?}", scheduled_core.para_id, err diff --git a/node/core/av-store/src/lib.rs b/node/core/av-store/src/lib.rs index 17a361ff6c25..77b97b6aa08f 100644 --- a/node/core/av-store/src/lib.rs +++ b/node/core/av-store/src/lib.rs @@ -1002,7 +1002,7 @@ fn query_inner( } Ok(None) => None, Err(e) => { - tracing::warn!(target: LOG_TARGET, "Error reading from the availability store: {:?}", e); + tracing::warn!(target: LOG_TARGET, err=?e, "Error reading from the availability store: {:?}", e); None } } diff --git a/node/core/backing/src/lib.rs b/node/core/backing/src/lib.rs index acc133759e12..653f086a8983 100644 --- a/node/core/backing/src/lib.rs +++ b/node/core/backing/src/lib.rs @@ -782,6 +782,7 @@ impl util::JobTrait for CandidateBackingJob { Err(e) => { 
tracing::warn!( target: LOG_TARGET, + err=?e, "Failed to fetch runtime API data for job: {:?}", e, ); @@ -822,6 +823,7 @@ impl util::JobTrait for CandidateBackingJob { Err(e) => { tracing::warn!( target: LOG_TARGET, + err=?e, "Cannot participate in candidate backing: {:?}", e ); diff --git a/node/core/bitfield-signing/src/lib.rs b/node/core/bitfield-signing/src/lib.rs index 92ce13ac1616..347f8b7ab296 100644 --- a/node/core/bitfield-signing/src/lib.rs +++ b/node/core/bitfield-signing/src/lib.rs @@ -164,7 +164,7 @@ async fn get_core_availability( Ok(None) => return Ok(false), Err(e) => { // Don't take down the node on runtime API errors. - tracing::warn!(target: LOG_TARGET, "Encountered a runtime API error: {:?}", e); + tracing::warn!(target: LOG_TARGET, err=?e, "Encountered a runtime API error: {:?}", e); return Ok(false); } }; @@ -293,7 +293,7 @@ impl JobTrait for BitfieldSigningJob { { Err(Error::Runtime(runtime_err)) => { // Don't take down the node on runtime API errors. - tracing::warn!(target: LOG_TARGET, "Encountered a runtime API error: {:?}", runtime_err); + tracing::warn!(target: LOG_TARGET, err=?runtime_err, "Encountered a runtime API error: {:?}", runtime_err); return Ok(()); } Err(err) => return Err(err), diff --git a/node/core/candidate-selection/src/lib.rs b/node/core/candidate-selection/src/lib.rs index 4e30e8acc302..f2c9ec4789d7 100644 --- a/node/core/candidate-selection/src/lib.rs +++ b/node/core/candidate-selection/src/lib.rs @@ -214,6 +214,7 @@ impl CandidateSelectionJob { Err(err) => { tracing::warn!( target: TARGET, + err=?err, "failed to get collation from collator protocol subsystem: {:?}", err ); @@ -230,7 +231,7 @@ impl CandidateSelectionJob { ) .await { - Err(err) => tracing::warn!(target: TARGET, "failed to second a candidate: {:?}", err), + Err(err) => tracing::warn!(target: TARGET, err=?err, "failed to second a candidate: {:?}", err), Ok(()) => self.seconded_candidate = Some(collator_id), } } @@ -258,6 +259,7 @@ impl 
CandidateSelectionJob { if let Err(err) = forward_invalidity_note(received_from, &mut self.sender).await { tracing::warn!( target: TARGET, + err=?err, "failed to forward invalidity note: {:?}", err ); @@ -306,7 +308,7 @@ async fn second_candidate( .await { Err(err) => { - tracing::warn!(target: TARGET, "failed to send a seconding message"); + tracing::warn!(target: TARGET, err=?err, "failed to send a seconding message"); metrics.on_second(Err(())); Err(err.into()) } diff --git a/node/core/proposer/src/lib.rs b/node/core/proposer/src/lib.rs index 958ea2c8abd6..b39e0bd23477 100644 --- a/node/core/proposer/src/lib.rs +++ b/node/core/proposer/src/lib.rs @@ -193,7 +193,7 @@ where let provisioner_data = match self.get_provisioner_data().await { Ok(pd) => pd, Err(err) => { - tracing::warn!("could not get provisioner inherent data; injecting default data: {}", err); + tracing::warn!(err=?err, "could not get provisioner inherent data; injecting default data: {}", err); Default::default() } }; diff --git a/node/core/provisioner/src/lib.rs b/node/core/provisioner/src/lib.rs index ed76287ac2ec..45b45457c209 100644 --- a/node/core/provisioner/src/lib.rs +++ b/node/core/provisioner/src/lib.rs @@ -205,7 +205,7 @@ impl ProvisioningJob { ) .await { - tracing::warn!(target: LOG_TARGET, "failed to assemble or send inherent data: {:?}", err); + tracing::warn!(target: LOG_TARGET, err=?err, "failed to assemble or send inherent data: {:?}", err); self.metrics.on_inherent_data_request(Err(())); } else { self.metrics.on_inherent_data_request(Ok(())); @@ -462,6 +462,8 @@ fn bitfields_indicate_availability( // simpler to just handle the error internally here. 
tracing::warn!( target: LOG_TARGET, + validator_idx=%validator_idx, + availability_len=%availability_len, "attempted to set a transverse bit at idx {} which is greater than bitfield size {}", validator_idx, availability_len, diff --git a/node/network/availability-distribution/src/lib.rs b/node/network/availability-distribution/src/lib.rs index ecda2c49f5df..b04f2189a6da 100644 --- a/node/network/availability-distribution/src/lib.rs +++ b/node/network/availability-distribution/src/lib.rs @@ -797,6 +797,7 @@ impl AvailabilityDistributionSubsystem { { warn!( target: TARGET, + err=?e, "Failed to handle incoming network messages: {:?}", e ); } diff --git a/node/network/bitfield-distribution/src/lib.rs b/node/network/bitfield-distribution/src/lib.rs index 5ca0551c4d5a..7d7413020f32 100644 --- a/node/network/bitfield-distribution/src/lib.rs +++ b/node/network/bitfield-distribution/src/lib.rs @@ -170,7 +170,7 @@ impl BitfieldDistribution { hash, signed_availability, ).await { - warn!(target: LOG_TARGET, "Failed to reply to `DistributeBitfield` message: {}", err); + warn!(target: LOG_TARGET, err=?err, "Failed to reply to `DistributeBitfield` message: {}", err); } } FromOverseer::Communication { @@ -179,7 +179,7 @@ impl BitfieldDistribution { trace!(target: LOG_TARGET, "Processing NetworkMessage"); // a network message was received if let Err(e) = handle_network_msg(&mut ctx, &mut state, &self.metrics, event).await { - warn!(target: LOG_TARGET, "Failed to handle incoming network messages: {:?}", e); + warn!(target: LOG_TARGET, err=?e, "Failed to handle incoming network messages: {:?}", e); } } FromOverseer::Signal(OverseerSignal::ActiveLeaves(ActiveLeavesUpdate { activated, deactivated })) => { @@ -203,7 +203,7 @@ impl BitfieldDistribution { ); } Err(e) => { - warn!(target: LOG_TARGET, "query_basics has failed: {}", e); + warn!(target: LOG_TARGET, err=?e, "query_basics has failed: {}", e); } _ => {}, } @@ -498,6 +498,7 @@ fn handle_our_view_change(state: &mut ProtocolState, 
view: View) -> SubsystemRes if !state.per_relay_parent.contains_key(&added) { warn!( target: LOG_TARGET, + added=%added, "Our view contains {} but the overseer never told use we should work on this", &added ); @@ -642,7 +643,7 @@ where SigningContext { parent_hash: relay_parent, session_index: s }, ))), (Err(e), _) | (_, Err(e)) => { - warn!(target: LOG_TARGET, "Failed to fetch basics from runtime API: {:?}", e); + warn!(target: LOG_TARGET, err=?e, "Failed to fetch basics from runtime API: {:?}", e); Ok(None) } } diff --git a/node/network/bridge/src/lib.rs b/node/network/bridge/src/lib.rs index c15c50fedcb7..313017a79e05 100644 --- a/node/network/bridge/src/lib.rs +++ b/node/network/bridge/src/lib.rs @@ -291,7 +291,7 @@ fn action_from_overseer_message( Ok(FromOverseer::Signal(OverseerSignal::BlockFinalized(_))) => Action::Nop, Err(e) => { - tracing::warn!(target: TARGET, "Shutting down Network Bridge due to error {:?}", e); + tracing::warn!(target: TARGET, err=?e, "Shutting down Network Bridge due to error {:?}", e); Action::Abort } } @@ -384,7 +384,7 @@ async fn update_view( NetworkBridgeEvent::OurViewChange(new_view.clone()), ctx, ).await { - tracing::warn!(target: TARGET, "Aborting - Failure to dispatch messages to overseer"); + tracing::warn!(target: TARGET, err=?e, "Aborting - Failure to dispatch messages to overseer"); return Err(e) } @@ -392,7 +392,7 @@ async fn update_view( NetworkBridgeEvent::OurViewChange(new_view.clone()), ctx, ).await { - tracing::warn!(target: TARGET, "Aborting - Failure to dispatch messages to overseer"); + tracing::warn!(target: TARGET, err=?e, "Aborting - Failure to dispatch messages to overseer"); return Err(e) } @@ -691,7 +691,7 @@ where }; if let Err(e) = res { - tracing::warn!("Aborting - Failure to dispatch messages to overseer"); + tracing::warn!(err=?e, "Aborting - Failure to dispatch messages to overseer"); return Err(e); } } @@ -720,6 +720,7 @@ where if let Err(e) = res { tracing::warn!( target: TARGET, + err=?e, "Aborting 
- Failure to dispatch messages to overseer", ); return Err(e) @@ -741,6 +742,7 @@ where ).await { tracing::warn!( target: TARGET, + err=?e, "Aborting - Failure to dispatch messages to overseer", ); return Err(e) @@ -761,6 +763,7 @@ where ).await { tracing::warn!( target: TARGET, + err=?e, "Aborting - Failure to dispatch messages to overseer", ); return Err(e) diff --git a/node/network/bridge/src/validator_discovery.rs b/node/network/bridge/src/validator_discovery.rs index 28d127721494..9cef5d8d3cc9 100644 --- a/node/network/bridge/src/validator_discovery.rs +++ b/node/network/bridge/src/validator_discovery.rs @@ -283,7 +283,7 @@ impl Service { PRIORITY_GROUP.to_owned(), multiaddr_to_add, ).await { - tracing::warn!(target: super::TARGET, "AuthorityDiscoveryService returned an invalid multiaddress: {}", e); + tracing::warn!(target: super::TARGET, err=?e, "AuthorityDiscoveryService returned an invalid multiaddress: {}", e); } // the addresses are known to be valid let _ = network_service.remove_from_priority_group(PRIORITY_GROUP.to_owned(), multiaddr_to_remove).await; diff --git a/node/network/collator-protocol/src/collator_side.rs b/node/network/collator-protocol/src/collator_side.rs index ecbc42ece402..46081266b286 100644 --- a/node/network/collator-protocol/src/collator_side.rs +++ b/node/network/collator-protocol/src/collator_side.rs @@ -154,6 +154,7 @@ where if !state.view.contains(&relay_parent) { warn!( target: LOG_TARGET, + relay_parent=?relay_parent, "Distribute collation message parent {:?} is outside of our view", relay_parent, ); @@ -173,6 +174,8 @@ where None => { warn!( target: LOG_TARGET, + id=?id, + relay_parent=?relay_parent, "Looks like no core is assigned to {:?} at {:?}", id, relay_parent, ); return Ok(()); @@ -185,6 +188,7 @@ where None => { warn!( target: LOG_TARGET, + core=?our_core, "There are no validators assigned to {:?} core", our_core, ); @@ -379,6 +383,8 @@ where // the one we expect, we ignore the message. 
warn!( target: LOG_TARGET, + para=?receipt.descriptor.para_id, + collating_on=?id, "DistributeCollation message for para {:?} while collating on {:?}", receipt.descriptor.para_id, id, @@ -390,6 +396,7 @@ where None => { warn!( target: LOG_TARGET, + para=?receipt.descriptor.para_id, "DistributeCollation message for para {:?} while not collating on any", receipt.descriptor.para_id, ); @@ -422,6 +429,7 @@ where ).await { warn!( target: LOG_TARGET, + err=?e, "Failed to handle incoming network message: {:?}", e, ); } @@ -496,6 +504,8 @@ where } else { warn!( target: LOG_TARGET, + for_para_id=?para_id, + our_para_id=?our_para_id, "Received a RequestCollation for {:?} while collating on {:?}", para_id, our_para_id, ); @@ -504,6 +514,7 @@ where None => { warn!( target: LOG_TARGET, + for_para_id=?para_id, "Received a RequestCollation for {:?} while not collating on any para", para_id, ); @@ -648,6 +659,7 @@ where if let Err(err) = handle_validator_connected(&mut ctx, &mut state, peer_id, validator_id).await { warn!( target: LOG_TARGET, + err=?err, "Failed to declare our collator id: {:?}", err, ); @@ -661,7 +673,7 @@ where match msg? 
{ Communication { msg } => { if let Err(e) = process_msg(&mut ctx, &mut state, msg).await { - warn!(target: LOG_TARGET, "Failed to process message: {}", e); + warn!(target: LOG_TARGET, err=?e, "Failed to process message: {}", e); } }, Signal(ActiveLeaves(_update)) => {} diff --git a/node/network/collator-protocol/src/validator_side.rs b/node/network/collator-protocol/src/validator_side.rs index 3c582887df09..5d7cbc5384ad 100644 --- a/node/network/collator-protocol/src/validator_side.rs +++ b/node/network/collator-protocol/src/validator_side.rs @@ -622,6 +622,7 @@ where CollateOn(id) => { warn!( target: LOG_TARGET, + id=%id, "CollateOn({}) message is not expected on the validator side of the protocol", id, ); } @@ -648,6 +649,7 @@ where ).await { warn!( target: LOG_TARGET, + err=?e, "Failed to handle incoming network message: {:?}", e, ); } diff --git a/node/network/pov-distribution/src/lib.rs b/node/network/pov-distribution/src/lib.rs index 8096cf549f26..a7d46f90dd3f 100644 --- a/node/network/pov-distribution/src/lib.rs +++ b/node/network/pov-distribution/src/lib.rs @@ -136,6 +136,7 @@ async fn handle_signal( Err(e) => { tracing::warn!( target: LOG_TARGET, + err=?e, "Error fetching validators from runtime API for active leaf: {:?}", e ); @@ -286,8 +287,11 @@ async fn handle_fetch( } if relay_parent_state.fetching.len() > 2 * relay_parent_state.n_validators { - tracing::warn!("Other subsystems have requested PoV distribution to \ - fetch more PoVs than reasonably expected: {}", relay_parent_state.fetching.len()); + tracing::warn!( + fetching_len=relay_parent_state.fetching.len(), + "Other subsystems have requested PoV distribution to \ + fetch more PoVs than reasonably expected: {}", + relay_parent_state.fetching.len()); return Ok(()); } diff --git a/node/network/statement-distribution/src/lib.rs b/node/network/statement-distribution/src/lib.rs index 41dc16841615..6c45e521e087 100644 --- a/node/network/statement-distribution/src/lib.rs +++ 
b/node/network/statement-distribution/src/lib.rs @@ -688,7 +688,11 @@ async fn handle_incoming_message<'a>( None => { // This should never be out-of-sync with our view if the view updates // correspond to actual `StartWork` messages. So we just log and ignore. - tracing::warn!("Our view out-of-sync with active heads. Head {} not found", relay_parent); + tracing::warn!( + relay_parent=%relay_parent, + "Our view out-of-sync with active heads. Head {} not found", + relay_parent, + ); return Ok(None); } }; @@ -847,9 +851,14 @@ async fn handle_network_update( for new in our_view.difference(&old_view) { if !active_heads.contains_key(&new) { - tracing::warn!(target: LOG_TARGET, "Our network bridge view update \ + tracing::warn!( + target: LOG_TARGET, + unknown_hash=%new, + "Our network bridge view update \ inconsistent with `StartWork` messages we have received from overseer. \ - Contains unknown hash {}", new); + Contains unknown hash {}", + new, + ); } } @@ -901,6 +910,7 @@ impl StatementDistribution { (Err(e), _) | (_, Err(e)) => { tracing::warn!( target: LOG_TARGET, + err=?e, "Failed to fetch runtime API data for active leaf: {:?}", e, ); diff --git a/node/overseer/src/lib.rs b/node/overseer/src/lib.rs index cded8cfe2b2e..2ea44d0a632b 100644 --- a/node/overseer/src/lib.rs +++ b/node/overseer/src/lib.rs @@ -1617,7 +1617,7 @@ fn spawn( spawner.spawn(name, fut); let _ = streams.push(from_rx); - futures.push(Box::pin(rx.map(|e| { tracing::warn!("Dropping error {:?}", e); Ok(()) }))); + futures.push(Box::pin(rx.map(|e| { tracing::warn!(err=?e, "Dropping error {:?}", e); Ok(()) }))); let instance = Some(SubsystemInstance { tx: to_tx, diff --git a/node/subsystem-util/src/lib.rs b/node/subsystem-util/src/lib.rs index f0740c5021ac..aa5d508b68de 100644 --- a/node/subsystem-util/src/lib.rs +++ b/node/subsystem-util/src/lib.rs @@ -591,7 +591,7 @@ impl Jobs { // there's no point trying to propagate this error onto the channel too // all we can do is warn that error propagation has 
failed if let Err(e) = err_tx.send((Some(parent_hash), JobsError::Job(e))).await { - tracing::warn!("failed to forward error: {:?}", e); + tracing::warn!(err=?e, "failed to forward error: {:?}", e); } } } @@ -767,7 +767,7 @@ where // if we can't send on the error transmission channel, we can't do anything useful about it // still, we can at least log the failure if let Err(e) = err_tx.send((hash, err)).await { - tracing::warn!("failed to forward error: {:?}", e); + tracing::warn!(err=?e, "failed to forward error: {:?}", e); } } } From d3e329d34c2b524e356155ab32094376bf156a8e Mon Sep 17 00:00:00 2001 From: Peter Goodspeed-Niklaus Date: Tue, 10 Nov 2020 13:22:41 +0100 Subject: [PATCH 06/33] add structured logging to error messages --- node/collation-generation/src/lib.rs | 3 +++ node/overseer/src/lib.rs | 2 +- node/subsystem-util/src/lib.rs | 27 ++++++++++++++++++++++++--- 3 files changed, 28 insertions(+), 4 deletions(-) diff --git a/node/collation-generation/src/lib.rs b/node/collation-generation/src/lib.rs index 4e5ae1d1e014..06286276c7ed 100644 --- a/node/collation-generation/src/lib.rs +++ b/node/collation-generation/src/lib.rs @@ -149,6 +149,7 @@ impl CollationGenerationSubsystem { Err(err) => { tracing::error!( target: LOG_TARGET, + err=?err, "error receiving message from subsystem context: {:?}", err ); @@ -265,6 +266,8 @@ async fn handle_new_activations( Err(err) => { tracing::error!( target: LOG_TARGET, + para_id=%scheduled_core.para_id, + err=?err, "failed to calculate erasure root for para_id {}: {:?}", scheduled_core.para_id, err diff --git a/node/overseer/src/lib.rs b/node/overseer/src/lib.rs index 2ea44d0a632b..7f998dda95c8 100644 --- a/node/overseer/src/lib.rs +++ b/node/overseer/src/lib.rs @@ -1337,7 +1337,7 @@ where // Some subsystem exited? It's time to panic. 
if let Poll::Ready(Some(finished)) = poll!(self.running_subsystems.next()) { - tracing::error!(target: LOG_TARGET, "Subsystem finished unexpectedly {:?}", finished); + tracing::error!(target: LOG_TARGET, subsystem=?finished, "Subsystem finished unexpectedly {:?}", finished); self.stop().await; return finished; } diff --git a/node/subsystem-util/src/lib.rs b/node/subsystem-util/src/lib.rs index aa5d508b68de..236ded7340fe 100644 --- a/node/subsystem-util/src/lib.rs +++ b/node/subsystem-util/src/lib.rs @@ -580,6 +580,9 @@ impl Jobs { let (future, abort_handle) = future::abortable(async move { if let Err(e) = Job::run(parent_hash, run_args, metrics, to_job_rx, from_job_tx).await { tracing::error!( + job=Job::NAME, + parent_hash=%parent_hash, + err=?e, "{}({}) finished with an error {:?}", Job::NAME, parent_hash, @@ -792,7 +795,13 @@ where for hash in activated { let metrics = metrics.clone(); if let Err(e) = jobs.spawn_job(hash, run_args.clone(), metrics) { - tracing::error!("Failed to spawn a job({}): {:?}", Job::NAME, e); + tracing::error!( + job=Job::NAME, + err=?e, + "Failed to spawn a job({}): {:?}", + Job::NAME, + e, + ); Self::fwd_err(Some(hash), JobsError::Utility(e), err_tx).await; return true; } @@ -821,7 +830,13 @@ where .forward(drain()) .await { - tracing::error!("failed to stop all jobs ({}) on conclude signal: {:?}", Job::NAME, e); + tracing::error!( + job=Job::NAME, + err=?e, + "failed to stop all jobs ({}) on conclude signal: {:?}", + Job::NAME, + e, + ); let e = Error::from(e); Self::fwd_err(None, JobsError::Utility(e), err_tx).await; } @@ -842,7 +857,13 @@ where } Ok(Signal(BlockFinalized(_))) => {} Err(err) => { - tracing::error!("error receiving message from subsystem context for job ({}): {:?}", Job::NAME, err); + tracing::error!( + job=Job::NAME, + err=?err, + "error receiving message from subsystem context for job ({}): {:?}", + Job::NAME, + err, + ); Self::fwd_err(None, JobsError::Utility(Error::from(err)), err_tx).await; return true; } From 
7563ed20c2fbe023b29e02d1399907cfdad89479 Mon Sep 17 00:00:00 2001 From: Peter Goodspeed-Niklaus Date: Tue, 10 Nov 2020 14:31:05 +0100 Subject: [PATCH 07/33] normalize spacing and Display vs Debug --- core-primitives/src/lib.rs | 7 +++++ node/collation-generation/src/lib.rs | 14 ++++----- node/core/av-store/src/lib.rs | 8 ++--- node/core/backing/src/lib.rs | 4 +-- node/core/bitfield-signing/src/lib.rs | 4 +-- node/core/candidate-selection/src/lib.rs | 10 +++---- node/core/proposer/src/lib.rs | 2 +- node/core/provisioner/src/lib.rs | 6 ++-- .../availability-distribution/src/lib.rs | 2 +- node/network/bitfield-distribution/src/lib.rs | 30 +++++++++---------- node/network/bridge/src/lib.rs | 14 ++++----- .../network/bridge/src/validator_discovery.rs | 2 +- .../collator-protocol/src/collator_side.rs | 28 ++++++++--------- node/network/collator-protocol/src/lib.rs | 4 +-- .../collator-protocol/src/validator_side.rs | 20 ++++++------- node/network/pov-distribution/src/lib.rs | 2 +- .../network/statement-distribution/src/lib.rs | 6 ++-- node/overseer/src/lib.rs | 6 ++-- node/service/src/lib.rs | 4 +-- node/subsystem-util/src/lib.rs | 14 ++++----- 20 files changed, 97 insertions(+), 90 deletions(-) diff --git a/core-primitives/src/lib.rs b/core-primitives/src/lib.rs index fc325985e0f6..65dc3fcd0bfb 100644 --- a/core-primitives/src/lib.rs +++ b/core-primitives/src/lib.rs @@ -58,6 +58,13 @@ pub type Hash = sp_core::H256; #[derive(Clone, Copy, codec::Encode, codec::Decode, Hash, Eq, PartialEq, Debug, Default)] pub struct CandidateHash(pub Hash); +#[cfg(feature="std")] +impl std::fmt::Display for CandidateHash { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + self.0.fmt(f) + } +} + /// Index of a transaction in the relay chain. 32-bit should be plenty. 
pub type Nonce = u32; diff --git a/node/collation-generation/src/lib.rs b/node/collation-generation/src/lib.rs index 06286276c7ed..287f7d71c405 100644 --- a/node/collation-generation/src/lib.rs +++ b/node/collation-generation/src/lib.rs @@ -95,7 +95,7 @@ impl CollationGenerationSubsystem { msg = receiver.next().fuse() => { if let Some(msg) = msg { if let Err(err) = ctx.send_message(msg).await { - tracing::warn!(target: LOG_TARGET, err=?err, "failed to forward message to overseer: {:?}", err); + tracing::warn!(target: LOG_TARGET, err = ?err, "failed to forward message to overseer: {:?}", err); break; } } @@ -129,7 +129,7 @@ impl CollationGenerationSubsystem { if let Err(err) = handle_new_activations(config.clone(), &activated, ctx, metrics, sender).await { - tracing::warn!(target: LOG_TARGET, err=?err, "failed to handle new activations: {}", err); + tracing::warn!(target: LOG_TARGET, err = ?err, "failed to handle new activations: {}", err); }; } false @@ -149,7 +149,7 @@ impl CollationGenerationSubsystem { Err(err) => { tracing::error!( target: LOG_TARGET, - err=?err, + err = ?err, "error receiving message from subsystem context: {:?}", err ); @@ -266,8 +266,8 @@ async fn handle_new_activations( Err(err) => { tracing::error!( target: LOG_TARGET, - para_id=%scheduled_core.para_id, - err=?err, + para_id = %scheduled_core.para_id, + err = ?err, "failed to calculate erasure root for para_id {}: {:?}", scheduled_core.para_id, err @@ -305,8 +305,8 @@ async fn handle_new_activations( )).await { tracing::warn!( target: LOG_TARGET, - para_id= %scheduled_core.para_id, - err=?err, + para_id = %scheduled_core.para_id, + err = ?err, "failed to send collation result for para_id {}: {:?}", scheduled_core.para_id, err diff --git a/node/core/av-store/src/lib.rs b/node/core/av-store/src/lib.rs index 77b97b6aa08f..c603ea82129b 100644 --- a/node/core/av-store/src/lib.rs +++ b/node/core/av-store/src/lib.rs @@ -612,19 +612,19 @@ where let events = match request_candidate_events(ctx, 
hash).await { Ok(events) => events, Err(err) => { - tracing::debug!(target: LOG_TARGET, err=?err, "requesting candidate events failed due to {}", err); + tracing::debug!(target: LOG_TARGET, err = ?err, "requesting candidate events failed due to {}", err); return Ok(()); } }; - tracing::trace!(target: LOG_TARGET, hash = ?hash, "block activated: {:?}", hash); + tracing::trace!(target: LOG_TARGET, hash = %hash, "block activated: {:?}", hash); let mut included = HashSet::new(); for event in events.into_iter() { if let CandidateEvent::CandidateIncluded(receipt, _) = event { tracing::trace!( target: LOG_TARGET, - hash = ?receipt.hash(), + hash = %receipt.hash(), "Candidate {:?} was included", receipt.hash(), ); included.insert(receipt.hash()); @@ -1002,7 +1002,7 @@ fn query_inner( } Ok(None) => None, Err(e) => { - tracing::warn!(target: LOG_TARGET, err=?e, "Error reading from the availability store: {:?}", e); + tracing::warn!(target: LOG_TARGET, err = ?e, "Error reading from the availability store: {:?}", e); None } } diff --git a/node/core/backing/src/lib.rs b/node/core/backing/src/lib.rs index 653f086a8983..6340d48b99c0 100644 --- a/node/core/backing/src/lib.rs +++ b/node/core/backing/src/lib.rs @@ -782,7 +782,7 @@ impl util::JobTrait for CandidateBackingJob { Err(e) => { tracing::warn!( target: LOG_TARGET, - err=?e, + err = ?e, "Failed to fetch runtime API data for job: {:?}", e, ); @@ -823,7 +823,7 @@ impl util::JobTrait for CandidateBackingJob { Err(e) => { tracing::warn!( target: LOG_TARGET, - err=?e, + err = ?e, "Cannot participate in candidate backing: {:?}", e ); diff --git a/node/core/bitfield-signing/src/lib.rs b/node/core/bitfield-signing/src/lib.rs index 347f8b7ab296..e84c0476121d 100644 --- a/node/core/bitfield-signing/src/lib.rs +++ b/node/core/bitfield-signing/src/lib.rs @@ -164,7 +164,7 @@ async fn get_core_availability( Ok(None) => return Ok(false), Err(e) => { // Don't take down the node on runtime API errors. 
- tracing::warn!(target: LOG_TARGET, err=?e, "Encountered a runtime API error: {:?}", e); + tracing::warn!(target: LOG_TARGET, err = ?e, "Encountered a runtime API error: {:?}", e); return Ok(false); } }; @@ -293,7 +293,7 @@ impl JobTrait for BitfieldSigningJob { { Err(Error::Runtime(runtime_err)) => { // Don't take down the node on runtime API errors. - tracing::warn!(target: LOG_TARGET, err=?runtime_err, "Encountered a runtime API error: {:?}", runtime_err); + tracing::warn!(target: LOG_TARGET, err = ?runtime_err, "Encountered a runtime API error: {:?}", runtime_err); return Ok(()); } Err(err) => return Err(err), diff --git a/node/core/candidate-selection/src/lib.rs b/node/core/candidate-selection/src/lib.rs index f2c9ec4789d7..a9e375a13237 100644 --- a/node/core/candidate-selection/src/lib.rs +++ b/node/core/candidate-selection/src/lib.rs @@ -214,7 +214,7 @@ impl CandidateSelectionJob { Err(err) => { tracing::warn!( target: TARGET, - err=?err, + err = ?err, "failed to get collation from collator protocol subsystem: {:?}", err ); @@ -231,7 +231,7 @@ impl CandidateSelectionJob { ) .await { - Err(err) => tracing::warn!(target: TARGET, err=?err, "failed to second a candidate: {:?}", err), + Err(err) => tracing::warn!(target: TARGET, err = ?err, "failed to second a candidate: {:?}", err), Ok(()) => self.seconded_candidate = Some(collator_id), } } @@ -250,7 +250,7 @@ impl CandidateSelectionJob { }; tracing::info!( target: TARGET, - candidate_receipt=?candidate_receipt, + candidate_receipt = ?candidate_receipt, "received invalidity note for candidate {:?}", candidate_receipt ); @@ -259,7 +259,7 @@ impl CandidateSelectionJob { if let Err(err) = forward_invalidity_note(received_from, &mut self.sender).await { tracing::warn!( target: TARGET, - err=?err, + err = ?err, "failed to forward invalidity note: {:?}", err ); @@ -308,7 +308,7 @@ async fn second_candidate( .await { Err(err) => { - tracing::warn!(target: TARGET, err=?err, "failed to send a seconding message"); + 
tracing::warn!(target: TARGET, err = ?err, "failed to send a seconding message"); metrics.on_second(Err(())); Err(err.into()) } diff --git a/node/core/proposer/src/lib.rs b/node/core/proposer/src/lib.rs index b39e0bd23477..70d3117d7637 100644 --- a/node/core/proposer/src/lib.rs +++ b/node/core/proposer/src/lib.rs @@ -193,7 +193,7 @@ where let provisioner_data = match self.get_provisioner_data().await { Ok(pd) => pd, Err(err) => { - tracing::warn!(err=?err, "could not get provisioner inherent data; injecting default data: {}", err); + tracing::warn!(err = ?err, "could not get provisioner inherent data; injecting default data: {}", err); Default::default() } }; diff --git a/node/core/provisioner/src/lib.rs b/node/core/provisioner/src/lib.rs index 45b45457c209..2aa56637677c 100644 --- a/node/core/provisioner/src/lib.rs +++ b/node/core/provisioner/src/lib.rs @@ -205,7 +205,7 @@ impl ProvisioningJob { ) .await { - tracing::warn!(target: LOG_TARGET, err=?err, "failed to assemble or send inherent data: {:?}", err); + tracing::warn!(target: LOG_TARGET, err = ?err, "failed to assemble or send inherent data: {:?}", err); self.metrics.on_inherent_data_request(Err(())); } else { self.metrics.on_inherent_data_request(Ok(())); @@ -462,8 +462,8 @@ fn bitfields_indicate_availability( // simpler to just handle the error internally here. 
tracing::warn!( target: LOG_TARGET, - validator_idx=%validator_idx, - availability_len=%availability_len, + validator_idx = %validator_idx, + availability_len = %availability_len, "attempted to set a transverse bit at idx {} which is greater than bitfield size {}", validator_idx, availability_len, diff --git a/node/network/availability-distribution/src/lib.rs b/node/network/availability-distribution/src/lib.rs index b04f2189a6da..e6f398e39e77 100644 --- a/node/network/availability-distribution/src/lib.rs +++ b/node/network/availability-distribution/src/lib.rs @@ -797,7 +797,7 @@ impl AvailabilityDistributionSubsystem { { warn!( target: TARGET, - err=?e, + err = ?e, "Failed to handle incoming network messages: {:?}", e ); } diff --git a/node/network/bitfield-distribution/src/lib.rs b/node/network/bitfield-distribution/src/lib.rs index 7d7413020f32..2eae1d321564 100644 --- a/node/network/bitfield-distribution/src/lib.rs +++ b/node/network/bitfield-distribution/src/lib.rs @@ -154,7 +154,7 @@ impl BitfieldDistribution { let message = match ctx.recv().await { Ok(message) => message, Err(e) => { - debug!(target: LOG_TARGET, err=?e, "Failed to receive a message from Overseer: {}, exiting", e); + debug!(target: LOG_TARGET, err = ?e, "Failed to receive a message from Overseer: {}, exiting", e); return; }, }; @@ -170,7 +170,7 @@ impl BitfieldDistribution { hash, signed_availability, ).await { - warn!(target: LOG_TARGET, err=?err, "Failed to reply to `DistributeBitfield` message: {}", err); + warn!(target: LOG_TARGET, err = ?err, "Failed to reply to `DistributeBitfield` message: {}", err); } } FromOverseer::Communication { @@ -179,12 +179,12 @@ impl BitfieldDistribution { trace!(target: LOG_TARGET, "Processing NetworkMessage"); // a network message was received if let Err(e) = handle_network_msg(&mut ctx, &mut state, &self.metrics, event).await { - warn!(target: LOG_TARGET, err=?e, "Failed to handle incoming network messages: {:?}", e); + warn!(target: LOG_TARGET, err = ?e, 
"Failed to handle incoming network messages: {:?}", e); } } FromOverseer::Signal(OverseerSignal::ActiveLeaves(ActiveLeavesUpdate { activated, deactivated })) => { for relay_parent in activated { - trace!(target: LOG_TARGET, relay_parent = ?relay_parent, "Start {:?}", relay_parent); + trace!(target: LOG_TARGET, relay_parent = %relay_parent, "Start {:?}", relay_parent); // query basic system parameters once match query_basics(&mut ctx, relay_parent).await { Ok(Some((validator_set, signing_context))) => { @@ -203,19 +203,19 @@ impl BitfieldDistribution { ); } Err(e) => { - warn!(target: LOG_TARGET, err=?e, "query_basics has failed: {}", e); + warn!(target: LOG_TARGET, err = ?e, "query_basics has failed: {}", e); } _ => {}, } } for relay_parent in deactivated { - trace!(target: LOG_TARGET, relay_parent=?relay_parent, "Stop {:?}", relay_parent); + trace!(target: LOG_TARGET, relay_parent = %relay_parent, "Stop {:?}", relay_parent); // defer the cleanup to the view change } } FromOverseer::Signal(OverseerSignal::BlockFinalized(hash)) => { - trace!(target: LOG_TARGET, hash=?hash, "Block finalized {:?}", hash); + trace!(target: LOG_TARGET, hash = %hash, "Block finalized {:?}", hash); } FromOverseer::Signal(OverseerSignal::Conclude) => { trace!(target: LOG_TARGET, "Conclude"); @@ -235,7 +235,7 @@ async fn modify_reputation( where Context: SubsystemContext, { - trace!(target: LOG_TARGET, rep=?rep, peer=?peer, "Reputation change of {:?} for peer {:?}", rep, peer); + trace!(target: LOG_TARGET, rep = ?rep, peer_id = %peer, "Reputation change of {:?} for peer {:?}", rep, peer); ctx.send_message(AllMessages::NetworkBridge( NetworkBridgeMessage::ReportPeer(peer, rep), )) @@ -262,7 +262,7 @@ where } else { trace!( target: LOG_TARGET, - relay_parent=?relay_parent, + relay_parent = %relay_parent, "Not supposed to work on relay parent related data", ); @@ -270,7 +270,7 @@ where }; let validator_set = &job_data.validator_set; if validator_set.is_empty() { - trace!(target: LOG_TARGET, 
relay_parent=?relay_parent, "Validator set for {:?} is empty", relay_parent); + trace!(target: LOG_TARGET, relay_parent = %relay_parent, "Validator set for {:?} is empty", relay_parent); return Ok(()); } @@ -344,7 +344,7 @@ where if interested_peers.is_empty() { trace!( target: LOG_TARGET, - relay_parent = ?message.relay_parent, + relay_parent = %message.relay_parent, "No peers are interested in gossip for relay parent {:?}", message.relay_parent ); @@ -388,7 +388,7 @@ where if validator_set.is_empty() { trace!( target: LOG_TARGET, - relay_parent = ?message.relay_parent, + relay_parent = %message.relay_parent, "Validator set for relay parent {:?} is empty", &message.relay_parent ); @@ -477,7 +477,7 @@ where NetworkBridgeEvent::PeerMessage(remote, message) => { match message { protocol_v1::BitfieldDistributionMessage::Bitfield(relay_parent, bitfield) => { - trace!(target: LOG_TARGET, peer=?remote, "Received bitfield gossip from peer {:?}", &remote); + trace!(target: LOG_TARGET, peer_id = %remote, "Received bitfield gossip from peer {:?}", &remote); let gossiped_bitfield = BitfieldGossipMessage { relay_parent, signed_availability: bitfield, @@ -498,7 +498,7 @@ fn handle_our_view_change(state: &mut ProtocolState, view: View) -> SubsystemRes if !state.per_relay_parent.contains_key(&added) { warn!( target: LOG_TARGET, - added=%added, + added = %added, "Our view contains {} but the overseer never told use we should work on this", &added ); @@ -643,7 +643,7 @@ where SigningContext { parent_hash: relay_parent, session_index: s }, ))), (Err(e), _) | (_, Err(e)) => { - warn!(target: LOG_TARGET, err=?e, "Failed to fetch basics from runtime API: {:?}", e); + warn!(target: LOG_TARGET, err = ?e, "Failed to fetch basics from runtime API: {:?}", e); Ok(None) } } diff --git a/node/network/bridge/src/lib.rs b/node/network/bridge/src/lib.rs index 313017a79e05..0654f0bcbfe3 100644 --- a/node/network/bridge/src/lib.rs +++ b/node/network/bridge/src/lib.rs @@ -291,7 +291,7 @@ fn 
action_from_overseer_message( Ok(FromOverseer::Signal(OverseerSignal::BlockFinalized(_))) => Action::Nop, Err(e) => { - tracing::warn!(target: TARGET, err=?e, "Shutting down Network Bridge due to error {:?}", e); + tracing::warn!(target: TARGET, err = ?e, "Shutting down Network Bridge due to error {:?}", e); Action::Abort } } @@ -384,7 +384,7 @@ async fn update_view( NetworkBridgeEvent::OurViewChange(new_view.clone()), ctx, ).await { - tracing::warn!(target: TARGET, err=?e, "Aborting - Failure to dispatch messages to overseer"); + tracing::warn!(target: TARGET, err = ?e, "Aborting - Failure to dispatch messages to overseer"); return Err(e) } @@ -392,7 +392,7 @@ async fn update_view( NetworkBridgeEvent::OurViewChange(new_view.clone()), ctx, ).await { - tracing::warn!(target: TARGET, err=?e, "Aborting - Failure to dispatch messages to overseer"); + tracing::warn!(target: TARGET, err = ?e, "Aborting - Failure to dispatch messages to overseer"); return Err(e) } @@ -691,7 +691,7 @@ where }; if let Err(e) = res { - tracing::warn!(err=?e, "Aborting - Failure to dispatch messages to overseer"); + tracing::warn!(err = ?e, "Aborting - Failure to dispatch messages to overseer"); return Err(e); } } @@ -720,7 +720,7 @@ where if let Err(e) = res { tracing::warn!( target: TARGET, - err=?e, + err = ?e, "Aborting - Failure to dispatch messages to overseer", ); return Err(e) @@ -742,7 +742,7 @@ where ).await { tracing::warn!( target: TARGET, - err=?e, + err = ?e, "Aborting - Failure to dispatch messages to overseer", ); return Err(e) @@ -763,7 +763,7 @@ where ).await { tracing::warn!( target: TARGET, - err=?e, + err = ?e, "Aborting - Failure to dispatch messages to overseer", ); return Err(e) diff --git a/node/network/bridge/src/validator_discovery.rs b/node/network/bridge/src/validator_discovery.rs index 9cef5d8d3cc9..d37de99313fa 100644 --- a/node/network/bridge/src/validator_discovery.rs +++ b/node/network/bridge/src/validator_discovery.rs @@ -283,7 +283,7 @@ impl Service { 
PRIORITY_GROUP.to_owned(), multiaddr_to_add, ).await { - tracing::warn!(target: super::TARGET, err=?e, "AuthorityDiscoveryService returned an invalid multiaddress: {}", e); + tracing::warn!(target: super::TARGET, err = ?e, "AuthorityDiscoveryService returned an invalid multiaddress: {}", e); } // the addresses are known to be valid let _ = network_service.remove_from_priority_group(PRIORITY_GROUP.to_owned(), multiaddr_to_remove).await; diff --git a/node/network/collator-protocol/src/collator_side.rs b/node/network/collator-protocol/src/collator_side.rs index 46081266b286..fa8991e713b6 100644 --- a/node/network/collator-protocol/src/collator_side.rs +++ b/node/network/collator-protocol/src/collator_side.rs @@ -154,7 +154,7 @@ where if !state.view.contains(&relay_parent) { warn!( target: LOG_TARGET, - relay_parent=?relay_parent, + relay_parent = %relay_parent, "Distribute collation message parent {:?} is outside of our view", relay_parent, ); @@ -174,9 +174,9 @@ where None => { warn!( target: LOG_TARGET, - id=?id, - relay_parent=?relay_parent, - "Looks like no core is assigned to {:?} at {:?}", id, relay_parent, + para_id = %id, + relay_parent = %relay_parent, + "Looks like no core is assigned to {} at {}", id, relay_parent, ); return Ok(()); } @@ -188,7 +188,7 @@ where None => { warn!( target: LOG_TARGET, - core=?our_core, + core = ?our_core, "There are no validators assigned to {:?} core", our_core, ); @@ -383,8 +383,8 @@ where // the one we expect, we ignore the message. 
warn!( target: LOG_TARGET, - para=?receipt.descriptor.para_id, - collating_on=?id, + para_id = %receipt.descriptor.para_id, + collating_on = %id, "DistributeCollation message for para {:?} while collating on {:?}", receipt.descriptor.para_id, id, @@ -396,7 +396,7 @@ where None => { warn!( target: LOG_TARGET, - para=?receipt.descriptor.para_id, + para_id = %receipt.descriptor.para_id, "DistributeCollation message for para {:?} while not collating on any", receipt.descriptor.para_id, ); @@ -429,7 +429,7 @@ where ).await { warn!( target: LOG_TARGET, - err=?e, + err = ?e, "Failed to handle incoming network message: {:?}", e, ); } @@ -504,8 +504,8 @@ where } else { warn!( target: LOG_TARGET, - for_para_id=?para_id, - our_para_id=?our_para_id, + for_para_id = %para_id, + our_para_id = %our_para_id, "Received a RequestCollation for {:?} while collating on {:?}", para_id, our_para_id, ); @@ -514,7 +514,7 @@ where None => { warn!( target: LOG_TARGET, - for_para_id=?para_id, + for_para_id = %para_id, "Received a RequestCollation for {:?} while not collating on any para", para_id, ); @@ -659,7 +659,7 @@ where if let Err(err) = handle_validator_connected(&mut ctx, &mut state, peer_id, validator_id).await { warn!( target: LOG_TARGET, - err=?err, + err = ?err, "Failed to declare our collator id: {:?}", err, ); @@ -673,7 +673,7 @@ where match msg? 
{ Communication { msg } => { if let Err(e) = process_msg(&mut ctx, &mut state, msg).await { - warn!(target: LOG_TARGET, err=?e, "Failed to process message: {}", e); + warn!(target: LOG_TARGET, err = ?e, "Failed to process message: {}", e); } }, Signal(ActiveLeaves(_update)) => {} diff --git a/node/network/collator-protocol/src/lib.rs b/node/network/collator-protocol/src/lib.rs index ae7f55c1aa11..d1f47b1abd38 100644 --- a/node/network/collator-protocol/src/lib.rs +++ b/node/network/collator-protocol/src/lib.rs @@ -141,8 +141,8 @@ where { trace!( target: LOG_TARGET, - rep=?rep, - peer=?peer, + rep = ?rep, + peer_id = %peer, "Reputation change of {:?} for peer {:?}", rep, peer, ); diff --git a/node/network/collator-protocol/src/validator_side.rs b/node/network/collator-protocol/src/validator_side.rs index 5d7cbc5384ad..c8b48abf3109 100644 --- a/node/network/collator-protocol/src/validator_side.rs +++ b/node/network/collator-protocol/src/validator_side.rs @@ -208,7 +208,7 @@ where // may have closed the results channel for some reason. 
trace!( target: LOG_TARGET, - err=?e, + err = ?e, "Failed to send collation: {:?}", e, ); } @@ -383,9 +383,9 @@ where if !state.view.contains(&relay_parent) { trace!( target: LOG_TARGET, - peer_id=%peer_id, - para_id=%para_id, - relay_parent=?relay_parent, + peer_id = %peer_id, + para_id = %para_id, + relay_parent = %relay_parent, "Collation by {} on {} on relay parent {:?} is no longer in view", peer_id, para_id, relay_parent, ); @@ -395,9 +395,9 @@ where if state.requested_collations.contains_key(&(relay_parent, para_id.clone(), peer_id.clone())) { trace!( target: LOG_TARGET, - peer_id=%peer_id, - para_id=%para_id, - relay_parent=?relay_parent, + peer_id = %peer_id, + para_id = %para_id, + relay_parent = %relay_parent, "Collation by {} on {} on relay parent {:?} has already been requested", peer_id, para_id, relay_parent, ); @@ -622,7 +622,7 @@ where CollateOn(id) => { warn!( target: LOG_TARGET, - id=%id, + para_id = %id, "CollateOn({}) message is not expected on the validator side of the protocol", id, ); } @@ -649,7 +649,7 @@ where ).await { warn!( target: LOG_TARGET, - err=?e, + err = ?e, "Failed to handle incoming network message: {:?}", e, ); } @@ -680,7 +680,7 @@ where loop { if let Poll::Ready(msg) = futures::poll!(ctx.recv()) { let msg = msg?; - trace!(target: LOG_TARGET, msg=?msg, "Received a message {:?}", msg); + trace!(target: LOG_TARGET, msg = ?msg, "Received a message {:?}", msg); match msg { Communication { msg } => process_msg(&mut ctx, msg, &mut state).await?, diff --git a/node/network/pov-distribution/src/lib.rs b/node/network/pov-distribution/src/lib.rs index a7d46f90dd3f..a2734e634273 100644 --- a/node/network/pov-distribution/src/lib.rs +++ b/node/network/pov-distribution/src/lib.rs @@ -136,7 +136,7 @@ async fn handle_signal( Err(e) => { tracing::warn!( target: LOG_TARGET, - err=?e, + err = ?e, "Error fetching validators from runtime API for active leaf: {:?}", e ); diff --git a/node/network/statement-distribution/src/lib.rs 
b/node/network/statement-distribution/src/lib.rs index 6c45e521e087..93c5aa070a80 100644 --- a/node/network/statement-distribution/src/lib.rs +++ b/node/network/statement-distribution/src/lib.rs @@ -689,7 +689,7 @@ async fn handle_incoming_message<'a>( // This should never be out-of-sync with our view if the view updates // correspond to actual `StartWork` messages. So we just log and ignore. tracing::warn!( - relay_parent=%relay_parent, + relay_parent = %relay_parent, "Our view out-of-sync with active heads. Head {} not found", relay_parent, ); @@ -853,7 +853,7 @@ async fn handle_network_update( if !active_heads.contains_key(&new) { tracing::warn!( target: LOG_TARGET, - unknown_hash=%new, + unknown_hash = %new, "Our network bridge view update \ inconsistent with `StartWork` messages we have received from overseer. \ Contains unknown hash {}", @@ -910,7 +910,7 @@ impl StatementDistribution { (Err(e), _) | (_, Err(e)) => { tracing::warn!( target: LOG_TARGET, - err=?e, + err = ?e, "Failed to fetch runtime API data for active leaf: {:?}", e, ); diff --git a/node/overseer/src/lib.rs b/node/overseer/src/lib.rs index 7f998dda95c8..bc1e39eb6b2e 100644 --- a/node/overseer/src/lib.rs +++ b/node/overseer/src/lib.rs @@ -1337,7 +1337,7 @@ where // Some subsystem exited? It's time to panic. 
if let Poll::Ready(Some(finished)) = poll!(self.running_subsystems.next()) { - tracing::error!(target: LOG_TARGET, subsystem=?finished, "Subsystem finished unexpectedly {:?}", finished); + tracing::error!(target: LOG_TARGET, subsystem = ?finished, "Subsystem finished unexpectedly {:?}", finished); self.stop().await; return finished; } @@ -1607,7 +1607,7 @@ fn spawn( let fut = Box::pin(async move { if let Err(e) = future.await { - tracing::error!(subsystem=name, err=?e, "Subsystem {} exited with error {:?}", name, e); + tracing::error!(subsystem=name, err = ?e, "Subsystem {} exited with error {:?}", name, e); } else { tracing::debug!(subsystem=name, "Subsystem {} exited without an error", name); } @@ -1617,7 +1617,7 @@ fn spawn( spawner.spawn(name, fut); let _ = streams.push(from_rx); - futures.push(Box::pin(rx.map(|e| { tracing::warn!(err=?e, "Dropping error {:?}", e); Ok(()) }))); + futures.push(Box::pin(rx.map(|e| { tracing::warn!(err = ?e, "Dropping error {:?}", e); Ok(()) }))); let instance = Some(SubsystemInstance { tx: to_tx, diff --git a/node/service/src/lib.rs b/node/service/src/lib.rs index dce61304a57d..11c5747b1aa5 100644 --- a/node/service/src/lib.rs +++ b/node/service/src/lib.rs @@ -702,8 +702,8 @@ pub fn new_full( let voting_rule = match grandpa_pause { Some((block, delay)) => { info!( - block=%block, - delay=%delay, + block_number = %block, + delay = %delay, "GRANDPA scheduled voting pause set for block #{} with a duration of {} blocks.", block, delay, diff --git a/node/subsystem-util/src/lib.rs b/node/subsystem-util/src/lib.rs index 236ded7340fe..d73266984eb6 100644 --- a/node/subsystem-util/src/lib.rs +++ b/node/subsystem-util/src/lib.rs @@ -581,8 +581,8 @@ impl Jobs { if let Err(e) = Job::run(parent_hash, run_args, metrics, to_job_rx, from_job_tx).await { tracing::error!( job=Job::NAME, - parent_hash=%parent_hash, - err=?e, + parent_hash = %parent_hash, + err = ?e, "{}({}) finished with an error {:?}", Job::NAME, parent_hash, @@ -594,7 +594,7 @@ 
impl Jobs { // there's no point trying to propagate this error onto the channel too // all we can do is warn that error propagation has failed if let Err(e) = err_tx.send((Some(parent_hash), JobsError::Job(e))).await { - tracing::warn!(err=?e, "failed to forward error: {:?}", e); + tracing::warn!(err = ?e, "failed to forward error: {:?}", e); } } } @@ -770,7 +770,7 @@ where // if we can't send on the error transmission channel, we can't do anything useful about it // still, we can at least log the failure if let Err(e) = err_tx.send((hash, err)).await { - tracing::warn!(err=?e, "failed to forward error: {:?}", e); + tracing::warn!(err = ?e, "failed to forward error: {:?}", e); } } } @@ -797,7 +797,7 @@ where if let Err(e) = jobs.spawn_job(hash, run_args.clone(), metrics) { tracing::error!( job=Job::NAME, - err=?e, + err = ?e, "Failed to spawn a job({}): {:?}", Job::NAME, e, @@ -832,7 +832,7 @@ where { tracing::error!( job=Job::NAME, - err=?e, + err = ?e, "failed to stop all jobs ({}) on conclude signal: {:?}", Job::NAME, e, @@ -859,7 +859,7 @@ where Err(err) => { tracing::error!( job=Job::NAME, - err=?err, + err = ?err, "error receiving message from subsystem context for job ({}): {:?}", Job::NAME, err, From 2a12b4033b635be9e1c47b6171c861f24524ecaf Mon Sep 17 00:00:00 2001 From: Peter Goodspeed-Niklaus Date: Tue, 10 Nov 2020 16:40:05 +0100 Subject: [PATCH 08/33] add instrumentation to the various 'fn run' --- Cargo.lock | 23 +++++++++++++++++++ cli/Cargo.toml | 1 + node/collation-generation/Cargo.toml | 1 + node/collation-generation/src/lib.rs | 1 + node/core/av-store/Cargo.toml | 1 + node/core/av-store/src/lib.rs | 1 + node/core/backing/Cargo.toml | 1 + node/core/backing/src/lib.rs | 1 + node/core/bitfield-signing/src/lib.rs | 1 + node/core/candidate-selection/Cargo.toml | 1 + node/core/candidate-selection/src/lib.rs | 1 + node/core/candidate-validation/Cargo.toml | 1 + node/core/candidate-validation/src/lib.rs | 1 + node/core/chain-api/Cargo.toml | 2 ++ 
node/core/chain-api/src/lib.rs | 1 + node/core/provisioner/Cargo.toml | 1 + node/core/provisioner/src/lib.rs | 1 + node/core/runtime-api/Cargo.toml | 2 ++ node/core/runtime-api/src/lib.rs | 1 + .../availability-distribution/Cargo.toml | 1 + .../availability-distribution/src/lib.rs | 1 + node/network/bitfield-distribution/Cargo.toml | 1 + node/network/bitfield-distribution/src/lib.rs | 1 + node/network/bridge/Cargo.toml | 1 + node/network/bridge/src/lib.rs | 1 + node/network/collator-protocol/Cargo.toml | 1 + .../collator-protocol/src/collator_side.rs | 1 + node/network/collator-protocol/src/lib.rs | 1 + .../collator-protocol/src/validator_side.rs | 1 + node/network/pov-distribution/Cargo.toml | 1 + node/network/pov-distribution/src/lib.rs | 1 + .../network/statement-distribution/Cargo.toml | 1 + .../network/statement-distribution/src/lib.rs | 1 + node/overseer/Cargo.toml | 1 + node/overseer/src/lib.rs | 1 + node/service/Cargo.toml | 1 + node/subsystem-test-helpers/Cargo.toml | 1 + node/subsystem-util/Cargo.toml | 1 + node/subsystem-util/src/lib.rs | 1 + node/subsystem/Cargo.toml | 1 + node/test/service/Cargo.toml | 1 + 41 files changed, 65 insertions(+) diff --git a/Cargo.lock b/Cargo.lock index ad1237822956..2b525b2011a4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4828,6 +4828,7 @@ dependencies = [ "sp-keystore", "tempfile", "tracing", + "tracing-futures", ] [[package]] @@ -4853,6 +4854,7 @@ dependencies = [ "sp-keystore", "thiserror", "tracing", + "tracing-futures", ] [[package]] @@ -4872,6 +4874,7 @@ dependencies = [ "substrate-browser-utils", "substrate-build-script-utils", "thiserror", + "tracing-futures", "wasm-bindgen", "wasm-bindgen-futures", ] @@ -4894,6 +4897,7 @@ dependencies = [ "sp-keyring", "thiserror", "tracing", + "tracing-futures", ] [[package]] @@ -4937,6 +4941,7 @@ dependencies = [ "sp-keyring", "sp-runtime", "tracing", + "tracing-futures", ] [[package]] @@ -4953,6 +4958,7 @@ dependencies = [ "sp-core", "thiserror", "tracing", + 
"tracing-futures", ] [[package]] @@ -4978,6 +4984,7 @@ dependencies = [ "sp-core", "thiserror", "tracing", + "tracing-futures", ] [[package]] @@ -5001,6 +5008,7 @@ dependencies = [ "sp-keystore", "thiserror", "tracing", + "tracing-futures", ] [[package]] @@ -5029,6 +5037,7 @@ dependencies = [ "sp-core", "thiserror", "tracing", + "tracing-futures", ] [[package]] @@ -5047,6 +5056,7 @@ dependencies = [ "sp-core", "sp-keyring", "tracing", + "tracing-futures", ] [[package]] @@ -5061,6 +5071,8 @@ dependencies = [ "polkadot-primitives", "sp-blockchain", "sp-core", + "tracing", + "tracing-futures", ] [[package]] @@ -5100,6 +5112,7 @@ dependencies = [ "sp-keystore", "thiserror", "tracing", + "tracing-futures", ] [[package]] @@ -5113,6 +5126,8 @@ dependencies = [ "polkadot-primitives", "sp-api", "sp-core", + "tracing", + "tracing-futures", ] [[package]] @@ -5160,6 +5175,7 @@ dependencies = [ "substrate-prometheus-endpoint", "thiserror", "tracing", + "tracing-futures", ] [[package]] @@ -5182,6 +5198,7 @@ dependencies = [ "smallvec 1.4.2", "sp-core", "tracing", + "tracing-futures", ] [[package]] @@ -5209,6 +5226,7 @@ dependencies = [ "substrate-prometheus-endpoint", "thiserror", "tracing", + "tracing-futures", ] [[package]] @@ -5229,6 +5247,7 @@ dependencies = [ "sp-core", "streamunordered", "tracing", + "tracing-futures", ] [[package]] @@ -5266,6 +5285,7 @@ dependencies = [ "polkadot-primitives", "sp-core", "tracing", + "tracing-futures", ] [[package]] @@ -5566,6 +5586,7 @@ dependencies = [ "sp-trie", "substrate-prometheus-endpoint", "tracing", + "tracing-futures", "westend-runtime", ] @@ -5590,6 +5611,7 @@ dependencies = [ "sp-keystore", "sp-staking", "tracing", + "tracing-futures", ] [[package]] @@ -5735,6 +5757,7 @@ dependencies = [ "tempfile", "tokio 0.2.21", "tracing", + "tracing-futures", ] [[package]] diff --git a/cli/Cargo.toml b/cli/Cargo.toml index df5375af3328..472f02e4dee5 100644 --- a/cli/Cargo.toml +++ b/cli/Cargo.toml @@ -25,6 +25,7 @@ polkadot-parachain = { 
path = "../parachain", optional = true } sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } sc-tracing = { git = "https://github.com/paritytech/substrate", branch = "master" } +tracing-futures = "0.2.4" frame-benchmarking-cli = { git = "https://github.com/paritytech/substrate", branch = "master", optional = true } sc-cli = { git = "https://github.com/paritytech/substrate", branch = "master", optional = true } sc-service = { git = "https://github.com/paritytech/substrate", branch = "master", optional = true } diff --git a/node/collation-generation/Cargo.toml b/node/collation-generation/Cargo.toml index abc77727ab56..b352166d873e 100644 --- a/node/collation-generation/Cargo.toml +++ b/node/collation-generation/Cargo.toml @@ -7,6 +7,7 @@ edition = "2018" [dependencies] futures = "0.3.5" tracing = "0.1.21" +tracing-futures = "0.2.4" polkadot-erasure-coding = { path = "../../erasure-coding" } polkadot-node-primitives = { path = "../primitives" } polkadot-node-subsystem = { path = "../subsystem" } diff --git a/node/collation-generation/src/lib.rs b/node/collation-generation/src/lib.rs index 287f7d71c405..1773ac666547 100644 --- a/node/collation-generation/src/lib.rs +++ b/node/collation-generation/src/lib.rs @@ -74,6 +74,7 @@ impl CollationGenerationSubsystem { /// /// If `err_tx` is not `None`, errors are forwarded onto that channel as they occur. /// Otherwise, most are logged and then discarded. 
+ #[tracing::instrument(skip(self, ctx), fields(subsystem = std::any::type_name::()))] async fn run(mut self, mut ctx: Context) where Context: SubsystemContext, diff --git a/node/core/av-store/Cargo.toml b/node/core/av-store/Cargo.toml index 2be581f45086..9c6aa8a6b60c 100644 --- a/node/core/av-store/Cargo.toml +++ b/node/core/av-store/Cargo.toml @@ -11,6 +11,7 @@ kvdb = "0.7.0" kvdb-rocksdb = "0.9.1" thiserror = "1.0.21" tracing = "0.1.21" +tracing-futures = "0.2.4" codec = { package = "parity-scale-codec", version = "1.3.1", features = ["derive"] } erasure = { package = "polkadot-erasure-coding", path = "../../../erasure-coding" } diff --git a/node/core/av-store/src/lib.rs b/node/core/av-store/src/lib.rs index c603ea82129b..9281cf9bb8f2 100644 --- a/node/core/av-store/src/lib.rs +++ b/node/core/av-store/src/lib.rs @@ -473,6 +473,7 @@ fn get_next_chunk_pruning_time(db: &Arc) -> Option()))] async fn run(mut subsystem: AvailabilityStoreSubsystem, mut ctx: Context) where Context: SubsystemContext, diff --git a/node/core/backing/Cargo.toml b/node/core/backing/Cargo.toml index e3eba3db58f5..fb2d45af1bfd 100644 --- a/node/core/backing/Cargo.toml +++ b/node/core/backing/Cargo.toml @@ -15,6 +15,7 @@ erasure-coding = { package = "polkadot-erasure-coding", path = "../../../erasure statement-table = { package = "polkadot-statement-table", path = "../../../statement-table" } bitvec = { version = "0.17.4", default-features = false, features = ["alloc"] } tracing = "0.1.21" +tracing-futures = "0.2.4" thiserror = "1.0.21" [dev-dependencies] diff --git a/node/core/backing/src/lib.rs b/node/core/backing/src/lib.rs index 6340d48b99c0..115f86ca7638 100644 --- a/node/core/backing/src/lib.rs +++ b/node/core/backing/src/lib.rs @@ -767,6 +767,7 @@ impl util::JobTrait for CandidateBackingJob { const NAME: &'static str = "CandidateBackingJob"; + #[tracing::instrument(skip(keystore, metrics, rx_to, tx_from), fields(subsystem = std::any::type_name::()))] fn run( parent: Hash, keystore: 
SyncCryptoStorePtr, diff --git a/node/core/bitfield-signing/src/lib.rs b/node/core/bitfield-signing/src/lib.rs index e84c0476121d..62728cec9522 100644 --- a/node/core/bitfield-signing/src/lib.rs +++ b/node/core/bitfield-signing/src/lib.rs @@ -267,6 +267,7 @@ impl JobTrait for BitfieldSigningJob { const NAME: &'static str = "BitfieldSigningJob"; /// Run a job for the parent block indicated + #[tracing::instrument(skip(keystore, metrics, _receiver, sender), fields(subsystem = std::any::type_name::()))] fn run( relay_parent: Hash, keystore: Self::RunArgs, diff --git a/node/core/candidate-selection/Cargo.toml b/node/core/candidate-selection/Cargo.toml index be631c217329..9147bbcd7f29 100644 --- a/node/core/candidate-selection/Cargo.toml +++ b/node/core/candidate-selection/Cargo.toml @@ -7,6 +7,7 @@ edition = "2018" [dependencies] futures = "0.3.5" tracing = "0.1.21" +tracing-futures = "0.2.4" thiserror = "1.0.21" polkadot-primitives = { path = "../../../primitives" } polkadot-node-subsystem = { path = "../../subsystem" } diff --git a/node/core/candidate-selection/src/lib.rs b/node/core/candidate-selection/src/lib.rs index a9e375a13237..90121363d2c7 100644 --- a/node/core/candidate-selection/src/lib.rs +++ b/node/core/candidate-selection/src/lib.rs @@ -134,6 +134,7 @@ impl JobTrait for CandidateSelectionJob { /// Run a job for the parent block indicated // // this function is in charge of creating and executing the job's main loop + #[tracing::instrument(skip(_relay_parent, _run_args, metrics, receiver, sender), fields(subsystem = std::any::type_name::()))] fn run( _relay_parent: Hash, _run_args: Self::RunArgs, diff --git a/node/core/candidate-validation/Cargo.toml b/node/core/candidate-validation/Cargo.toml index c38eefc01298..7dbc393c3292 100644 --- a/node/core/candidate-validation/Cargo.toml +++ b/node/core/candidate-validation/Cargo.toml @@ -7,6 +7,7 @@ edition = "2018" [dependencies] futures = "0.3.5" tracing = "0.1.21" +tracing-futures = "0.2.4" sp-core = { 
package = "sp-core", git = "https://github.com/paritytech/substrate", branch = "master" } parity-scale-codec = { version = "1.3.0", default-features = false, features = ["bit-vec", "derive"] } diff --git a/node/core/candidate-validation/src/lib.rs b/node/core/candidate-validation/src/lib.rs index f74b36112b81..dd80b68e1876 100644 --- a/node/core/candidate-validation/src/lib.rs +++ b/node/core/candidate-validation/src/lib.rs @@ -85,6 +85,7 @@ impl Subsystem for CandidateValidationSubsystem where } } +#[tracing::instrument(skip(ctx, spawn, metrics), fields(subsystem = "CandidateValidationSubsystem"))] async fn run( mut ctx: impl SubsystemContext, spawn: impl SpawnNamed + Clone + 'static, diff --git a/node/core/chain-api/Cargo.toml b/node/core/chain-api/Cargo.toml index 23f6ed4dede4..d8931af9c41a 100644 --- a/node/core/chain-api/Cargo.toml +++ b/node/core/chain-api/Cargo.toml @@ -6,6 +6,8 @@ edition = "2018" [dependencies] futures = { version = "0.3.5" } +tracing = "0.1.21" +tracing-futures = "0.2.4" sp-blockchain = { git = "https://github.com/paritytech/substrate", branch = "master" } polkadot-primitives = { path = "../../../primitives" } polkadot-subsystem = { package = "polkadot-node-subsystem", path = "../../subsystem" } diff --git a/node/core/chain-api/src/lib.rs b/node/core/chain-api/src/lib.rs index bed158dbe64f..84f8b9baa9df 100644 --- a/node/core/chain-api/src/lib.rs +++ b/node/core/chain-api/src/lib.rs @@ -75,6 +75,7 @@ impl Subsystem for ChainApiSubsystem where } } +#[tracing::instrument(skip(ctx, subsystem), fields(subsystem = std::any::type_name::>()))] async fn run( mut ctx: impl SubsystemContext, subsystem: ChainApiSubsystem, diff --git a/node/core/provisioner/Cargo.toml b/node/core/provisioner/Cargo.toml index 833d94826a34..51924950275e 100644 --- a/node/core/provisioner/Cargo.toml +++ b/node/core/provisioner/Cargo.toml @@ -8,6 +8,7 @@ edition = "2018" bitvec = { version = "0.17.4", default-features = false, features = ["alloc"] } futures = "0.3.5" 
tracing = "0.1.21" +tracing-futures = "0.2.4" thiserror = "1.0.21" polkadot-primitives = { path = "../../../primitives" } polkadot-node-subsystem = { path = "../../subsystem" } diff --git a/node/core/provisioner/src/lib.rs b/node/core/provisioner/src/lib.rs index 2aa56637677c..7f2a9d431cc6 100644 --- a/node/core/provisioner/src/lib.rs +++ b/node/core/provisioner/src/lib.rs @@ -152,6 +152,7 @@ impl JobTrait for ProvisioningJob { /// Run a job for the parent block indicated // // this function is in charge of creating and executing the job's main loop + #[tracing::instrument(skip(_run_args, metrics, receiver, sender), fields(subsystem = std::any::type_name::()))] fn run( relay_parent: Hash, _run_args: Self::RunArgs, diff --git a/node/core/runtime-api/Cargo.toml b/node/core/runtime-api/Cargo.toml index 81cf95410976..7ebfc6dda702 100644 --- a/node/core/runtime-api/Cargo.toml +++ b/node/core/runtime-api/Cargo.toml @@ -6,6 +6,8 @@ edition = "2018" [dependencies] futures = "0.3.5" +tracing = "0.1.21" +tracing-futures = "0.2.4" sp-api = { git = "https://github.com/paritytech/substrate", branch = "master" } polkadot-primitives = { path = "../../../primitives" } diff --git a/node/core/runtime-api/src/lib.rs b/node/core/runtime-api/src/lib.rs index cf266e578d14..abdf6f0f0198 100644 --- a/node/core/runtime-api/src/lib.rs +++ b/node/core/runtime-api/src/lib.rs @@ -66,6 +66,7 @@ impl Subsystem for RuntimeApiSubsystem where } } +#[tracing::instrument(skip(ctx, subsystem), fields(subsystem = std::any::type_name::>()))] async fn run( mut ctx: impl SubsystemContext, subsystem: RuntimeApiSubsystem, diff --git a/node/network/availability-distribution/Cargo.toml b/node/network/availability-distribution/Cargo.toml index 047050ccb812..6620aec29bca 100644 --- a/node/network/availability-distribution/Cargo.toml +++ b/node/network/availability-distribution/Cargo.toml @@ -7,6 +7,7 @@ edition = "2018" [dependencies] futures = "0.3.5" tracing = "0.1.21" +tracing-futures = "0.2.4" codec = { 
package="parity-scale-codec", version = "1.3.4", features = ["std"] } polkadot-primitives = { path = "../../../primitives" } polkadot-erasure-coding = { path = "../../../erasure-coding" } diff --git a/node/network/availability-distribution/src/lib.rs b/node/network/availability-distribution/src/lib.rs index e6f398e39e77..ba57f9ff2372 100644 --- a/node/network/availability-distribution/src/lib.rs +++ b/node/network/availability-distribution/src/lib.rs @@ -771,6 +771,7 @@ impl AvailabilityDistributionSubsystem { } /// Start processing work as passed on from the Overseer. + #[tracing::instrument(skip(self, ctx), fields(subsystem = std::any::type_name::()))] async fn run(self, mut ctx: Context) -> Result<()> where Context: SubsystemContext, diff --git a/node/network/bitfield-distribution/Cargo.toml b/node/network/bitfield-distribution/Cargo.toml index a47cf93dd0cd..e8c53fe5f62f 100644 --- a/node/network/bitfield-distribution/Cargo.toml +++ b/node/network/bitfield-distribution/Cargo.toml @@ -7,6 +7,7 @@ edition = "2018" [dependencies] futures = "0.3.5" tracing = "0.1.21" +tracing-futures = "0.2.4" codec = { package="parity-scale-codec", version = "1.3.4" } polkadot-primitives = { path = "../../../primitives" } polkadot-subsystem = { package = "polkadot-node-subsystem", path = "../../subsystem" } diff --git a/node/network/bitfield-distribution/src/lib.rs b/node/network/bitfield-distribution/src/lib.rs index 2eae1d321564..9d61c6b5eb90 100644 --- a/node/network/bitfield-distribution/src/lib.rs +++ b/node/network/bitfield-distribution/src/lib.rs @@ -144,6 +144,7 @@ impl BitfieldDistribution { } /// Start processing work as passed on from the Overseer. 
+ #[tracing::instrument(skip(self, ctx), fields(subsystem = std::any::type_name::()))] async fn run(self, mut ctx: Context) where Context: SubsystemContext, diff --git a/node/network/bridge/Cargo.toml b/node/network/bridge/Cargo.toml index ed5d10f7eabb..b4ab13b6e0e1 100644 --- a/node/network/bridge/Cargo.toml +++ b/node/network/bridge/Cargo.toml @@ -8,6 +8,7 @@ edition = "2018" async-trait = "0.1" futures = "0.3.5" tracing = "0.1.21" +tracing-futures = "0.2.4" polkadot-primitives = { path = "../../../primitives" } parity-scale-codec = "1.3.4" sc-authority-discovery = { git = "https://github.com/paritytech/substrate", branch = "master" } diff --git a/node/network/bridge/src/lib.rs b/node/network/bridge/src/lib.rs index 0654f0bcbfe3..59e87cddd228 100644 --- a/node/network/bridge/src/lib.rs +++ b/node/network/bridge/src/lib.rs @@ -569,6 +569,7 @@ async fn dispatch_collation_events_to_all( ctx.send_messages(events.into_iter().flat_map(messages_for)).await } +#[tracing::instrument(skip(network_service, authority_discovery_service, ctx), fields(subsystem = "NetworkBridge"))] async fn run_network( mut network_service: N, mut authority_discovery_service: AD, diff --git a/node/network/collator-protocol/Cargo.toml b/node/network/collator-protocol/Cargo.toml index 7044349b2052..fb997c8de585 100644 --- a/node/network/collator-protocol/Cargo.toml +++ b/node/network/collator-protocol/Cargo.toml @@ -7,6 +7,7 @@ edition = "2018" [dependencies] futures = "0.3.5" tracing = "0.1.21" +tracing-futures = "0.2.4" thiserror = "1.0.21" diff --git a/node/network/collator-protocol/src/collator_side.rs b/node/network/collator-protocol/src/collator_side.rs index fa8991e713b6..898bd94a4754 100644 --- a/node/network/collator-protocol/src/collator_side.rs +++ b/node/network/collator-protocol/src/collator_side.rs @@ -635,6 +635,7 @@ async fn handle_our_view_change( } /// The collator protocol collator side main loop. 
+#[tracing::instrument(skip(ctx, metrics), fields(subsystem = "Collator Protocol: Collator Side"))] pub(crate) async fn run( mut ctx: Context, our_id: CollatorId, diff --git a/node/network/collator-protocol/src/lib.rs b/node/network/collator-protocol/src/lib.rs index d1f47b1abd38..72a0253a8724 100644 --- a/node/network/collator-protocol/src/lib.rs +++ b/node/network/collator-protocol/src/lib.rs @@ -96,6 +96,7 @@ impl CollatorProtocolSubsystem { } } + #[tracing::instrument(skip(self, ctx), fields(subsystem = std::any::type_name::()))] async fn run(self, ctx: Context) -> Result<()> where Context: SubsystemContext, diff --git a/node/network/collator-protocol/src/validator_side.rs b/node/network/collator-protocol/src/validator_side.rs index c8b48abf3109..de4874e49469 100644 --- a/node/network/collator-protocol/src/validator_side.rs +++ b/node/network/collator-protocol/src/validator_side.rs @@ -660,6 +660,7 @@ where } /// The main run loop. +#[tracing::instrument(skip(ctx, metrics), fields(subsystem = "Collator Protocol: Validator Side"))] pub(crate) async fn run( mut ctx: Context, request_timeout: Duration, diff --git a/node/network/pov-distribution/Cargo.toml b/node/network/pov-distribution/Cargo.toml index 4bcf0eab12a2..4af3148df697 100644 --- a/node/network/pov-distribution/Cargo.toml +++ b/node/network/pov-distribution/Cargo.toml @@ -7,6 +7,7 @@ edition = "2018" [dependencies] futures = "0.3.5" tracing = "0.1.21" +tracing-futures = "0.2.4" polkadot-primitives = { path = "../../../primitives" } polkadot-subsystem = { package = "polkadot-node-subsystem", path = "../../subsystem" } polkadot-node-subsystem-util = { path = "../../subsystem-util" } diff --git a/node/network/pov-distribution/src/lib.rs b/node/network/pov-distribution/src/lib.rs index a2734e634273..d534d5c5bf3c 100644 --- a/node/network/pov-distribution/src/lib.rs +++ b/node/network/pov-distribution/src/lib.rs @@ -541,6 +541,7 @@ impl PoVDistribution { Self { metrics } } + #[tracing::instrument(skip(self, 
ctx), fields(subsystem = std::any::type_name::()))] async fn run( self, mut ctx: impl SubsystemContext, diff --git a/node/network/statement-distribution/Cargo.toml b/node/network/statement-distribution/Cargo.toml index 8a83e0681a6f..6896596927f8 100644 --- a/node/network/statement-distribution/Cargo.toml +++ b/node/network/statement-distribution/Cargo.toml @@ -8,6 +8,7 @@ edition = "2018" [dependencies] futures = "0.3.5" tracing = "0.1.21" +tracing-futures = "0.2.4" polkadot-primitives = { path = "../../../primitives" } node-primitives = { package = "polkadot-node-primitives", path = "../../primitives" } sp-staking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } diff --git a/node/network/statement-distribution/src/lib.rs b/node/network/statement-distribution/src/lib.rs index 93c5aa070a80..71efeade0b5e 100644 --- a/node/network/statement-distribution/src/lib.rs +++ b/node/network/statement-distribution/src/lib.rs @@ -869,6 +869,7 @@ async fn handle_network_update( } impl StatementDistribution { + #[tracing::instrument(skip(self, ctx), fields(subsystem = std::any::type_name::()))] async fn run( self, mut ctx: impl SubsystemContext, diff --git a/node/overseer/Cargo.toml b/node/overseer/Cargo.toml index 16e0318eee65..6e58f9408abc 100644 --- a/node/overseer/Cargo.toml +++ b/node/overseer/Cargo.toml @@ -7,6 +7,7 @@ edition = "2018" [dependencies] futures = "0.3.5" tracing = "0.1.21" +tracing-futures = "0.2.4" futures-timer = "3.0.2" streamunordered = "0.5.1" polkadot-primitives = { path = "../../primitives" } diff --git a/node/overseer/src/lib.rs b/node/overseer/src/lib.rs index bc1e39eb6b2e..b014ca7b7ec7 100644 --- a/node/overseer/src/lib.rs +++ b/node/overseer/src/lib.rs @@ -1287,6 +1287,7 @@ where } /// Run the `Overseer`. 
+ #[tracing::instrument(skip(self), fields(subsystem = std::any::type_name::()))] pub async fn run(mut self) -> SubsystemResult<()> { let leaves = std::mem::take(&mut self.leaves); let mut update = ActiveLeavesUpdate::default(); diff --git a/node/service/Cargo.toml b/node/service/Cargo.toml index afb04a80547e..f5e5187d0e3c 100644 --- a/node/service/Cargo.toml +++ b/node/service/Cargo.toml @@ -56,6 +56,7 @@ futures = "0.3.4" hex-literal = "0.2.1" lazy_static = "1.4.0" tracing = "0.1.21" +tracing-futures = "0.2.4" parking_lot = "0.9.0" serde = { version = "1.0.102", features = ["derive"] } slog = "2.5.2" diff --git a/node/subsystem-test-helpers/Cargo.toml b/node/subsystem-test-helpers/Cargo.toml index 2b9f506d4624..db8c64663d1e 100644 --- a/node/subsystem-test-helpers/Cargo.toml +++ b/node/subsystem-test-helpers/Cargo.toml @@ -10,6 +10,7 @@ async-trait = "0.1" futures = "0.3.5" futures-timer = "3.0.2" tracing = "0.1.21" +tracing-futures = "0.2.4" parity-scale-codec = "1.3.4" parking_lot = "0.10.0" pin-project = "0.4.23" diff --git a/node/subsystem-util/Cargo.toml b/node/subsystem-util/Cargo.toml index 907f3a5bedd6..1d1568b86938 100644 --- a/node/subsystem-util/Cargo.toml +++ b/node/subsystem-util/Cargo.toml @@ -15,6 +15,7 @@ pin-project = "0.4.22" streamunordered = "0.5.1" thiserror = "1.0.21" tracing = "0.1.21" +tracing-futures = "0.2.4" polkadot-node-primitives = { path = "../primitives" } polkadot-node-subsystem = { path = "../subsystem" } diff --git a/node/subsystem-util/src/lib.rs b/node/subsystem-util/src/lib.rs index d73266984eb6..5a49c873cd08 100644 --- a/node/subsystem-util/src/lib.rs +++ b/node/subsystem-util/src/lib.rs @@ -978,6 +978,7 @@ macro_rules! 
delegated_subsystem { } /// Run this subsystem + #[tracing::instrument(skip(ctx, run_args, metrics, spawner), fields(subsystem = $subsystem_name))] pub async fn run(ctx: Context, run_args: $run_args, metrics: $metrics, spawner: Spawner) { >::run(ctx, run_args, metrics, spawner, None).await } diff --git a/node/subsystem/Cargo.toml b/node/subsystem/Cargo.toml index 1d2aabe9ad91..85bee27a549b 100644 --- a/node/subsystem/Cargo.toml +++ b/node/subsystem/Cargo.toml @@ -11,6 +11,7 @@ derive_more = "0.99.11" futures = "0.3.5" futures-timer = "3.0.2" tracing = "0.1.21" +tracing-futures = "0.2.4" parity-scale-codec = "1.3.4" parking_lot = { version = "0.10.0", optional = true } pin-project = "0.4.22" diff --git a/node/test/service/Cargo.toml b/node/test/service/Cargo.toml index 8faf5da4e511..33cbce0cf84a 100644 --- a/node/test/service/Cargo.toml +++ b/node/test/service/Cargo.toml @@ -9,6 +9,7 @@ futures = "0.3.4" futures01 = { package = "futures", version = "0.1.29" } hex = "0.4" tracing = "0.1.21" +tracing-futures = "0.2.4" rand = "0.7.3" tempfile = "3.1.0" From 8ff0fe7150c2b7bf3dffd9b78482675048642a9a Mon Sep 17 00:00:00 2001 From: Peter Goodspeed-Niklaus Date: Tue, 10 Nov 2020 16:48:10 +0100 Subject: [PATCH 09/33] use explicit tracing module throughout --- .../availability-distribution/src/lib.rs | 7 ++-- node/network/bitfield-distribution/src/lib.rs | 41 +++++++++---------- .../collator-protocol/src/collator_side.rs | 40 +++++++++--------- node/network/collator-protocol/src/lib.rs | 3 +- .../collator-protocol/src/validator_side.rs | 25 ++++++----- 5 files changed, 55 insertions(+), 61 deletions(-) diff --git a/node/network/availability-distribution/src/lib.rs b/node/network/availability-distribution/src/lib.rs index ba57f9ff2372..635ca56cfb2c 100644 --- a/node/network/availability-distribution/src/lib.rs +++ b/node/network/availability-distribution/src/lib.rs @@ -30,7 +30,6 @@ use futures::{channel::oneshot, FutureExt, TryFutureExt}; use sp_core::crypto::Public; use 
sp_keystore::{CryptoStore, SyncCryptoStorePtr}; -use tracing::{trace, warn}; use polkadot_erasure_coding::branch_hash; use polkadot_node_network_protocol::{ v1 as protocol_v1, NetworkBridgeEvent, PeerId, ReputationChange as Rep, View, @@ -711,7 +710,7 @@ where ) .await? { - warn!( + tracing::warn!( target: TARGET, "Failed to store erasure chunk to availability store" ); @@ -796,7 +795,7 @@ impl AvailabilityDistributionSubsystem { ) .await { - warn!( + tracing::warn!( target: TARGET, err = ?e, "Failed to handle incoming network messages: {:?}", e @@ -958,7 +957,7 @@ async fn modify_reputation(ctx: &mut Context, peer: PeerId, rep: Rep) - where Context: SubsystemContext, { - trace!( + tracing::trace!( target: TARGET, "Reputation change of {:?} for peer {:?}", rep, diff --git a/node/network/bitfield-distribution/src/lib.rs b/node/network/bitfield-distribution/src/lib.rs index 9d61c6b5eb90..ee6bc19c79ad 100644 --- a/node/network/bitfield-distribution/src/lib.rs +++ b/node/network/bitfield-distribution/src/lib.rs @@ -25,7 +25,6 @@ use codec::{Decode, Encode}; use futures::{channel::oneshot, FutureExt}; -use tracing::{debug, trace, warn}; use polkadot_subsystem::messages::*; use polkadot_subsystem::{ ActiveLeavesUpdate, FromOverseer, OverseerSignal, SpawnedSubsystem, Subsystem, SubsystemContext, SubsystemResult, @@ -155,7 +154,7 @@ impl BitfieldDistribution { let message = match ctx.recv().await { Ok(message) => message, Err(e) => { - debug!(target: LOG_TARGET, err = ?e, "Failed to receive a message from Overseer: {}, exiting", e); + tracing::debug!(target: LOG_TARGET, err = ?e, "Failed to receive a message from Overseer: {}, exiting", e); return; }, }; @@ -163,7 +162,7 @@ impl BitfieldDistribution { FromOverseer::Communication { msg: BitfieldDistributionMessage::DistributeBitfield(hash, signed_availability), } => { - trace!(target: LOG_TARGET, "Processing DistributeBitfield"); + tracing::trace!(target: LOG_TARGET, "Processing DistributeBitfield"); if let Err(err) = 
handle_bitfield_distribution( &mut ctx, &mut state, @@ -171,21 +170,21 @@ impl BitfieldDistribution { hash, signed_availability, ).await { - warn!(target: LOG_TARGET, err = ?err, "Failed to reply to `DistributeBitfield` message: {}", err); + tracing::warn!(target: LOG_TARGET, err = ?err, "Failed to reply to `DistributeBitfield` message: {}", err); } } FromOverseer::Communication { msg: BitfieldDistributionMessage::NetworkBridgeUpdateV1(event), } => { - trace!(target: LOG_TARGET, "Processing NetworkMessage"); + tracing::trace!(target: LOG_TARGET, "Processing NetworkMessage"); // a network message was received if let Err(e) = handle_network_msg(&mut ctx, &mut state, &self.metrics, event).await { - warn!(target: LOG_TARGET, err = ?e, "Failed to handle incoming network messages: {:?}", e); + tracing::warn!(target: LOG_TARGET, err = ?e, "Failed to handle incoming network messages: {:?}", e); } } FromOverseer::Signal(OverseerSignal::ActiveLeaves(ActiveLeavesUpdate { activated, deactivated })) => { for relay_parent in activated { - trace!(target: LOG_TARGET, relay_parent = %relay_parent, "Start {:?}", relay_parent); + tracing::trace!(target: LOG_TARGET, relay_parent = %relay_parent, "Start {:?}", relay_parent); // query basic system parameters once match query_basics(&mut ctx, relay_parent).await { Ok(Some((validator_set, signing_context))) => { @@ -204,22 +203,22 @@ impl BitfieldDistribution { ); } Err(e) => { - warn!(target: LOG_TARGET, err = ?e, "query_basics has failed: {}", e); + tracing::warn!(target: LOG_TARGET, err = ?e, "query_basics has failed: {}", e); } _ => {}, } } for relay_parent in deactivated { - trace!(target: LOG_TARGET, relay_parent = %relay_parent, "Stop {:?}", relay_parent); + tracing::trace!(target: LOG_TARGET, relay_parent = %relay_parent, "Stop {:?}", relay_parent); // defer the cleanup to the view change } } FromOverseer::Signal(OverseerSignal::BlockFinalized(hash)) => { - trace!(target: LOG_TARGET, hash = %hash, "Block finalized {:?}", hash); + 
tracing::trace!(target: LOG_TARGET, hash = %hash, "Block finalized {:?}", hash); } FromOverseer::Signal(OverseerSignal::Conclude) => { - trace!(target: LOG_TARGET, "Conclude"); + tracing::trace!(target: LOG_TARGET, "Conclude"); return; } } @@ -236,7 +235,7 @@ async fn modify_reputation( where Context: SubsystemContext, { - trace!(target: LOG_TARGET, rep = ?rep, peer_id = %peer, "Reputation change of {:?} for peer {:?}", rep, peer); + tracing::trace!(target: LOG_TARGET, rep = ?rep, peer_id = %peer, "Reputation change of {:?} for peer {:?}", rep, peer); ctx.send_message(AllMessages::NetworkBridge( NetworkBridgeMessage::ReportPeer(peer, rep), )) @@ -261,7 +260,7 @@ where let job_data: &mut _ = if let Some(ref mut job_data) = job_data { job_data } else { - trace!( + tracing::trace!( target: LOG_TARGET, relay_parent = %relay_parent, "Not supposed to work on relay parent related data", @@ -271,7 +270,7 @@ where }; let validator_set = &job_data.validator_set; if validator_set.is_empty() { - trace!(target: LOG_TARGET, relay_parent = %relay_parent, "Validator set for {:?} is empty", relay_parent); + tracing::trace!(target: LOG_TARGET, relay_parent = %relay_parent, "Validator set for {:?} is empty", relay_parent); return Ok(()); } @@ -279,7 +278,7 @@ where let validator = if let Some(validator) = validator_set.get(validator_index) { validator.clone() } else { - trace!(target: LOG_TARGET, "Could not find a validator for index {}", validator_index); + tracing::trace!(target: LOG_TARGET, "Could not find a validator for index {}", validator_index); return Ok(()); }; @@ -343,7 +342,7 @@ where .collect::>(); if interested_peers.is_empty() { - trace!( + tracing::trace!( target: LOG_TARGET, relay_parent = %message.relay_parent, "No peers are interested in gossip for relay parent {:?}", @@ -387,7 +386,7 @@ where let validator_set = &job_data.validator_set; if validator_set.is_empty() { - trace!( + tracing::trace!( target: LOG_TARGET, relay_parent = %message.relay_parent, "Validator 
set for relay parent {:?} is empty", @@ -430,7 +429,7 @@ where // only relay_message a message of a validator once if one_per_validator.get(&validator).is_some() { - trace!( + tracing::trace!( target: LOG_TARGET, validator_index, "Already received a message for validator at index {}", @@ -478,7 +477,7 @@ where NetworkBridgeEvent::PeerMessage(remote, message) => { match message { protocol_v1::BitfieldDistributionMessage::Bitfield(relay_parent, bitfield) => { - trace!(target: LOG_TARGET, peer_id = %remote, "Received bitfield gossip from peer {:?}", &remote); + tracing::trace!(target: LOG_TARGET, peer_id = %remote, "Received bitfield gossip from peer {:?}", &remote); let gossiped_bitfield = BitfieldGossipMessage { relay_parent, signed_availability: bitfield, @@ -497,7 +496,7 @@ fn handle_our_view_change(state: &mut ProtocolState, view: View) -> SubsystemRes for added in state.view.difference(&old_view) { if !state.per_relay_parent.contains_key(&added) { - warn!( + tracing::warn!( target: LOG_TARGET, added = %added, "Our view contains {} but the overseer never told use we should work on this", @@ -644,7 +643,7 @@ where SigningContext { parent_hash: relay_parent, session_index: s }, ))), (Err(e), _) | (_, Err(e)) => { - warn!(target: LOG_TARGET, err = ?e, "Failed to fetch basics from runtime API: {:?}", e); + tracing::warn!(target: LOG_TARGET, err = ?e, "Failed to fetch basics from runtime API: {:?}", e); Ok(None) } } diff --git a/node/network/collator-protocol/src/collator_side.rs b/node/network/collator-protocol/src/collator_side.rs index 898bd94a4754..66a9409f41b7 100644 --- a/node/network/collator-protocol/src/collator_side.rs +++ b/node/network/collator-protocol/src/collator_side.rs @@ -19,7 +19,6 @@ use std::collections::HashMap; use super::{LOG_TARGET, Result}; use futures::{StreamExt, task::Poll}; -use tracing::warn; use polkadot_primitives::v1::{ CollatorId, CoreIndex, CoreState, Hash, Id as ParaId, CandidateReceipt, @@ -152,7 +151,7 @@ where // This collation 
is not in the active-leaves set. if !state.view.contains(&relay_parent) { - warn!( + tracing::warn!( target: LOG_TARGET, relay_parent = %relay_parent, "Distribute collation message parent {:?} is outside of our view", @@ -172,7 +171,7 @@ where let (our_core, num_cores) = match determine_core(ctx, id, relay_parent).await? { Some(core) => core, None => { - warn!( + tracing::warn!( target: LOG_TARGET, para_id = %id, relay_parent = %relay_parent, @@ -186,7 +185,7 @@ where let our_validators = match determine_our_validators(ctx, our_core, num_cores, relay_parent).await? { Some(validators) => validators, None => { - warn!( + tracing::warn!( target: LOG_TARGET, core = ?our_core, "There are no validators assigned to {:?} core", our_core, @@ -381,7 +380,7 @@ where Some(id) if receipt.descriptor.para_id != id => { // If the ParaId of a collation requested to be distributed does not match // the one we expect, we ignore the message. - warn!( + tracing::warn!( target: LOG_TARGET, para_id = %receipt.descriptor.para_id, collating_on = %id, @@ -394,7 +393,7 @@ where distribute_collation(ctx, state, id, receipt, pov).await?; } None => { - warn!( + tracing::warn!( target: LOG_TARGET, para_id = %receipt.descriptor.para_id, "DistributeCollation message for para {:?} while not collating on any", @@ -404,19 +403,19 @@ where } } FetchCollation(_, _, _, _) => { - warn!( + tracing::warn!( target: LOG_TARGET, "FetchCollation message is not expected on the collator side of the protocol", ); } ReportCollator(_) => { - warn!( + tracing::warn!( target: LOG_TARGET, "ReportCollator message is not expected on the collator side of the protocol", ); } NoteGoodCollation(_) => { - warn!( + tracing::warn!( target: LOG_TARGET, "NoteGoodCollation message is not expected on the collator side of the protocol", ); @@ -427,7 +426,7 @@ where state, event, ).await { - warn!( + tracing::warn!( target: LOG_TARGET, err = ?e, "Failed to handle incoming network message: {:?}", e, @@ -483,13 +482,13 @@ where match 
msg { Declare(_) => { - warn!( + tracing::warn!( target: LOG_TARGET, "Declare message is not expected on the collator side of the protocol", ); } AdvertiseCollation(_, _) => { - warn!( + tracing::warn!( target: LOG_TARGET, "AdvertiseCollation message is not expected on the collator side of the protocol", ); @@ -502,7 +501,7 @@ where send_collation(ctx, state, request_id, origin, collation.0, collation.1).await?; } } else { - warn!( + tracing::warn!( target: LOG_TARGET, for_para_id = %para_id, our_para_id = %our_para_id, @@ -512,7 +511,7 @@ where } } None => { - warn!( + tracing::warn!( target: LOG_TARGET, for_para_id = %para_id, "Received a RequestCollation for {:?} while not collating on any para", @@ -522,7 +521,7 @@ where } } Collation(_, _, _) => { - warn!( + tracing::warn!( target: LOG_TARGET, "Collation message is not expected on the collator side of the protocol", ); @@ -658,7 +657,7 @@ where if let Some(mut request) = state.last_connection_request.take() { while let Poll::Ready(Some((validator_id, peer_id))) = futures::poll!(request.next()) { if let Err(err) = handle_validator_connected(&mut ctx, &mut state, peer_id, validator_id).await { - warn!( + tracing::warn!( target: LOG_TARGET, err = ?err, "Failed to declare our collator id: {:?}", @@ -674,7 +673,7 @@ where match msg? 
{ Communication { msg } => { if let Err(e) = process_msg(&mut ctx, &mut state, msg).await { - warn!(target: LOG_TARGET, err = ?e, "Failed to process message: {}", e); + tracing::warn!(target: LOG_TARGET, err = ?e, "Failed to process message: {}", e); } }, Signal(ActiveLeaves(_update)) => {} @@ -695,7 +694,6 @@ mod tests { use assert_matches::assert_matches; use futures::{executor, future, Future}; - use tracing::trace; use smallvec::smallvec; use sp_core::crypto::Pair; @@ -852,7 +850,7 @@ mod tests { overseer: &mut test_helpers::TestSubsystemContextHandle, msg: CollatorProtocolMessage, ) { - trace!("Sending message:\n{:?}", &msg); + tracing::trace!("Sending message:\n{:?}", &msg); overseer .send(FromOverseer::Communication { msg }) .timeout(TIMEOUT) @@ -867,7 +865,7 @@ mod tests { .await .expect(&format!("{:?} is more than enough to receive messages", TIMEOUT)); - trace!("Received message:\n{:?}", &msg); + tracing::trace!("Received message:\n{:?}", &msg); msg } @@ -876,7 +874,7 @@ mod tests { overseer: &mut test_helpers::TestSubsystemContextHandle, timeout: Duration, ) -> Option { - trace!("Waiting for message..."); + tracing::trace!("Waiting for message..."); overseer .recv() .timeout(timeout) diff --git a/node/network/collator-protocol/src/lib.rs b/node/network/collator-protocol/src/lib.rs index 72a0253a8724..f3fa186bd02c 100644 --- a/node/network/collator-protocol/src/lib.rs +++ b/node/network/collator-protocol/src/lib.rs @@ -21,7 +21,6 @@ use std::time::Duration; use futures::{channel::oneshot, FutureExt, TryFutureExt}; -use tracing::trace; use thiserror::Error; use polkadot_subsystem::{ @@ -140,7 +139,7 @@ async fn modify_reputation(ctx: &mut Context, peer: PeerId, rep: Rep) - where Context: SubsystemContext, { - trace!( + tracing::trace!( target: LOG_TARGET, rep = ?rep, peer_id = %peer, diff --git a/node/network/collator-protocol/src/validator_side.rs b/node/network/collator-protocol/src/validator_side.rs index de4874e49469..a5fa646f2de7 100644 --- 
a/node/network/collator-protocol/src/validator_side.rs +++ b/node/network/collator-protocol/src/validator_side.rs @@ -25,7 +25,6 @@ use futures::{ future::BoxFuture, stream::FuturesUnordered, }; -use tracing::{trace, warn}; use polkadot_primitives::v1::{ Id as ParaId, CandidateReceipt, CollatorId, Hash, PoV, @@ -206,7 +205,7 @@ where if let Err(e) = tx.send((collation.1.clone(), collation.2.clone())) { // We do not want this to be fatal because the receving subsystem // may have closed the results channel for some reason. - trace!( + tracing::trace!( target: LOG_TARGET, err = ?e, "Failed to send collation: {:?}", e, @@ -381,7 +380,7 @@ where Context: SubsystemContext { if !state.view.contains(&relay_parent) { - trace!( + tracing::trace!( target: LOG_TARGET, peer_id = %peer_id, para_id = %para_id, @@ -393,7 +392,7 @@ where } if state.requested_collations.contains_key(&(relay_parent, para_id.clone(), peer_id.clone())) { - trace!( + tracing::trace!( target: LOG_TARGET, peer_id = %peer_id, para_id = %para_id, @@ -620,14 +619,14 @@ where match msg { CollateOn(id) => { - warn!( + tracing::warn!( target: LOG_TARGET, para_id = %id, "CollateOn({}) message is not expected on the validator side of the protocol", id, ); } DistributeCollation(_, _) => { - warn!( + tracing::warn!( target: LOG_TARGET, "DistributeCollation message is not expected on the validator side of the protocol", ); @@ -647,7 +646,7 @@ where state, event, ).await { - warn!( + tracing::warn!( target: LOG_TARGET, err = ?e, "Failed to handle incoming network message: {:?}", e, @@ -681,7 +680,7 @@ where loop { if let Poll::Ready(msg) = futures::poll!(ctx.recv()) { let msg = msg?; - trace!(target: LOG_TARGET, msg = ?msg, "Received a message {:?}", msg); + tracing::trace!(target: LOG_TARGET, msg = ?msg, "Received a message {:?}", msg); match msg { Communication { msg } => process_msg(&mut ctx, msg, &mut state).await?, @@ -697,7 +696,7 @@ where // if the chain has not moved on yet. 
match request { CollationRequestResult::Timeout(id) => { - trace!(target: LOG_TARGET, id, "Request timed out {}", id); + tracing::trace!(target: LOG_TARGET, id, "Request timed out {}", id); request_timed_out(&mut ctx, &mut state, id).await?; } CollationRequestResult::Received(id) => { @@ -794,7 +793,7 @@ mod tests { overseer: &mut test_helpers::TestSubsystemContextHandle, msg: CollatorProtocolMessage, ) { - tracing::trace!("Sending message:\n{:?}", &msg); + tracing::trace!("Sending message:\n{:?}", &msg); overseer .send(FromOverseer::Communication { msg }) .timeout(TIMEOUT) @@ -809,7 +808,7 @@ .await .expect(&format!("{:?} is enough to receive messages.", TIMEOUT)); - tracing::trace!("Received message:\n{:?}", &msg); + tracing::trace!("Received message:\n{:?}", &msg); msg } @@ -818,7 +817,7 @@ overseer: &mut test_helpers::TestSubsystemContextHandle, timeout: Duration, ) -> Option { - tracing::trace!("Waiting for message..."); + tracing::trace!("Waiting for message..."); overseer .recv() .timeout(timeout) @@ -836,7 +835,7 @@ } = test_harness; let pair = CollatorPair::generate().0; - tracing::trace!("activating"); + tracing::trace!("activating"); overseer_send( &mut virtual_overseer, From f6eeb0d78954c1e59d0b4ebd70b77973e86be40b Mon Sep 17 00:00:00 2001 From: Peter Goodspeed-Niklaus <boxofrox@gmail.com> Date: Tue, 10 Nov 2020 16:54:29 +0100 Subject: [PATCH 10/33] fix availability distribution test --- Cargo.lock | 1 + node/network/availability-distribution/Cargo.toml | 1 + 2 files changed, 2 insertions(+) diff --git a/Cargo.lock b/Cargo.lock index 2b525b2011a4..c8b327bf7e20 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4839,6 +4839,7 @@ dependencies = [ "env_logger 0.7.1", "futures 0.3.5", "futures-timer 3.0.2", + "log 0.4.11", "parity-scale-codec", "polkadot-erasure-coding", "polkadot-node-network-protocol", diff --git a/node/network/availability-distribution/Cargo.toml 
b/node/network/availability-distribution/Cargo.toml index 6620aec29bca..9b5eaa219b1b 100644 --- a/node/network/availability-distribution/Cargo.toml +++ b/node/network/availability-distribution/Cargo.toml @@ -28,3 +28,4 @@ futures-timer = "3.0.2" env_logger = "0.7.1" assert_matches = "1.3.0" smallvec = "1" +log = "0.4.11" From 6ffc664d89cd4244a19edf0d59122a5a10c28f04 Mon Sep 17 00:00:00 2001 From: Peter Goodspeed-Niklaus Date: Tue, 10 Nov 2020 17:10:37 +0100 Subject: [PATCH 11/33] don't double-print errors --- node/collation-generation/src/lib.rs | 2 +- node/core/av-store/src/lib.rs | 2 +- node/core/backing/src/lib.rs | 6 ++---- node/core/bitfield-signing/src/lib.rs | 4 ++-- node/core/candidate-selection/src/lib.rs | 2 +- node/core/provisioner/src/lib.rs | 2 +- node/network/availability-distribution/src/lib.rs | 2 +- node/network/bitfield-distribution/src/lib.rs | 8 ++++---- node/network/bridge/src/lib.rs | 2 +- node/network/bridge/src/validator_discovery.rs | 2 +- node/network/collator-protocol/src/collator_side.rs | 4 ++-- node/network/collator-protocol/src/validator_side.rs | 4 ++-- node/service/src/lib.rs | 2 +- node/subsystem-util/src/lib.rs | 4 ++-- 14 files changed, 22 insertions(+), 24 deletions(-) diff --git a/node/collation-generation/src/lib.rs b/node/collation-generation/src/lib.rs index 1773ac666547..d5656d8b6c49 100644 --- a/node/collation-generation/src/lib.rs +++ b/node/collation-generation/src/lib.rs @@ -96,7 +96,7 @@ impl CollationGenerationSubsystem { msg = receiver.next().fuse() => { if let Some(msg) = msg { if let Err(err) = ctx.send_message(msg).await { - tracing::warn!(target: LOG_TARGET, err = ?err, "failed to forward message to overseer: {:?}", err); + tracing::warn!(target: LOG_TARGET, err = ?err, "failed to forward message to overseer"); break; } } diff --git a/node/core/av-store/src/lib.rs b/node/core/av-store/src/lib.rs index 9281cf9bb8f2..7528da3c5c6e 100644 --- a/node/core/av-store/src/lib.rs +++ b/node/core/av-store/src/lib.rs @@ 
-1003,7 +1003,7 @@ fn query_inner( } Ok(None) => None, Err(e) => { - tracing::warn!(target: LOG_TARGET, err = ?e, "Error reading from the availability store: {:?}", e); + tracing::warn!(target: LOG_TARGET, err = ?e, "Error reading from the availability store"); None } } diff --git a/node/core/backing/src/lib.rs b/node/core/backing/src/lib.rs index 115f86ca7638..729269cb50c8 100644 --- a/node/core/backing/src/lib.rs +++ b/node/core/backing/src/lib.rs @@ -784,8 +784,7 @@ impl util::JobTrait for CandidateBackingJob { tracing::warn!( target: LOG_TARGET, err = ?e, - "Failed to fetch runtime API data for job: {:?}", - e, + "Failed to fetch runtime API data for job", ); // We can't do candidate validation work if we don't have the @@ -825,8 +824,7 @@ impl util::JobTrait for CandidateBackingJob { tracing::warn!( target: LOG_TARGET, err = ?e, - "Cannot participate in candidate backing: {:?}", - e + "Cannot participate in candidate backing", ); return Ok(()) diff --git a/node/core/bitfield-signing/src/lib.rs b/node/core/bitfield-signing/src/lib.rs index 62728cec9522..37e99b950e4c 100644 --- a/node/core/bitfield-signing/src/lib.rs +++ b/node/core/bitfield-signing/src/lib.rs @@ -164,7 +164,7 @@ async fn get_core_availability( Ok(None) => return Ok(false), Err(e) => { // Don't take down the node on runtime API errors. 
- tracing::warn!(target: LOG_TARGET, err = ?e, "Encountered a runtime API error: {:?}", e); + tracing::warn!(target: LOG_TARGET, err = ?e, "Encountered a runtime API error"); return Ok(false); } }; @@ -380,7 +380,7 @@ mod tests { assert!(!r.0.get(2).unwrap()); break }, - Err(e) => panic!("Failed: {:?}", e), + Err(e) => panic!("Failed"), }, } } diff --git a/node/core/candidate-selection/src/lib.rs b/node/core/candidate-selection/src/lib.rs index 90121363d2c7..b00ae2e016a8 100644 --- a/node/core/candidate-selection/src/lib.rs +++ b/node/core/candidate-selection/src/lib.rs @@ -232,7 +232,7 @@ impl CandidateSelectionJob { ) .await { - Err(err) => tracing::warn!(target: TARGET, err = ?err, "failed to second a candidate: {:?}", err), + Err(err) => tracing::warn!(target: TARGET, err = ?err, "failed to second a candidate"), Ok(()) => self.seconded_candidate = Some(collator_id), } } diff --git a/node/core/provisioner/src/lib.rs b/node/core/provisioner/src/lib.rs index 7f2a9d431cc6..dc3fb4137393 100644 --- a/node/core/provisioner/src/lib.rs +++ b/node/core/provisioner/src/lib.rs @@ -206,7 +206,7 @@ impl ProvisioningJob { ) .await { - tracing::warn!(target: LOG_TARGET, err = ?err, "failed to assemble or send inherent data: {:?}", err); + tracing::warn!(target: LOG_TARGET, err = ?err, "failed to assemble or send inherent data"); self.metrics.on_inherent_data_request(Err(())); } else { self.metrics.on_inherent_data_request(Ok(())); diff --git a/node/network/availability-distribution/src/lib.rs b/node/network/availability-distribution/src/lib.rs index 635ca56cfb2c..e9c04e177f1c 100644 --- a/node/network/availability-distribution/src/lib.rs +++ b/node/network/availability-distribution/src/lib.rs @@ -798,7 +798,7 @@ impl AvailabilityDistributionSubsystem { tracing::warn!( target: TARGET, err = ?e, - "Failed to handle incoming network messages: {:?}", e + "Failed to handle incoming network messages", ); } } diff --git a/node/network/bitfield-distribution/src/lib.rs 
b/node/network/bitfield-distribution/src/lib.rs index ee6bc19c79ad..70b4cca9b6a8 100644 --- a/node/network/bitfield-distribution/src/lib.rs +++ b/node/network/bitfield-distribution/src/lib.rs @@ -154,7 +154,7 @@ impl BitfieldDistribution { let message = match ctx.recv().await { Ok(message) => message, Err(e) => { - tracing::debug!(target: LOG_TARGET, err = ?e, "Failed to receive a message from Overseer: {}, exiting", e); + tracing::debug!(target: LOG_TARGET, err = ?e, "Failed to receive a message from Overseer, exiting"); return; }, }; @@ -179,7 +179,7 @@ impl BitfieldDistribution { tracing::trace!(target: LOG_TARGET, "Processing NetworkMessage"); // a network message was received if let Err(e) = handle_network_msg(&mut ctx, &mut state, &self.metrics, event).await { - tracing::warn!(target: LOG_TARGET, err = ?e, "Failed to handle incoming network messages: {:?}", e); + tracing::warn!(target: LOG_TARGET, err = ?e, "Failed to handle incoming network messages"); } } FromOverseer::Signal(OverseerSignal::ActiveLeaves(ActiveLeavesUpdate { activated, deactivated })) => { @@ -203,7 +203,7 @@ impl BitfieldDistribution { ); } Err(e) => { - tracing::warn!(target: LOG_TARGET, err = ?e, "query_basics has failed: {}", e); + tracing::warn!(target: LOG_TARGET, err = ?e, "query_basics has failed"); } _ => {}, } @@ -643,7 +643,7 @@ where SigningContext { parent_hash: relay_parent, session_index: s }, ))), (Err(e), _) | (_, Err(e)) => { - tracing::warn!(target: LOG_TARGET, err = ?e, "Failed to fetch basics from runtime API: {:?}", e); + tracing::warn!(target: LOG_TARGET, err = ?e, "Failed to fetch basics from runtime API"); Ok(None) } } diff --git a/node/network/bridge/src/lib.rs b/node/network/bridge/src/lib.rs index 59e87cddd228..820f9b9f25b0 100644 --- a/node/network/bridge/src/lib.rs +++ b/node/network/bridge/src/lib.rs @@ -291,7 +291,7 @@ fn action_from_overseer_message( Ok(FromOverseer::Signal(OverseerSignal::BlockFinalized(_))) => Action::Nop, Err(e) => { - 
tracing::warn!(target: TARGET, err = ?e, "Shutting down Network Bridge due to error {:?}", e); + tracing::warn!(target: TARGET, err = ?e, "Shutting down Network Bridge due to error"); Action::Abort } } diff --git a/node/network/bridge/src/validator_discovery.rs b/node/network/bridge/src/validator_discovery.rs index d37de99313fa..bed6fee264ab 100644 --- a/node/network/bridge/src/validator_discovery.rs +++ b/node/network/bridge/src/validator_discovery.rs @@ -283,7 +283,7 @@ impl Service { PRIORITY_GROUP.to_owned(), multiaddr_to_add, ).await { - tracing::warn!(target: super::TARGET, err = ?e, "AuthorityDiscoveryService returned an invalid multiaddress: {}", e); + tracing::warn!(target: super::TARGET, err = ?e, "AuthorityDiscoveryService returned an invalid multiaddress"); } // the addresses are known to be valid let _ = network_service.remove_from_priority_group(PRIORITY_GROUP.to_owned(), multiaddr_to_remove).await; diff --git a/node/network/collator-protocol/src/collator_side.rs b/node/network/collator-protocol/src/collator_side.rs index 66a9409f41b7..d030d90bcebf 100644 --- a/node/network/collator-protocol/src/collator_side.rs +++ b/node/network/collator-protocol/src/collator_side.rs @@ -429,7 +429,7 @@ where tracing::warn!( target: LOG_TARGET, err = ?e, - "Failed to handle incoming network message: {:?}", e, + "Failed to handle incoming network message", ); } }, @@ -673,7 +673,7 @@ where match msg? 
{ Communication { msg } => { if let Err(e) = process_msg(&mut ctx, &mut state, msg).await { - tracing::warn!(target: LOG_TARGET, err = ?e, "Failed to process message: {}", e); + tracing::warn!(target: LOG_TARGET, err = ?e, "Failed to process message"); } }, Signal(ActiveLeaves(_update)) => {} diff --git a/node/network/collator-protocol/src/validator_side.rs b/node/network/collator-protocol/src/validator_side.rs index a5fa646f2de7..ca327dfe2c62 100644 --- a/node/network/collator-protocol/src/validator_side.rs +++ b/node/network/collator-protocol/src/validator_side.rs @@ -208,7 +208,7 @@ where tracing::trace!( target: LOG_TARGET, err = ?e, - "Failed to send collation: {:?}", e, + "Failed to send collation", ); } return Ok(()); @@ -649,7 +647,7 @@ where tracing::warn!( target: LOG_TARGET, err = ?e, - "Failed to handle incoming network message: {:?}", e, + "Failed to handle incoming network message", ); } } diff --git a/node/service/src/lib.rs b/node/service/src/lib.rs index 11c5747b1aa5..5a7a0ff16394 100644 --- a/node/service/src/lib.rs +++ b/node/service/src/lib.rs @@ -421,7 +421,7 @@ where all_subsystems, registry, spawner, - ).map_err(|e| Error::Other(format!("Failed to create an Overseer: {:?}", e))) + ).map_err(|e| Error::Other(format!("Failed to create an Overseer: {:?}", e))) } #[cfg(feature = "full-node")] diff --git a/node/subsystem-util/src/lib.rs b/node/subsystem-util/src/lib.rs index 5a49c873cd08..6a75d231164f 100644 --- a/node/subsystem-util/src/lib.rs +++ b/node/subsystem-util/src/lib.rs @@ -594,7 +594,7 @@ impl Jobs { // there's no point trying to propagate this error onto the channel too // all we can do is warn that error propagation has failed if let Err(e) = err_tx.send((Some(parent_hash), JobsError::Job(e))).await { - tracing::warn!(err = ?e, "failed to forward error: {:?}", e); + tracing::warn!(err = ?e, "failed to forward error"); } } } @@ -770,7 +770,7 @@ where // if we can't send on the error transmission channel, we can't do anything useful about it // 
still, we can at least log the failure if let Err(e) = err_tx.send((hash, err)).await { - tracing::warn!(err = ?e, "failed to forward error: {:?}", e); + tracing::warn!(err = ?e, "failed to forward error"); } } } From b050579cc46a938d45166fb815d258423d721a31 Mon Sep 17 00:00:00 2001 From: Peter Goodspeed-Niklaus Date: Tue, 10 Nov 2020 17:48:11 +0100 Subject: [PATCH 12/33] remove further redundancy from logs --- node/collation-generation/src/lib.rs | 13 +++----- node/core/av-store/src/lib.rs | 18 +++++------ node/core/av-store/src/tests.rs | 6 ++-- node/core/bitfield-signing/src/lib.rs | 4 +-- node/core/candidate-selection/src/lib.rs | 9 ++---- node/core/proposer/src/lib.rs | 2 +- .../availability-distribution/src/lib.rs | 6 ++-- .../availability-distribution/src/tests.rs | 6 ++-- node/network/bitfield-distribution/src/lib.rs | 23 ++++++------- .../collator-protocol/src/collator_side.rs | 29 +++++++---------- node/network/collator-protocol/src/lib.rs | 2 +- .../collator-protocol/src/validator_side.rs | 12 +++---- node/network/pov-distribution/src/lib.rs | 10 +++--- .../network/statement-distribution/src/lib.rs | 11 +++---- node/overseer/src/lib.rs | 8 ++--- node/subsystem-util/src/lib.rs | 32 +++++++------------ 16 files changed, 77 insertions(+), 114 deletions(-) diff --git a/node/collation-generation/src/lib.rs b/node/collation-generation/src/lib.rs index d5656d8b6c49..39fae8c8726b 100644 --- a/node/collation-generation/src/lib.rs +++ b/node/collation-generation/src/lib.rs @@ -130,7 +130,7 @@ impl CollationGenerationSubsystem { if let Err(err) = handle_new_activations(config.clone(), &activated, ctx, metrics, sender).await { - tracing::warn!(target: LOG_TARGET, err = ?err, "failed to handle new activations: {}", err); + tracing::warn!(target: LOG_TARGET, err = ?err, "failed to handle new activations"); }; } false @@ -242,8 +242,7 @@ async fn handle_new_activations( tracing::debug!( target: LOG_TARGET, para_id = %scheduled_core.para_id, - "collator returned no 
collation on collate for para_id {}.", - scheduled_core.para_id, + "collator returned no collation on collate", ); return } @@ -269,9 +268,7 @@ async fn handle_new_activations( target: LOG_TARGET, para_id = %scheduled_core.para_id, err = ?err, - "failed to calculate erasure root for para_id {}: {:?}", - scheduled_core.para_id, - err + "failed to calculate erasure root", ); return } @@ -308,9 +305,7 @@ async fn handle_new_activations( target: LOG_TARGET, para_id = %scheduled_core.para_id, err = ?err, - "failed to send collation result for para_id {}: {:?}", - scheduled_core.para_id, - err + "failed to send collation result", ); } })).await?; diff --git a/node/core/av-store/src/lib.rs b/node/core/av-store/src/lib.rs index 7528da3c5c6e..3ed5149c2ce5 100644 --- a/node/core/av-store/src/lib.rs +++ b/node/core/av-store/src/lib.rs @@ -77,9 +77,9 @@ impl Error { match self { // don't spam the log with spurious errors Self::RuntimeApi(_) | - Self::Oneshot(_) => tracing::debug!(target: LOG_TARGET, "{:?}", self), + Self::Oneshot(_) => tracing::debug!(target: LOG_TARGET, err = ?self), // it's worth reporting otherwise - _ => tracing::warn!(target: LOG_TARGET, "{:?}", self), + _ => tracing::warn!(target: LOG_TARGET, err = ?self), } } } @@ -322,7 +322,7 @@ impl AvailabilityStoreSubsystem { .count(); for record in pov_pruning.drain(..outdated_records_count) { - tracing::trace!(target: LOG_TARGET, record = ?record, "Removing record {:?}", record); + tracing::trace!(target: LOG_TARGET, record = ?record, "Removing record"); tx.delete( columns::DATA, available_data_key(&record.candidate_hash).as_slice(), @@ -346,7 +346,7 @@ impl AvailabilityStoreSubsystem { .count(); for record in chunk_pruning.drain(..outdated_records_count) { - tracing::trace!(target: LOG_TARGET, record = ?record, "Removing record {:?}", record); + tracing::trace!(target: LOG_TARGET, record = ?record, "Removing record"); tx.delete( columns::DATA, erasure_chunk_key(&record.candidate_hash, 
record.chunk_index).as_slice(), @@ -565,8 +565,7 @@ where tracing::trace!( target: LOG_TARGET, block_number = %record.block_number, - "Updating pruning record for finalized block {}", - record.block_number, + "Updating pruning record for finalized block", ); record.prune_at = PruningDelay::into_the_future( @@ -585,8 +584,7 @@ where tracing::trace!( target: LOG_TARGET, block_number = %record.block_number, - "Updating chunk pruning record for finalized block {}", - record.block_number, + "Updating chunk pruning record for finalized block", ); record.prune_at = PruningDelay::into_the_future( @@ -613,12 +611,12 @@ where let events = match request_candidate_events(ctx, hash).await { Ok(events) => events, Err(err) => { - tracing::debug!(target: LOG_TARGET, err = ?err, "requesting candidate events failed due to {}", err); + tracing::debug!(target: LOG_TARGET, err = ?err, "requesting candidate events failed"); return Ok(()); } }; - tracing::trace!(target: LOG_TARGET, hash = %hash, "block activated: {:?}", hash); + tracing::trace!(target: LOG_TARGET, hash = %hash, "block activated"); let mut included = HashSet::new(); for event in events.into_iter() { diff --git a/node/core/av-store/src/tests.rs b/node/core/av-store/src/tests.rs index 3d2e5d07d533..19a7339eebb7 100644 --- a/node/core/av-store/src/tests.rs +++ b/node/core/av-store/src/tests.rs @@ -127,7 +127,7 @@ async fn overseer_send( overseer: &mut test_helpers::TestSubsystemContextHandle, msg: AvailabilityStoreMessage, ) { - tracing::trace!("Sending message:\n{:?}", &msg); + tracing::trace!(msg = ?msg, "sending message"); overseer .send(FromOverseer::Communication { msg }) .timeout(TIMEOUT) @@ -142,7 +142,7 @@ async fn overseer_recv( .await .expect(&format!("{:?} is more than enough to receive messages", TIMEOUT)); - tracing::trace!("Received message:\n{:?}", &msg); + tracing::trace!(msg = ?msg, "received message"); msg } @@ -151,7 +151,7 @@ async fn overseer_recv_with_timeout( overseer: &mut 
test_helpers::TestSubsystemContextHandle, timeout: Duration, ) -> Option { - tracing::trace!("Waiting for message..."); + tracing::trace!("waiting for message..."); overseer .recv() .timeout(timeout) diff --git a/node/core/bitfield-signing/src/lib.rs b/node/core/bitfield-signing/src/lib.rs index 37e99b950e4c..e40446646f68 100644 --- a/node/core/bitfield-signing/src/lib.rs +++ b/node/core/bitfield-signing/src/lib.rs @@ -294,7 +294,7 @@ impl JobTrait for BitfieldSigningJob { { Err(Error::Runtime(runtime_err)) => { // Don't take down the node on runtime API errors. - tracing::warn!(target: LOG_TARGET, err = ?runtime_err, "Encountered a runtime API error: {:?}", runtime_err); + tracing::warn!(target: LOG_TARGET, err = ?runtime_err, "Encountered a runtime API error"); return Ok(()); } Err(err) => return Err(err), @@ -380,7 +380,7 @@ mod tests { assert!(!r.0.get(2).unwrap()); break }, - Err(e) => panic!("Failed"), + Err(e) => panic!("Failed: {:?}", e), }, } } diff --git a/node/core/candidate-selection/src/lib.rs b/node/core/candidate-selection/src/lib.rs index b00ae2e016a8..0de8eb994b0b 100644 --- a/node/core/candidate-selection/src/lib.rs +++ b/node/core/candidate-selection/src/lib.rs @@ -216,8 +216,7 @@ impl CandidateSelectionJob { tracing::warn!( target: TARGET, err = ?err, - "failed to get collation from collator protocol subsystem: {:?}", - err + "failed to get collation from collator protocol subsystem", ); return; } @@ -252,8 +251,7 @@ impl CandidateSelectionJob { tracing::info!( target: TARGET, candidate_receipt = ?candidate_receipt, - "received invalidity note for candidate {:?}", - candidate_receipt + "received invalidity note for candidate", ); let result = @@ -261,8 +259,7 @@ impl CandidateSelectionJob { tracing::warn!( target: TARGET, err = ?err, - "failed to forward invalidity note: {:?}", - err + "failed to forward invalidity note", ); Err(()) } else { diff --git a/node/core/proposer/src/lib.rs b/node/core/proposer/src/lib.rs index 
70d3117d7637..82507bdbf8c1 100644 --- a/node/core/proposer/src/lib.rs +++ b/node/core/proposer/src/lib.rs @@ -193,7 +193,7 @@ where let provisioner_data = match self.get_provisioner_data().await { Ok(pd) => pd, Err(err) => { - tracing::warn!(err = ?err, "could not get provisioner inherent data; injecting default data: {}", err); + tracing::warn!(err = ?err, "could not get provisioner inherent data; injecting default data"); Default::default() } }; diff --git a/node/network/availability-distribution/src/lib.rs b/node/network/availability-distribution/src/lib.rs index e9c04e177f1c..5c73d171a031 100644 --- a/node/network/availability-distribution/src/lib.rs +++ b/node/network/availability-distribution/src/lib.rs @@ -959,9 +959,9 @@ where { tracing::trace!( target: TARGET, - "Reputation change of {:?} for peer {:?}", - rep, - peer + rep = ?rep, + peer_id = ?peer, + "Reputation change for peer", ); ctx.send_message(AllMessages::NetworkBridge( NetworkBridgeMessage::ReportPeer(peer, rep), diff --git a/node/network/availability-distribution/src/tests.rs b/node/network/availability-distribution/src/tests.rs index 6186dd0cee78..418c500ae4d7 100644 --- a/node/network/availability-distribution/src/tests.rs +++ b/node/network/availability-distribution/src/tests.rs @@ -103,7 +103,7 @@ async fn overseer_send( overseer: &mut test_helpers::TestSubsystemContextHandle, msg: AvailabilityDistributionMessage, ) { - tracing::trace!("Sending message:\n{:?}", &msg); + tracing::trace!(msg = ?msg, "sending message"); overseer .send(FromOverseer::Communication { msg }) .timeout(TIMEOUT) @@ -114,13 +114,13 @@ async fn overseer_send( async fn overseer_recv( overseer: &mut test_helpers::TestSubsystemContextHandle, ) -> AllMessages { - tracing::trace!("Waiting for message ..."); + tracing::trace!("waiting for message ..."); let msg = overseer .recv() .timeout(TIMEOUT) .await .expect("TIMEOUT is enough to recv."); - tracing::trace!("Received message:\n{:?}", &msg); + tracing::trace!(msg = ?msg, 
"received message"); msg } diff --git a/node/network/bitfield-distribution/src/lib.rs b/node/network/bitfield-distribution/src/lib.rs index 70b4cca9b6a8..0d4fa8ca1dec 100644 --- a/node/network/bitfield-distribution/src/lib.rs +++ b/node/network/bitfield-distribution/src/lib.rs @@ -170,7 +170,7 @@ impl BitfieldDistribution { hash, signed_availability, ).await { - tracing::warn!(target: LOG_TARGET, err = ?err, "Failed to reply to `DistributeBitfield` message: {}", err); + tracing::warn!(target: LOG_TARGET, err = ?err, "Failed to reply to `DistributeBitfield` message"); } } FromOverseer::Communication { @@ -184,7 +184,7 @@ impl BitfieldDistribution { } FromOverseer::Signal(OverseerSignal::ActiveLeaves(ActiveLeavesUpdate { activated, deactivated })) => { for relay_parent in activated { - tracing::trace!(target: LOG_TARGET, relay_parent = %relay_parent, "Start {:?}", relay_parent); + tracing::trace!(target: LOG_TARGET, relay_parent = %relay_parent, "activated"); // query basic system parameters once match query_basics(&mut ctx, relay_parent).await { Ok(Some((validator_set, signing_context))) => { @@ -210,12 +210,12 @@ impl BitfieldDistribution { } for relay_parent in deactivated { - tracing::trace!(target: LOG_TARGET, relay_parent = %relay_parent, "Stop {:?}", relay_parent); + tracing::trace!(target: LOG_TARGET, relay_parent = %relay_parent, "deactivated"); // defer the cleanup to the view change } } FromOverseer::Signal(OverseerSignal::BlockFinalized(hash)) => { - tracing::trace!(target: LOG_TARGET, hash = %hash, "Block finalized {:?}", hash); + tracing::trace!(target: LOG_TARGET, hash = %hash, "block finalized"); } FromOverseer::Signal(OverseerSignal::Conclude) => { tracing::trace!(target: LOG_TARGET, "Conclude"); @@ -235,7 +235,7 @@ async fn modify_reputation( where Context: SubsystemContext, { - tracing::trace!(target: LOG_TARGET, rep = ?rep, peer_id = %peer, "Reputation change of {:?} for peer {:?}", rep, peer); + tracing::trace!(target: LOG_TARGET, rep = ?rep, 
peer_id = %peer, "reputation change"); ctx.send_message(AllMessages::NetworkBridge( NetworkBridgeMessage::ReportPeer(peer, rep), )) @@ -270,7 +270,7 @@ where }; let validator_set = &job_data.validator_set; if validator_set.is_empty() { - tracing::trace!(target: LOG_TARGET, relay_parent = %relay_parent, "Validator set for {:?} is empty", relay_parent); + tracing::trace!(target: LOG_TARGET, relay_parent = %relay_parent, "validator set is empty"); return Ok(()); } @@ -345,8 +345,7 @@ where tracing::trace!( target: LOG_TARGET, relay_parent = %message.relay_parent, - "No peers are interested in gossip for relay parent {:?}", - message.relay_parent + "no peers are interested in gossip for relay parent", ); } else { ctx.send_message(AllMessages::NetworkBridge( @@ -389,8 +388,7 @@ where tracing::trace!( target: LOG_TARGET, relay_parent = %message.relay_parent, - "Validator set for relay parent {:?} is empty", - &message.relay_parent + "Validator set is empty", ); return modify_reputation(ctx, origin, COST_MISSING_PEER_SESSION_KEY).await; } @@ -432,8 +430,7 @@ where tracing::trace!( target: LOG_TARGET, validator_index, - "Already received a message for validator at index {}", - validator_index + "already received a message for validator", ); modify_reputation(ctx, origin, BENEFIT_VALID_MESSAGE).await?; return Ok(()); @@ -477,7 +474,7 @@ where NetworkBridgeEvent::PeerMessage(remote, message) => { match message { protocol_v1::BitfieldDistributionMessage::Bitfield(relay_parent, bitfield) => { - tracing::trace!(target: LOG_TARGET, peer_id = %remote, "Received bitfield gossip from peer {:?}", &remote); + tracing::trace!(target: LOG_TARGET, peer_id = %remote, "received bitfield gossip from peer"); let gossiped_bitfield = BitfieldGossipMessage { relay_parent, signed_availability: bitfield, diff --git a/node/network/collator-protocol/src/collator_side.rs b/node/network/collator-protocol/src/collator_side.rs index d030d90bcebf..9df0b5ffd8bc 100644 --- 
a/node/network/collator-protocol/src/collator_side.rs +++ b/node/network/collator-protocol/src/collator_side.rs @@ -154,8 +154,7 @@ where tracing::warn!( target: LOG_TARGET, relay_parent = %relay_parent, - "Distribute collation message parent {:?} is outside of our view", - relay_parent, + "distribute collation message parent is outside of our view", ); return Ok(()); @@ -175,7 +174,7 @@ where target: LOG_TARGET, para_id = %id, relay_parent = %relay_parent, - "Looks like no core is assigned to {} at {}", id, relay_parent, + "looks like no core is assigned to {} at {}", id, relay_parent, ); return Ok(()); } @@ -188,7 +187,7 @@ where tracing::warn!( target: LOG_TARGET, core = ?our_core, - "There are no validators assigned to {:?} core", our_core, + "there are no validators assigned to core", ); return Ok(()); @@ -384,9 +383,7 @@ where target: LOG_TARGET, para_id = %receipt.descriptor.para_id, collating_on = %id, - "DistributeCollation message for para {:?} while collating on {:?}", - receipt.descriptor.para_id, - id, + "DistributeCollation for unexpected para_id", ); } Some(id) => { @@ -396,8 +393,7 @@ where tracing::warn!( target: LOG_TARGET, para_id = %receipt.descriptor.para_id, - "DistributeCollation message for para {:?} while not collating on any", - receipt.descriptor.para_id, + "DistributeCollation message while not collating on any", ); } } @@ -505,8 +501,7 @@ where target: LOG_TARGET, for_para_id = %para_id, our_para_id = %our_para_id, - "Received a RequestCollation for {:?} while collating on {:?}", - para_id, our_para_id, + "received a RequestCollation for unexpected para_id", ); } } @@ -514,8 +509,7 @@ where tracing::warn!( target: LOG_TARGET, for_para_id = %para_id, - "Received a RequestCollation for {:?} while not collating on any para", - para_id, + "received a RequestCollation while not collating on any para", ); } } @@ -660,8 +654,7 @@ where tracing::warn!( target: LOG_TARGET, err = ?err, - "Failed to declare our collator id: {:?}", - err, + "Failed 
to declare our collator id", ); } } @@ -850,7 +843,7 @@ mod tests { overseer: &mut test_helpers::TestSubsystemContextHandle, msg: CollatorProtocolMessage, ) { - tracing::trace!("Sending message:\n{:?}", &msg); + tracing::trace!(msg = ?msg, "sending message"); overseer .send(FromOverseer::Communication { msg }) .timeout(TIMEOUT) @@ -865,7 +858,7 @@ mod tests { .await .expect(&format!("{:?} is more than enough to receive messages", TIMEOUT)); - tracing::trace!("Received message:\n{:?}", &msg); + tracing::trace!(msg = ?msg, "received message"); msg } @@ -874,7 +867,7 @@ mod tests { overseer: &mut test_helpers::TestSubsystemContextHandle, timeout: Duration, ) -> Option { - tracing::trace!("Waiting for message..."); + tracing::trace!("waiting for message..."); overseer .recv() .timeout(timeout) diff --git a/node/network/collator-protocol/src/lib.rs b/node/network/collator-protocol/src/lib.rs index f3fa186bd02c..0f12ae174452 100644 --- a/node/network/collator-protocol/src/lib.rs +++ b/node/network/collator-protocol/src/lib.rs @@ -143,7 +143,7 @@ where target: LOG_TARGET, rep = ?rep, peer_id = %peer, - "Reputation change of {:?} for peer {:?}", rep, peer, + "reputation change for peer", ); ctx.send_message(AllMessages::NetworkBridge( diff --git a/node/network/collator-protocol/src/validator_side.rs b/node/network/collator-protocol/src/validator_side.rs index ca327dfe2c62..c16d3f625543 100644 --- a/node/network/collator-protocol/src/validator_side.rs +++ b/node/network/collator-protocol/src/validator_side.rs @@ -385,8 +385,7 @@ where peer_id = %peer_id, para_id = %para_id, relay_parent = %relay_parent, - "Collation by {} on {} on relay parent {:?} is no longer in view", - peer_id, para_id, relay_parent, + "collation is no longer in view", ); return Ok(()); } @@ -397,8 +396,7 @@ where peer_id = %peer_id, para_id = %para_id, relay_parent = %relay_parent, - "Collation by {} on {} on relay parent {:?} has already been requested", - peer_id, para_id, relay_parent, + "collation 
has already been requested", ); return Ok(()); } @@ -622,7 +620,7 @@ where tracing::warn!( target: LOG_TARGET, para_id = %id, - "CollateOn({}) message is not expected on the validator side of the protocol", id, + "CollateOn message is not expected on the validator side of the protocol", ); } DistributeCollation(_, _) => { @@ -680,7 +678,7 @@ where loop { if let Poll::Ready(msg) = futures::poll!(ctx.recv()) { let msg = msg?; - tracing::trace!(target: LOG_TARGET, msg = ?msg, "Received a message {:?}", msg); + tracing::trace!(target: LOG_TARGET, msg = ?msg, "received a message"); match msg { Communication { msg } => process_msg(&mut ctx, msg, &mut state).await?, @@ -696,7 +694,7 @@ where // if the chain has not moved on yet. match request { CollationRequestResult::Timeout(id) => { - tracing::trace!(target: LOG_TARGET, id, "Request timed out {}", id); + tracing::trace!(target: LOG_TARGET, id, "request timed out"); request_timed_out(&mut ctx, &mut state, id).await?; } CollationRequestResult::Received(id) => { diff --git a/node/network/pov-distribution/src/lib.rs b/node/network/pov-distribution/src/lib.rs index d534d5c5bf3c..1c7082fbc080 100644 --- a/node/network/pov-distribution/src/lib.rs +++ b/node/network/pov-distribution/src/lib.rs @@ -137,8 +137,7 @@ async fn handle_signal( tracing::warn!( target: LOG_TARGET, err = ?e, - "Error fetching validators from runtime API for active leaf: {:?}", - e + "Error fetching validators from runtime API for active leaf", ); // Not adding bookkeeping here might make us behave funny, but we @@ -288,10 +287,9 @@ async fn handle_fetch( if relay_parent_state.fetching.len() > 2 * relay_parent_state.n_validators { tracing::warn!( - fetching_len=relay_parent_state.fetching.len(), - "Other subsystems have requested PoV distribution to \ - fetch more PoVs than reasonably expected: {}", - relay_parent_state.fetching.len()); + relay_parent_state.fetching.len = relay_parent_state.fetching.len(), + "other subsystems have requested PoV 
distribution to fetch more PoVs than reasonably expected", + ); return Ok(()); } diff --git a/node/network/statement-distribution/src/lib.rs b/node/network/statement-distribution/src/lib.rs index 71efeade0b5e..c74d6fa4c5ab 100644 --- a/node/network/statement-distribution/src/lib.rs +++ b/node/network/statement-distribution/src/lib.rs @@ -689,9 +689,8 @@ async fn handle_incoming_message<'a>( // This should never be out-of-sync with our view if the view updates // correspond to actual `StartWork` messages. So we just log and ignore. tracing::warn!( - relay_parent = %relay_parent, - "Our view out-of-sync with active heads. Head {} not found", - relay_parent, + requested_relay_parent = %relay_parent, + "our view out-of-sync with active heads; head not found", ); return Ok(None); } @@ -856,8 +855,7 @@ async fn handle_network_update( unknown_hash = %new, "Our network bridge view update \ inconsistent with `StartWork` messages we have received from overseer. \ - Contains unknown hash {}", - new, + Contains unknown hash.", ); } } @@ -912,8 +910,7 @@ impl StatementDistribution { tracing::warn!( target: LOG_TARGET, err = ?e, - "Failed to fetch runtime API data for active leaf: {:?}", - e, + "Failed to fetch runtime API data for active leaf", ); // Lacking this bookkeeping might make us behave funny, although diff --git a/node/overseer/src/lib.rs b/node/overseer/src/lib.rs index b014ca7b7ec7..c7bb807d53dc 100644 --- a/node/overseer/src/lib.rs +++ b/node/overseer/src/lib.rs @@ -1338,7 +1338,7 @@ where // Some subsystem exited? It's time to panic. 
if let Poll::Ready(Some(finished)) = poll!(self.running_subsystems.next()) { - tracing::error!(target: LOG_TARGET, subsystem = ?finished, "Subsystem finished unexpectedly {:?}", finished); + tracing::error!(target: LOG_TARGET, subsystem = ?finished, "subsystem finished unexpectedly"); self.stop().await; return finished; } @@ -1608,9 +1608,9 @@ fn spawn( let fut = Box::pin(async move { if let Err(e) = future.await { - tracing::error!(subsystem=name, err = ?e, "Subsystem {} exited with error {:?}", name, e); + tracing::error!(subsystem=name, err = ?e, "subsystem exited with error"); } else { - tracing::debug!(subsystem=name, "Subsystem {} exited without an error", name); + tracing::debug!(subsystem=name, "subsystem exited without an error"); } let _ = tx.send(()); }); @@ -1618,7 +1618,7 @@ fn spawn( spawner.spawn(name, fut); let _ = streams.push(from_rx); - futures.push(Box::pin(rx.map(|e| { tracing::warn!(err = ?e, "Dropping error {:?}", e); Ok(()) }))); + futures.push(Box::pin(rx.map(|e| { tracing::warn!(err = ?e, "dropping error"); Ok(()) }))); let instance = Some(SubsystemInstance { tx: to_tx, diff --git a/node/subsystem-util/src/lib.rs b/node/subsystem-util/src/lib.rs index 6a75d231164f..2c7129f35c17 100644 --- a/node/subsystem-util/src/lib.rs +++ b/node/subsystem-util/src/lib.rs @@ -580,13 +580,10 @@ impl Jobs { let (future, abort_handle) = future::abortable(async move { if let Err(e) = Job::run(parent_hash, run_args, metrics, to_job_rx, from_job_tx).await { tracing::error!( - job=Job::NAME, + job = Job::NAME, parent_hash = %parent_hash, err = ?e, - "{}({}) finished with an error {:?}", - Job::NAME, - parent_hash, - e, + "job finished with an error", ); if let Some(mut err_tx) = err_tx { @@ -635,7 +632,7 @@ impl Jobs { async fn send_msg(&mut self, parent_hash: Hash, msg: Job::ToJob) { if let Entry::Occupied(mut job) = self.running.entry(parent_hash) { if job.get_mut().send_msg(msg).await.is_err() { - tracing::debug!(job=Job::NAME, "failed to send message to job 
({}), will remove it", Job::NAME); + tracing::debug!(job = Job::NAME, "failed to send message to job, will remove it"); job.remove(); } } @@ -796,11 +793,9 @@ where let metrics = metrics.clone(); if let Err(e) = jobs.spawn_job(hash, run_args.clone(), metrics) { tracing::error!( - job=Job::NAME, + job = Job::NAME, err = ?e, - "Failed to spawn a job({}): {:?}", - Job::NAME, - e, + "failed to spawn a job", ); Self::fwd_err(Some(hash), JobsError::Utility(e), err_tx).await; return true; @@ -831,11 +826,9 @@ where .await { tracing::error!( - job=Job::NAME, + job = Job::NAME, err = ?e, - "failed to stop all jobs ({}) on conclude signal: {:?}", - Job::NAME, - e, + "failed to stop a job on conclude signal", ); let e = Error::from(e); Self::fwd_err(None, JobsError::Utility(e), err_tx).await; @@ -848,9 +841,8 @@ where match to_job.relay_parent() { Some(hash) => jobs.send_msg(hash, to_job).await, None => tracing::debug!( - job=Job::NAME, - "Trying to send a message to a job ({}) without specifying a relay parent.", - Job::NAME, + job = Job::NAME, + "trying to send a message to a job without specifying a relay parent", ), } } @@ -858,11 +850,9 @@ where Ok(Signal(BlockFinalized(_))) => {} Err(err) => { tracing::error!( - job=Job::NAME, + job = Job::NAME, err = ?err, - "error receiving message from subsystem context for job ({}): {:?}", - Job::NAME, - err, + "error receiving message from subsystem context for job", ); Self::fwd_err(None, JobsError::Utility(Error::from(err)), err_tx).await; return true; From 3ba81681c0cc1c3063182a7c78a73cd80c1729f0 Mon Sep 17 00:00:00 2001 From: Peter Goodspeed-Niklaus Date: Wed, 11 Nov 2020 09:30:10 +0100 Subject: [PATCH 13/33] fix test errors --- Cargo.lock | 2 ++ node/network/bitfield-distribution/Cargo.toml | 1 + node/network/collator-protocol/Cargo.toml | 1 + node/network/collator-protocol/src/validator_side.rs | 8 ++++---- 4 files changed, 8 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c8b327bf7e20..cd513ae69c41 
100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4815,6 +4815,7 @@ dependencies = [ "bitvec", "env_logger 0.7.1", "futures 0.3.5", + "log 0.4.11", "maplit", "parity-scale-codec", "polkadot-node-network-protocol", @@ -4888,6 +4889,7 @@ dependencies = [ "env_logger 0.7.1", "futures 0.3.5", "futures-timer 3.0.2", + "log 0.4.11", "polkadot-node-network-protocol", "polkadot-node-subsystem", "polkadot-node-subsystem-test-helpers", diff --git a/node/network/bitfield-distribution/Cargo.toml b/node/network/bitfield-distribution/Cargo.toml index e8c53fe5f62f..98f01c480dc0 100644 --- a/node/network/bitfield-distribution/Cargo.toml +++ b/node/network/bitfield-distribution/Cargo.toml @@ -22,6 +22,7 @@ sp-application-crypto = { git = "https://github.com/paritytech/substrate", branc sp-keystore = { git = "https://github.com/paritytech/substrate", branch = "master" } sc-keystore = { git = "https://github.com/paritytech/substrate", branch = "master" } maplit = "1.0.2" +log = "0.4.11" env_logger = "0.7.1" assert_matches = "1.3.0" tempfile = "3.1.0" diff --git a/node/network/collator-protocol/Cargo.toml b/node/network/collator-protocol/Cargo.toml index fb997c8de585..cb393de14dc3 100644 --- a/node/network/collator-protocol/Cargo.toml +++ b/node/network/collator-protocol/Cargo.toml @@ -17,6 +17,7 @@ polkadot-node-subsystem-util = { path = "../../subsystem-util" } polkadot-subsystem = { package = "polkadot-node-subsystem", path = "../../subsystem" } [dev-dependencies] +log = "0.4.11" env_logger = "0.7.1" assert_matches = "1.3.0" smallvec = "1.4.2" diff --git a/node/network/collator-protocol/src/validator_side.rs b/node/network/collator-protocol/src/validator_side.rs index c16d3f625543..00a2dcec557f 100644 --- a/node/network/collator-protocol/src/validator_side.rs +++ b/node/network/collator-protocol/src/validator_side.rs @@ -791,7 +791,7 @@ mod tests { overseer: &mut test_helpers::TestSubsystemContextHandle, msg: CollatorProtocolMessage, ) { - tracing::tracing::trace!("Sending 
message:\n{:?}", &msg); + tracing::trace!("Sending message:\n{:?}", &msg); overseer .send(FromOverseer::Communication { msg }) .timeout(TIMEOUT) @@ -806,7 +806,7 @@ mod tests { .await .expect(&format!("{:?} is enough to receive messages.", TIMEOUT)); - tracing::tracing::trace!("Received message:\n{:?}", &msg); + tracing::trace!("Received message:\n{:?}", &msg); msg } @@ -815,7 +815,7 @@ mod tests { overseer: &mut test_helpers::TestSubsystemContextHandle, timeout: Duration, ) -> Option { - tracing::tracing::trace!("Waiting for message..."); + tracing::trace!("Waiting for message..."); overseer .recv() .timeout(timeout) @@ -833,7 +833,7 @@ mod tests { } = test_harness; let pair = CollatorPair::generate().0; - tracing::tracing::trace!("activating"); + tracing::trace!("activating"); overseer_send( &mut virtual_overseer, From 68284e28cb9135444b77c8df8cad177fd4b3e9f2 Mon Sep 17 00:00:00 2001 From: Peter Goodspeed-Niklaus Date: Wed, 11 Nov 2020 10:09:59 +0100 Subject: [PATCH 14/33] fix more test errors --- Cargo.lock | 1 + node/core/av-store/Cargo.toml | 1 + 2 files changed, 2 insertions(+) diff --git a/Cargo.lock b/Cargo.lock index cd513ae69c41..0df3553a4236 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4975,6 +4975,7 @@ dependencies = [ "kvdb", "kvdb-memorydb", "kvdb-rocksdb", + "log 0.4.11", "parity-scale-codec", "polkadot-erasure-coding", "polkadot-node-subsystem", diff --git a/node/core/av-store/Cargo.toml b/node/core/av-store/Cargo.toml index 9c6aa8a6b60c..663d7c8216b2 100644 --- a/node/core/av-store/Cargo.toml +++ b/node/core/av-store/Cargo.toml @@ -23,6 +23,7 @@ polkadot-primitives = { path = "../../../primitives" } sc-service = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } [dev-dependencies] +log = "0.4.11" env_logger = "0.7.1" assert_matches = "1.3.0" smallvec = "1.4.2" From 3267b546f814ca4c37c3135b1a33a9cd876552c3 Mon Sep 17 00:00:00 2001 From: Peter Goodspeed-Niklaus Date: Wed, 11 Nov 2020 10:35:53 +0100 
Subject: [PATCH 15/33] remove unused kv_log_macro --- node/overseer/examples/minimal-example.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/node/overseer/examples/minimal-example.rs b/node/overseer/examples/minimal-example.rs index 03d810da2f0a..e07280c1ca59 100644 --- a/node/overseer/examples/minimal-example.rs +++ b/node/overseer/examples/minimal-example.rs @@ -25,7 +25,6 @@ use futures::{ FutureExt, StreamExt, }; use futures_timer::Delay; -use kv_log_macro as log; use polkadot_primitives::v1::{BlockData, PoV}; use polkadot_overseer::{Overseer, AllSubsystems}; From 27876134084250c44e45656bdeb0ea5890e3ad93 Mon Sep 17 00:00:00 2001 From: Peter Goodspeed-Niklaus Date: Wed, 11 Nov 2020 10:57:15 +0100 Subject: [PATCH 16/33] fix unused variable --- node/service/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/node/service/src/lib.rs b/node/service/src/lib.rs index 5a7a0ff16394..11c5747b1aa5 100644 --- a/node/service/src/lib.rs +++ b/node/service/src/lib.rs @@ -421,7 +421,7 @@ where all_subsystems, registry, spawner, - ).map_err(|e| Error::Other(format!("Failed to create an Overseer"))) + ).map_err(|e| Error::Other(format!("Failed to create an Overseer: {:?}", e))) } #[cfg(feature = "full-node")] From 5074b6444103131753185090750f8e107b3c4ca4 Mon Sep 17 00:00:00 2001 From: Peter Goodspeed-Niklaus Date: Thu, 12 Nov 2020 10:30:02 +0100 Subject: [PATCH 17/33] add tracing spans to collation generation --- node/collation-generation/src/lib.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/node/collation-generation/src/lib.rs b/node/collation-generation/src/lib.rs index 39fae8c8726b..6d37a157ddf7 100644 --- a/node/collation-generation/src/lib.rs +++ b/node/collation-generation/src/lib.rs @@ -74,7 +74,7 @@ impl CollationGenerationSubsystem { /// /// If `err_tx` is not `None`, errors are forwarded onto that channel as they occur. /// Otherwise, most are logged and then discarded. 
- #[tracing::instrument(skip(self, ctx), fields(subsystem = std::any::type_name::()))] + #[tracing::instrument(skip(self, ctx), fields(subsystem = LOG_TARGET))] async fn run(mut self, mut ctx: Context) where Context: SubsystemContext, @@ -109,6 +109,7 @@ impl CollationGenerationSubsystem { // note: this doesn't strictly need to be a separate function; it's more an administrative function // so that we don't clutter the run loop. It could in principle be inlined directly into there. // it should hopefully therefore be ok that it's an async function mutably borrowing self. + #[tracing::instrument(level = "trace", skip(self, ctx, sender), fields(subsystem = LOG_TARGET))] async fn handle_incoming( &mut self, incoming: SubsystemResult>, @@ -177,6 +178,7 @@ where } } +#[tracing::instrument(level = "trace", skip(ctx, metrics, sender), fields(subsystem = LOG_TARGET))] async fn handle_new_activations( config: Arc, activated: &[Hash], @@ -315,6 +317,7 @@ async fn handle_new_activations( Ok(()) } +#[tracing::instrument(level = "trace", fields(subsystem = LOG_TARGET))] fn erasure_root( n_validators: usize, persisted_validation: PersistedValidationData, From d83192d7d33ceabfeaa496e54cf0232b960a7200 Mon Sep 17 00:00:00 2001 From: Peter Goodspeed-Niklaus Date: Thu, 12 Nov 2020 10:44:28 +0100 Subject: [PATCH 18/33] add tracing spans to av-store --- node/core/av-store/src/lib.rs | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/node/core/av-store/src/lib.rs b/node/core/av-store/src/lib.rs index 3ed5149c2ce5..f89bc4b6e6bf 100644 --- a/node/core/av-store/src/lib.rs +++ b/node/core/av-store/src/lib.rs @@ -311,6 +311,7 @@ pub struct AvailabilityStoreSubsystem { impl AvailabilityStoreSubsystem { // Perform pruning of PoVs + #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))] fn prune_povs(&self) -> Result<(), Error> { let mut tx = DBTransaction::new(); let mut pov_pruning = pov_pruning(&self.inner).unwrap_or_default(); @@ 
-335,6 +336,7 @@ impl AvailabilityStoreSubsystem { } // Perform pruning of chunks. + #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))] fn prune_chunks(&self) -> Result<(), Error> { let mut tx = DBTransaction::new(); let mut chunk_pruning = chunk_pruning(&self.inner).unwrap_or_default(); @@ -361,6 +363,7 @@ impl AvailabilityStoreSubsystem { // Return a `Future` that either resolves when another PoV pruning has to happen // or is indefinitely `pending` in case no pruning has to be done. // Just a helper to `select` over multiple things at once. + #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))] fn maybe_prune_povs(&self) -> Result, Error> { let future = match get_next_pov_pruning_time(&self.inner) { Some(pruning) => { @@ -375,6 +378,7 @@ impl AvailabilityStoreSubsystem { // Return a `Future` that either resolves when another chunk pruning has to happen // or is indefinitely `pending` in case no pruning has to be done. // Just a helper to `select` over multiple things at once. + #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))] fn maybe_prune_chunks(&self) -> Result, Error> { let future = match get_next_chunk_pruning_time(&self.inner) { Some(pruning) => { @@ -473,7 +477,7 @@ fn get_next_chunk_pruning_time(db: &Arc) -> Option()))] +#[tracing::instrument(skip(subsystem, ctx), fields(subsystem = LOG_TARGET))] async fn run(mut subsystem: AvailabilityStoreSubsystem, mut ctx: Context) where Context: SubsystemContext, @@ -493,6 +497,7 @@ where } } +#[tracing::instrument(level = "trace", skip(subsystem, ctx), fields(subsystem = LOG_TARGET))] async fn run_iteration(subsystem: &mut AvailabilityStoreSubsystem, ctx: &mut Context) -> Result where @@ -546,6 +551,7 @@ where /// The state of data has to be changed from /// `CandidateState::Included` to `CandidateState::Finalized` and their pruning times have /// to be updated to `now` + keep_finalized_{block, chunk}_for`. 
+#[tracing::instrument(level = "trace", skip(subsystem, ctx, db), fields(subsystem = LOG_TARGET))] async fn process_block_finalized( subsystem: &AvailabilityStoreSubsystem, ctx: &mut Context, @@ -600,6 +606,7 @@ where Ok(()) } +#[tracing::instrument(level = "trace", skip(ctx, db), fields(subsystem = LOG_TARGET))] async fn process_block_activated( ctx: &mut Context, db: &Arc, @@ -659,6 +666,7 @@ where Ok(()) } +#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))] async fn request_candidate_events( ctx: &mut Context, hash: Hash, @@ -678,6 +686,7 @@ where Ok(rx.await??) } +#[tracing::instrument(level = "trace", skip(subsystem, ctx), fields(subsystem = LOG_TARGET))] async fn process_message( subsystem: &mut AvailabilityStoreSubsystem, ctx: &mut Context, @@ -749,6 +758,7 @@ fn chunk_pruning(db: &Arc) -> Option> { query_inner(db, columns::META, &CHUNK_PRUNING_KEY) } +#[tracing::instrument(level = "trace", skip(db, tx), fields(subsystem = LOG_TARGET))] fn put_pov_pruning( db: &Arc, tx: Option, @@ -789,6 +799,7 @@ fn put_pov_pruning( Ok(()) } +#[tracing::instrument(level = "trace", skip(db, tx), fields(subsystem = LOG_TARGET))] fn put_chunk_pruning( db: &Arc, tx: Option, @@ -841,6 +852,7 @@ where Ok(rx.await??.map(|number| number).unwrap_or_default()) } +#[tracing::instrument(level = "trace", skip(subsystem, available_data), fields(subsystem = LOG_TARGET))] fn store_available_data( subsystem: &mut AvailabilityStoreSubsystem, candidate_hash: &CandidateHash, @@ -907,6 +919,7 @@ fn store_available_data( Ok(()) } +#[tracing::instrument(level = "trace", skip(subsystem), fields(subsystem = LOG_TARGET))] fn store_chunk( subsystem: &mut AvailabilityStoreSubsystem, candidate_hash: &CandidateHash, @@ -958,6 +971,7 @@ fn store_chunk( Ok(()) } +#[tracing::instrument(level = "trace", skip(subsystem), fields(subsystem = LOG_TARGET))] fn get_chunk( subsystem: &mut AvailabilityStoreSubsystem, candidate_hash: &CandidateHash, @@ -1023,6 +1037,7 @@ where } } 
+#[tracing::instrument(level = "trace", skip(metrics), fields(subsystem = LOG_TARGET))] fn get_chunks(data: &AvailableData, n_validators: usize, metrics: &Metrics) -> Result, Error> { let chunks = erasure::obtain_chunks_v1(n_validators, data)?; metrics.on_chunks_received(chunks.len()); From 01b4dd6803217f3511379111a1f696b61eaa1bd5 Mon Sep 17 00:00:00 2001 From: Peter Goodspeed-Niklaus Date: Thu, 12 Nov 2020 10:51:37 +0100 Subject: [PATCH 19/33] add tracing spans to backing --- node/core/backing/src/lib.rs | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/node/core/backing/src/lib.rs b/node/core/backing/src/lib.rs index 729269cb50c8..a0243e16e0ea 100644 --- a/node/core/backing/src/lib.rs +++ b/node/core/backing/src/lib.rs @@ -244,6 +244,7 @@ fn primitive_statement_to_table(s: &SignedFullStatement) -> TableSignedStatement } } +#[tracing::instrument(level = "trace", skip(attested, table_context), fields(subsystem = LOG_TARGET))] fn table_attested_to_backed( attested: TableAttestedCandidate< ParaId, @@ -308,6 +309,7 @@ impl CandidateBackingJob { /// Validate the candidate that is requested to be `Second`ed and distribute validation result. /// /// Returns `Ok(true)` if we issued a `Seconded` statement about this candidate. + #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))] async fn validate_and_second( &mut self, candidate: &CandidateReceipt, @@ -390,6 +392,7 @@ impl CandidateBackingJob { Ok(()) } + #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))] fn get_backed(&self) -> Vec { let proposed = self.table.proposed_candidates(&self.table_context); let mut res = Vec::with_capacity(proposed.len()); @@ -407,6 +410,7 @@ impl CandidateBackingJob { /// Check if there have happened any new misbehaviors and issue necessary messages. 
/// /// TODO: Report multiple misbehaviors (https://github.com/paritytech/polkadot/issues/1387) + #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))] async fn issue_new_misbehaviors(&mut self) -> Result<(), Error> { let mut reports = Vec::new(); @@ -440,6 +444,7 @@ impl CandidateBackingJob { } /// Import a statement into the statement table and return the summary of the import. + #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))] async fn import_statement( &mut self, statement: &SignedFullStatement, @@ -474,6 +479,7 @@ impl CandidateBackingJob { Ok(summary) } + #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))] async fn process_msg(&mut self, msg: CandidateBackingMessage) -> Result<(), Error> { match msg { CandidateBackingMessage::Second(_, candidate, pov) => { @@ -521,6 +527,7 @@ impl CandidateBackingJob { } /// Kick off validation work and distribute the result as a signed statement. + #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))] async fn kick_off_validation_work( &mut self, summary: TableSummary, @@ -585,6 +592,7 @@ impl CandidateBackingJob { } /// Import the statement and kick off validation work if it is a part of our assignment. 
+ #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))] async fn maybe_validate_and_import( &mut self, statement: SignedFullStatement, @@ -600,6 +608,7 @@ impl CandidateBackingJob { Ok(()) } + #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))] async fn sign_statement(&self, statement: Statement) -> Option { let signed = self.table_context .validator @@ -611,6 +620,7 @@ impl CandidateBackingJob { Some(signed) } + #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))] fn check_statement_signature(&self, statement: &SignedFullStatement) -> Result<(), Error> { let idx = statement.validator_index() as usize; @@ -703,6 +713,7 @@ impl CandidateBackingJob { // This calls an inspection function before making the PoV available for any last checks // that need to be done. If the inspection function returns an error, this function returns // early without making the PoV available. + #[tracing::instrument(level = "trace", skip(self, pov, with_commitments), fields(subsystem = LOG_TARGET))] async fn make_pov_available( &mut self, pov: Arc, @@ -767,7 +778,7 @@ impl util::JobTrait for CandidateBackingJob { const NAME: &'static str = "CandidateBackingJob"; - #[tracing::instrument(skip(keystore, metrics, rx_to, tx_from), fields(subsystem = std::any::type_name::()))] + #[tracing::instrument(skip(keystore, metrics, rx_to, tx_from), fields(subsystem = LOG_TARGET))] fn run( parent: Hash, keystore: SyncCryptoStorePtr, From ff87f5a870fa2d21af1e4b136aea3db8ae7e265f Mon Sep 17 00:00:00 2001 From: Peter Goodspeed-Niklaus Date: Thu, 12 Nov 2020 10:59:49 +0100 Subject: [PATCH 20/33] add tracing spans to bitfield-signing --- node/core/bitfield-signing/Cargo.toml | 1 + node/core/bitfield-signing/src/lib.rs | 4 +++- primitives/src/v1.rs | 4 ++-- 3 files changed, 6 insertions(+), 3 deletions(-) diff --git a/node/core/bitfield-signing/Cargo.toml b/node/core/bitfield-signing/Cargo.toml index 
8c9ffef15f3d..8e1ee7ac2e91 100644 --- a/node/core/bitfield-signing/Cargo.toml +++ b/node/core/bitfield-signing/Cargo.toml @@ -7,6 +7,7 @@ edition = "2018" [dependencies] futures = "0.3.5" tracing = "0.1.21" +tracing-futures = "0.2.4" polkadot-primitives = { path = "../../../primitives" } polkadot-node-subsystem = { path = "../../subsystem" } polkadot-node-subsystem-util = { path = "../../subsystem-util" } diff --git a/node/core/bitfield-signing/src/lib.rs b/node/core/bitfield-signing/src/lib.rs index e40446646f68..a0d3f938aec7 100644 --- a/node/core/bitfield-signing/src/lib.rs +++ b/node/core/bitfield-signing/src/lib.rs @@ -140,6 +140,7 @@ pub enum Error { /// If there is a candidate pending availability, query the Availability Store /// for whether we have the availability chunk for our validator index. +#[tracing::instrument(level = "trace", skip(sender), fields(subsystem = LOG_TARGET))] async fn get_core_availability( relay_parent: Hash, core: CoreState, @@ -201,6 +202,7 @@ async fn get_availability_cores(relay_parent: Hash, sender: &mut mpsc::Sender()))] + #[tracing::instrument(skip(keystore, metrics, _receiver, sender), fields(subsystem = LOG_TARGET))] fn run( relay_parent: Hash, keystore: Self::RunArgs, diff --git a/primitives/src/v1.rs b/primitives/src/v1.rs index b8190dff04e1..a318871a4ca3 100644 --- a/primitives/src/v1.rs +++ b/primitives/src/v1.rs @@ -604,8 +604,8 @@ pub struct ScheduledCore { } /// The state of a particular availability core. -#[derive(Clone, Encode, Decode)] -#[cfg_attr(feature = "std", derive(PartialEq, Debug))] +#[derive(Clone, Encode, Decode, Debug)] +#[cfg_attr(feature = "std", derive(PartialEq))] pub enum CoreState { /// The core is currently occupied. 
#[codec(index = "0")] From 423d6bfd717244765e493972f7d1da8e86d66cb9 Mon Sep 17 00:00:00 2001 From: Peter Goodspeed-Niklaus Date: Thu, 12 Nov 2020 11:06:00 +0100 Subject: [PATCH 21/33] add tracing spans to candidate-selection --- node/core/candidate-selection/src/lib.rs | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/node/core/candidate-selection/src/lib.rs b/node/core/candidate-selection/src/lib.rs index 0de8eb994b0b..b4654d121b0c 100644 --- a/node/core/candidate-selection/src/lib.rs +++ b/node/core/candidate-selection/src/lib.rs @@ -37,7 +37,7 @@ use polkadot_primitives::v1::{CandidateReceipt, CollatorId, Hash, Id as ParaId, use std::{convert::TryFrom, pin::Pin}; use thiserror::Error; -const TARGET: &'static str = "candidate_selection"; +const LOG_TARGET: &'static str = "candidate_selection"; struct CandidateSelectionJob { sender: mpsc::Sender, @@ -134,7 +134,7 @@ impl JobTrait for CandidateSelectionJob { /// Run a job for the parent block indicated // // this function is in charge of creating and executing the job's main loop - #[tracing::instrument(skip(_relay_parent, _run_args, metrics, receiver, sender), fields(subsystem = std::any::type_name::()))] + #[tracing::instrument(skip(_relay_parent, _run_args, metrics, receiver, sender), fields(subsystem = LOG_TARGET))] fn run( _relay_parent: Hash, _run_args: Self::RunArgs, @@ -197,6 +197,7 @@ impl CandidateSelectionJob { Ok(()) } + #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))] async fn handle_collation( &mut self, relay_parent: Hash, @@ -214,7 +215,7 @@ impl CandidateSelectionJob { Ok(response) => response, Err(err) => { tracing::warn!( - target: TARGET, + target: LOG_TARGET, err = ?err, "failed to get collation from collator protocol subsystem", ); @@ -231,25 +232,26 @@ impl CandidateSelectionJob { ) .await { - Err(err) => tracing::warn!(target: TARGET, err = ?err, "failed to second a candidate"), + Err(err) => tracing::warn!(target: 
LOG_TARGET, err = ?err, "failed to second a candidate"), Ok(()) => self.seconded_candidate = Some(collator_id), } } } + #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))] async fn handle_invalid(&mut self, candidate_receipt: CandidateReceipt) { let received_from = match &self.seconded_candidate { Some(peer) => peer, None => { tracing::warn!( - target: TARGET, + target: LOG_TARGET, "received invalidity notice for a candidate we don't remember seconding" ); return; } }; tracing::info!( - target: TARGET, + target: LOG_TARGET, candidate_receipt = ?candidate_receipt, "received invalidity note for candidate", ); @@ -257,7 +259,7 @@ impl CandidateSelectionJob { let result = if let Err(err) = forward_invalidity_note(received_from, &mut self.sender).await { tracing::warn!( - target: TARGET, + target: LOG_TARGET, err = ?err, "failed to forward invalidity note", ); @@ -272,6 +274,7 @@ impl CandidateSelectionJob { // get a collation from the Collator Protocol subsystem // // note that this gets an owned clone of the sender; that's becuase unlike `forward_invalidity_note`, it's expected to take a while longer +#[tracing::instrument(level = "trace", skip(sender), fields(subsystem = LOG_TARGET))] async fn get_collation( relay_parent: Hash, para_id: ParaId, @@ -306,7 +309,7 @@ async fn second_candidate( .await { Err(err) => { - tracing::warn!(target: TARGET, err = ?err, "failed to send a seconding message"); + tracing::warn!(target: LOG_TARGET, err = ?err, "failed to send a seconding message"); metrics.on_second(Err(())); Err(err.into()) } From 08caa3f4402c90604db93cd376a73a5ec7d13df4 Mon Sep 17 00:00:00 2001 From: Peter Goodspeed-Niklaus Date: Thu, 12 Nov 2020 11:11:28 +0100 Subject: [PATCH 22/33] add tracing spans to candidate-validation --- node/core/candidate-validation/src/lib.rs | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/node/core/candidate-validation/src/lib.rs b/node/core/candidate-validation/src/lib.rs index 
dd80b68e1876..4d4d4e1f996d 100644 --- a/node/core/candidate-validation/src/lib.rs +++ b/node/core/candidate-validation/src/lib.rs @@ -85,7 +85,7 @@ impl Subsystem for CandidateValidationSubsystem where } } -#[tracing::instrument(skip(ctx, spawn, metrics), fields(subsystem = "CandidateValidationSubsystem"))] +#[tracing::instrument(skip(ctx, spawn, metrics), fields(subsystem = LOG_TARGET))] async fn run( mut ctx: impl SubsystemContext, spawn: impl SpawnNamed + Clone + 'static, @@ -177,6 +177,7 @@ enum AssumptionCheckOutcome { BadRequest, } +#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))] async fn check_assumption_validation_data( ctx: &mut impl SubsystemContext, descriptor: &CandidateDescriptor, @@ -227,6 +228,7 @@ async fn check_assumption_validation_data( }) } +#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))] async fn find_assumed_validation_data( ctx: &mut impl SubsystemContext, descriptor: &CandidateDescriptor, @@ -258,6 +260,7 @@ async fn find_assumed_validation_data( Ok(AssumptionCheckOutcome::DoesNotMatch) } +#[tracing::instrument(level = "trace", skip(ctx, pov, spawn), fields(subsystem = LOG_TARGET))] async fn spawn_validate_from_chain_state( ctx: &mut impl SubsystemContext, isolation_strategy: IsolationStrategy, @@ -317,6 +320,7 @@ async fn spawn_validate_from_chain_state( validation_result } +#[tracing::instrument(level = "trace", skip(ctx, validation_code, pov, spawn), fields(subsystem = LOG_TARGET))] async fn spawn_validate_exhaustive( ctx: &mut impl SubsystemContext, isolation_strategy: IsolationStrategy, @@ -346,6 +350,7 @@ async fn spawn_validate_exhaustive( /// Does basic checks of a candidate. Provide the encoded PoV-block. Returns `Ok` if basic checks /// are passed, `Err` otherwise. 
+#[tracing::instrument(level = "trace", skip(pov), fields(subsystem = LOG_TARGET))] fn perform_basic_checks( candidate: &CandidateDescriptor, max_block_data_size: Option, @@ -405,6 +410,7 @@ impl ValidationBackend for RealValidationBackend { /// Validates the candidate from exhaustive parameters. /// /// Sends the result of validation on the channel once complete. +#[tracing::instrument(level = "trace", skip(backend_arg, validation_code, pov, spawn), fields(subsystem = LOG_TARGET))] fn validate_candidate_exhaustive( backend_arg: B::Arg, persisted_validation_data: PersistedValidationData, From 63880f9bc165906120c98de05f29639a0f4baa25 Mon Sep 17 00:00:00 2001 From: Peter Goodspeed-Niklaus Date: Thu, 12 Nov 2020 11:20:40 +0100 Subject: [PATCH 23/33] add tracing spans to chain-api --- node/core/chain-api/src/lib.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/node/core/chain-api/src/lib.rs b/node/core/chain-api/src/lib.rs index 84f8b9baa9df..9868ead62330 100644 --- a/node/core/chain-api/src/lib.rs +++ b/node/core/chain-api/src/lib.rs @@ -44,6 +44,8 @@ use std::sync::Arc; use futures::prelude::*; +const LOG_TARGET: &str = "ChainApiSubsystem"; + /// The Chain API Subsystem implementation. 
pub struct ChainApiSubsystem { client: Arc, @@ -75,7 +77,7 @@ impl Subsystem for ChainApiSubsystem where } } -#[tracing::instrument(skip(ctx, subsystem), fields(subsystem = std::any::type_name::>()))] +#[tracing::instrument(skip(ctx, subsystem), fields(subsystem = LOG_TARGET))] async fn run( mut ctx: impl SubsystemContext, subsystem: ChainApiSubsystem, @@ -114,6 +116,7 @@ where let _ = response_channel.send(Ok(result)); }, ChainApiMessage::Ancestors { hash, k, response_channel } => { + tracing::span!(tracing::Level::TRACE, "ChainApiMessage::Ancestors", subsystem=LOG_TARGET, hash=%hash, k=k); let mut hash = hash; let next_parent = core::iter::from_fn(|| { From c5bb74becb9ca54df57b978a8fa8621c2b7f450e Mon Sep 17 00:00:00 2001 From: Peter Goodspeed-Niklaus Date: Thu, 12 Nov 2020 11:27:09 +0100 Subject: [PATCH 24/33] add tracing spans to provisioner --- node/core/provisioner/src/lib.rs | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/node/core/provisioner/src/lib.rs b/node/core/provisioner/src/lib.rs index dc3fb4137393..18913613c6b1 100644 --- a/node/core/provisioner/src/lib.rs +++ b/node/core/provisioner/src/lib.rs @@ -152,7 +152,7 @@ impl JobTrait for ProvisioningJob { /// Run a job for the parent block indicated // // this function is in charge of creating and executing the job's main loop - #[tracing::instrument(skip(_run_args, metrics, receiver, sender), fields(subsystem = std::any::type_name::()))] + #[tracing::instrument(skip(_run_args, metrics, receiver, sender), fields(subsystem = LOG_TARGET))] fn run( relay_parent: Hash, _run_args: Self::RunArgs, @@ -255,6 +255,7 @@ impl ProvisioningJob { Ok(()) } + #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))] fn note_provisionable_data(&mut self, provisionable_data: ProvisionableData) { match provisionable_data { ProvisionableData::Bitfield(_, signed_bitfield) => { @@ -287,6 +288,7 @@ type CoreAvailability = BitVec; /// When we're choosing bitfields to include, 
the rule should be simple: /// maximize availability. So basically, include all bitfields. And then /// choose a coherent set of candidates along with that. +#[tracing::instrument(level = "trace", skip(return_sender, from_job), fields(subsystem = LOG_TARGET))] async fn send_inherent_data( relay_parent: Hash, bitfields: &[SignedAvailabilityBitfield], @@ -324,6 +326,7 @@ async fn send_inherent_data( /// /// Note: This does not enforce any sorting precondition on the output; the ordering there will be unrelated /// to the sorting of the input. +#[tracing::instrument(level = "trace", fields(subsystem = LOG_TARGET))] fn select_availability_bitfields( cores: &[CoreState], bitfields: &[SignedAvailabilityBitfield], @@ -355,6 +358,7 @@ fn select_availability_bitfields( } /// Determine which cores are free, and then to the degree possible, pick a candidate appropriate to each free core. +#[tracing::instrument(level = "trace", skip(sender), fields(subsystem = LOG_TARGET))] async fn select_candidates( availability_cores: &[CoreState], bitfields: &[SignedAvailabilityBitfield], @@ -421,6 +425,7 @@ async fn select_candidates( /// Produces a block number 1 higher than that of the relay parent /// in the event of an invalid `relay_parent`, returns `Ok(0)` +#[tracing::instrument(level = "trace", skip(sender), fields(subsystem = LOG_TARGET))] async fn get_block_number_under_construction( relay_parent: Hash, sender: &mut mpsc::Sender, @@ -446,6 +451,7 @@ async fn get_block_number_under_construction( /// - construct a transverse slice along `core_idx` /// - bitwise-or it with the availability slice /// - count the 1 bits, compare to the total length; true on 2/3+ +#[tracing::instrument(level = "trace", fields(subsystem = LOG_TARGET))] fn bitfields_indicate_availability( core_idx: usize, bitfields: &[SignedAvailabilityBitfield], From 25e60ee46d236ec26da99fe7e18845fc0e046ee5 Mon Sep 17 00:00:00 2001 From: Peter Goodspeed-Niklaus Date: Thu, 12 Nov 2020 11:29:43 +0100 Subject: [PATCH 
25/33] add tracing spans to runtime-api --- node/core/runtime-api/src/lib.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/node/core/runtime-api/src/lib.rs b/node/core/runtime-api/src/lib.rs index abdf6f0f0198..255965815a43 100644 --- a/node/core/runtime-api/src/lib.rs +++ b/node/core/runtime-api/src/lib.rs @@ -40,6 +40,8 @@ use sp_api::{ProvideRuntimeApi}; use futures::prelude::*; +const LOG_TARGET: &str = "RuntimeApi"; + /// The `RuntimeApiSubsystem`. See module docs for more details. pub struct RuntimeApiSubsystem { client: Arc, @@ -66,7 +68,7 @@ impl Subsystem for RuntimeApiSubsystem where } } -#[tracing::instrument(skip(ctx, subsystem), fields(subsystem = std::any::type_name::>()))] +#[tracing::instrument(skip(ctx, subsystem), fields(subsystem = LOG_TARGET))] async fn run( mut ctx: impl SubsystemContext, subsystem: RuntimeApiSubsystem, @@ -91,6 +93,7 @@ async fn run( } } +#[tracing::instrument(level = "trace", skip(client, metrics), fields(subsystem = LOG_TARGET))] fn make_runtime_api_request( client: &Client, metrics: &Metrics, From 1f2c6280763920b0765326685f8df42c92b48eae Mon Sep 17 00:00:00 2001 From: Peter Goodspeed-Niklaus Date: Thu, 12 Nov 2020 11:41:13 +0100 Subject: [PATCH 26/33] add tracing spans to availability-distribution --- .../availability-distribution/src/lib.rs | 31 ++++++++++++++++--- 1 file changed, 26 insertions(+), 5 deletions(-) diff --git a/node/network/availability-distribution/src/lib.rs b/node/network/availability-distribution/src/lib.rs index 5c73d171a031..8ba58470cda1 100644 --- a/node/network/availability-distribution/src/lib.rs +++ b/node/network/availability-distribution/src/lib.rs @@ -52,7 +52,7 @@ use std::collections::{HashMap, HashSet}; use std::iter; use thiserror::Error; -const TARGET: &'static str = "avad"; +const LOG_TARGET: &'static str = "AvailabilityDistribution"; #[derive(Debug, Error)] enum Error { @@ -196,6 +196,7 @@ struct PerRelayParent { impl ProtocolState { /// Collects the relay_parents 
ancestors including the relay parents themselfes. + #[tracing::instrument(level = "trace", skip(relay_parents), fields(subsystem = LOG_TARGET))] fn extend_with_ancestors<'a>( &'a self, relay_parents: impl IntoIterator + 'a, @@ -217,6 +218,7 @@ impl ProtocolState { /// Unionize all cached entries for the given relay parents and its ancestors. /// Ignores all non existent relay parents, so this can be used directly with a peers view. /// Returns a map from candidate hash -> receipt + #[tracing::instrument(level = "trace", skip(relay_parents), fields(subsystem = LOG_TARGET))] fn cached_live_candidates_unioned<'a>( &'a self, relay_parents: impl IntoIterator + 'a, @@ -231,6 +233,7 @@ impl ProtocolState { .collect() } + #[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))] async fn add_relay_parent( &mut self, ctx: &mut Context, @@ -286,6 +289,7 @@ impl ProtocolState { Ok(()) } + #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))] fn remove_relay_parent(&mut self, relay_parent: &Hash) -> Result<()> { // we might be ancestor of some other relay_parent if let Some(ref mut descendants) = self.ancestry.get_mut(relay_parent) { @@ -326,6 +330,7 @@ impl ProtocolState { /// Deal with network bridge updates and track what needs to be tracked /// which depends on the message type received. +#[tracing::instrument(level = "trace", skip(ctx, keystore, metrics), fields(subsystem = LOG_TARGET))] async fn handle_network_msg( ctx: &mut Context, keystore: &SyncCryptoStorePtr, @@ -369,6 +374,7 @@ where } /// Handle the changes necessary when our view changes. 
+#[tracing::instrument(level = "trace", skip(ctx, keystore, metrics), fields(subsystem = LOG_TARGET))] async fn handle_our_view_change( ctx: &mut Context, keystore: &SyncCryptoStorePtr, @@ -506,6 +512,7 @@ where .await } +#[tracing::instrument(level = "trace", skip(ctx, metrics, message_iter), fields(subsystem = LOG_TARGET))] async fn send_tracked_gossip_messages_to_peers( ctx: &mut Context, per_candidate: &mut PerCandidate, @@ -555,6 +562,7 @@ where // Send the difference between two views which were not sent // to that particular peer. +#[tracing::instrument(level = "trace", skip(ctx, metrics), fields(subsystem = LOG_TARGET))] async fn handle_peer_view_change( ctx: &mut Context, state: &mut ProtocolState, @@ -632,6 +640,7 @@ async fn obtain_our_validator_index( } /// Handle an incoming message from a peer. +#[tracing::instrument(level = "trace", skip(ctx, metrics), fields(subsystem = LOG_TARGET))] async fn process_incoming_peer_message( ctx: &mut Context, state: &mut ProtocolState, @@ -711,7 +720,7 @@ where .await? { tracing::warn!( - target: TARGET, + target: LOG_TARGET, "Failed to store erasure chunk to availability store" ); } @@ -770,7 +779,7 @@ impl AvailabilityDistributionSubsystem { } /// Start processing work as passed on from the Overseer. - #[tracing::instrument(skip(self, ctx), fields(subsystem = std::any::type_name::()))] + #[tracing::instrument(skip(self, ctx), fields(subsystem = LOG_TARGET))] async fn run(self, mut ctx: Context) -> Result<()> where Context: SubsystemContext, @@ -796,7 +805,7 @@ impl AvailabilityDistributionSubsystem { .await { tracing::warn!( - target: TARGET, + target: LOG_TARGET, err = ?e, "Failed to handle incoming network messages", ); @@ -835,6 +844,7 @@ where } /// Obtain all live candidates based on an iterator of relay heads. 
+#[tracing::instrument(level = "trace", skip(ctx, relay_parents), fields(subsystem = LOG_TARGET))] async fn query_live_candidates_without_ancestors( ctx: &mut Context, relay_parents: impl IntoIterator, @@ -860,6 +870,7 @@ where /// Obtain all live candidates based on an iterator or relay heads including `k` ancestors. /// /// Relay parent. +#[tracing::instrument(level = "trace", skip(ctx, relay_parents), fields(subsystem = LOG_TARGET))] async fn query_live_candidates( ctx: &mut Context, state: &mut ProtocolState, @@ -922,6 +933,7 @@ where } /// Query all para IDs. +#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))] async fn query_para_ids(ctx: &mut Context, relay_parent: Hash) -> Result> where Context: SubsystemContext, @@ -953,12 +965,13 @@ where } /// Modify the reputation of a peer based on its behavior. +#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))] async fn modify_reputation(ctx: &mut Context, peer: PeerId, rep: Rep) -> Result<()> where Context: SubsystemContext, { tracing::trace!( - target: TARGET, + target: LOG_TARGET, rep = ?rep, peer_id = ?peer, "Reputation change for peer", @@ -971,6 +984,7 @@ where } /// Query the proof of validity for a particular candidate hash. 
+#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))] async fn query_data_availability(ctx: &mut Context, candidate_hash: CandidateHash) -> Result where Context: SubsystemContext, @@ -985,6 +999,7 @@ where .map_err(|e| Error::QueryAvailabilityResponseChannel(e)) } +#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))] async fn query_chunk( ctx: &mut Context, candidate_hash: CandidateHash, @@ -1002,6 +1017,7 @@ where rx.await.map_err(|e| Error::QueryChunkResponseChannel(e)) } +#[tracing::instrument(level = "trace", skip(ctx, erasure_chunk), fields(subsystem = LOG_TARGET))] async fn store_chunk( ctx: &mut Context, candidate_hash: CandidateHash, @@ -1029,6 +1045,7 @@ where } /// Request the head data for a particular para. +#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))] async fn query_pending_availability( ctx: &mut Context, relay_parent: Hash, @@ -1051,6 +1068,7 @@ where } /// Query the validator set. 
+#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))] async fn query_validators( ctx: &mut Context, relay_parent: Hash, @@ -1073,6 +1091,7 @@ where } /// Query the hash of the `K` ancestors +#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))] async fn query_k_ancestors( ctx: &mut Context, relay_parent: Hash, @@ -1097,6 +1116,7 @@ where } /// Query the session index of a relay parent +#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))] async fn query_session_index_for_child( ctx: &mut Context, relay_parent: Hash, @@ -1119,6 +1139,7 @@ where } /// Queries up to k ancestors with the constraints of equiv session +#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))] async fn query_up_to_k_ancestors_in_same_session( ctx: &mut Context, relay_parent: Hash, From 5dac05cb4b73c5bdebf374199f8cdd21508c0502 Mon Sep 17 00:00:00 2001 From: Peter Goodspeed-Niklaus Date: Thu, 12 Nov 2020 11:47:08 +0100 Subject: [PATCH 27/33] add tracing spans to bitfield-distribution --- node/network/bitfield-distribution/src/lib.rs | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/node/network/bitfield-distribution/src/lib.rs b/node/network/bitfield-distribution/src/lib.rs index 0d4fa8ca1dec..309af03909c3 100644 --- a/node/network/bitfield-distribution/src/lib.rs +++ b/node/network/bitfield-distribution/src/lib.rs @@ -78,7 +78,7 @@ impl BitfieldGossipMessage { /// Data used to track information of peers and relay parents the /// overseer ordered us to work on. -#[derive(Default, Clone)] +#[derive(Default, Clone, Debug)] struct ProtocolState { /// track all active peers and their views /// to determine what is relevant to them. @@ -143,7 +143,7 @@ impl BitfieldDistribution { } /// Start processing work as passed on from the Overseer. 
- #[tracing::instrument(skip(self, ctx), fields(subsystem = std::any::type_name::()))] + #[tracing::instrument(skip(self, ctx), fields(subsystem = LOG_TARGET))] async fn run(self, mut ctx: Context) where Context: SubsystemContext, @@ -227,6 +227,7 @@ impl BitfieldDistribution { } /// Modify the reputation of a peer based on its behaviour. +#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))] async fn modify_reputation( ctx: &mut Context, peer: PeerId, @@ -245,6 +246,7 @@ where /// Distribute a given valid and signature checked bitfield message. /// /// For this variant the source is this node. +#[tracing::instrument(level = "trace", skip(ctx, metrics), fields(subsystem = LOG_TARGET))] async fn handle_bitfield_distribution( ctx: &mut Context, state: &mut ProtocolState, @@ -298,6 +300,7 @@ where /// Distribute a given valid and signature checked bitfield message. /// /// Can be originated by another subsystem or received via network from another peer. +#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))] async fn relay_message( ctx: &mut Context, job_data: &mut PerRelayParentData, @@ -360,6 +363,7 @@ where } /// Handle an incoming message from a peer. +#[tracing::instrument(level = "trace", skip(ctx, metrics), fields(subsystem = LOG_TARGET))] async fn process_incoming_peer_message( ctx: &mut Context, state: &mut ProtocolState, @@ -447,6 +451,7 @@ where /// Deal with network bridge updates and track what needs to be tracked /// which depends on the message type received. +#[tracing::instrument(level = "trace", skip(ctx, metrics), fields(subsystem = LOG_TARGET))] async fn handle_network_msg( ctx: &mut Context, state: &mut ProtocolState, @@ -488,6 +493,7 @@ where } /// Handle the changes necassary when our view changes. 
+#[tracing::instrument(level = "trace", fields(subsystem = LOG_TARGET))] fn handle_our_view_change(state: &mut ProtocolState, view: View) -> SubsystemResult<()> { let old_view = std::mem::replace(&mut (state.view), view); @@ -511,6 +517,7 @@ fn handle_our_view_change(state: &mut ProtocolState, view: View) -> SubsystemRes // Send the difference between two views which were not sent // to that particular peer. +#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))] async fn handle_peer_view_change( ctx: &mut Context, state: &mut ProtocolState, @@ -561,6 +568,7 @@ where } /// Send a gossip message and track it in the per relay parent data. +#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))] async fn send_tracked_gossip_message( ctx: &mut Context, state: &mut ProtocolState, @@ -611,6 +619,7 @@ where } /// Query our validator set and signing context for a particular relay parent. +#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))] async fn query_basics( ctx: &mut Context, relay_parent: Hash, From dec5aa6881ad80b875df597deee3b099968fa3d3 Mon Sep 17 00:00:00 2001 From: Peter Goodspeed-Niklaus Date: Thu, 12 Nov 2020 14:36:27 +0100 Subject: [PATCH 28/33] add tracing spans to network-bridge --- node/network/bridge/src/lib.rs | 26 ++++++++++++------- .../network/bridge/src/validator_discovery.rs | 6 ++++- 2 files changed, 22 insertions(+), 10 deletions(-) diff --git a/node/network/bridge/src/lib.rs b/node/network/bridge/src/lib.rs index 820f9b9f25b0..a17fc387fb43 100644 --- a/node/network/bridge/src/lib.rs +++ b/node/network/bridge/src/lib.rs @@ -73,7 +73,7 @@ const MALFORMED_VIEW_COST: ReputationChange = ReputationChange::new(-500, "Malformed view"); // network bridge log target -const TARGET: &'static str = "network_bridge"; +const LOG_TARGET: &'static str = "network_bridge"; /// Messages received on the network. 
#[derive(Debug, Encode, Decode, Clone)] @@ -269,6 +269,7 @@ enum Action { Nop, } +#[tracing::instrument(level = "trace", fields(subsystem = LOG_TARGET))] fn action_from_overseer_message( res: polkadot_subsystem::SubsystemResult>, ) -> Action { @@ -291,16 +292,17 @@ fn action_from_overseer_message( Ok(FromOverseer::Signal(OverseerSignal::BlockFinalized(_))) => Action::Nop, Err(e) => { - tracing::warn!(target: TARGET, err = ?e, "Shutting down Network Bridge due to error"); + tracing::warn!(target: LOG_TARGET, err = ?e, "Shutting down Network Bridge due to error"); Action::Abort } } } +#[tracing::instrument(level = "trace", fields(subsystem = LOG_TARGET))] fn action_from_network_message(event: Option) -> Action { match event { None => { - tracing::info!(target: TARGET, "Shutting down Network Bridge: underlying event stream concluded"); + tracing::info!(target: LOG_TARGET, "Shutting down Network Bridge: underlying event stream concluded"); Action::Abort } Some(NetworkEvent::Dht(_)) => Action::Nop, @@ -355,6 +357,7 @@ fn construct_view(live_heads: &[Hash]) -> View { View(live_heads.iter().rev().take(MAX_VIEW_HEADS).cloned().collect()) } +#[tracing::instrument(level = "trace", skip(net, ctx, validation_peers, collation_peers), fields(subsystem = LOG_TARGET))] async fn update_view( net: &mut impl Network, ctx: &mut impl SubsystemContext, @@ -384,7 +387,7 @@ async fn update_view( NetworkBridgeEvent::OurViewChange(new_view.clone()), ctx, ).await { - tracing::warn!(target: TARGET, err = ?e, "Aborting - Failure to dispatch messages to overseer"); + tracing::warn!(target: LOG_TARGET, err = ?e, "Aborting - Failure to dispatch messages to overseer"); return Err(e) } @@ -392,7 +395,7 @@ async fn update_view( NetworkBridgeEvent::OurViewChange(new_view.clone()), ctx, ).await { - tracing::warn!(target: TARGET, err = ?e, "Aborting - Failure to dispatch messages to overseer"); + tracing::warn!(target: LOG_TARGET, err = ?e, "Aborting - Failure to dispatch messages to overseer"); return 
Err(e) } @@ -401,6 +404,7 @@ async fn update_view( // Handle messages on a specific peer-set. The peer is expected to be connected on that // peer-set. +#[tracing::instrument(level = "trace", skip(peers, messages, net), fields(subsystem = LOG_TARGET))] async fn handle_peer_messages( peer: PeerId, peers: &mut HashMap, @@ -447,6 +451,7 @@ async fn handle_peer_messages( Ok(outgoing_messages) } +#[tracing::instrument(level = "trace", skip(net, peers), fields(subsystem = LOG_TARGET))] async fn send_validation_message( net: &mut impl Network, peers: I, @@ -459,6 +464,7 @@ async fn send_validation_message( send_message(net, peers, PeerSet::Validation, message).await } +#[tracing::instrument(level = "trace", skip(net, peers), fields(subsystem = LOG_TARGET))] async fn send_collation_message( net: &mut impl Network, peers: I, @@ -521,6 +527,7 @@ async fn dispatch_collation_event_to_all( dispatch_collation_events_to_all(std::iter::once(event), ctx).await } +#[tracing::instrument(level = "trace", skip(events, ctx), fields(subsystem = LOG_TARGET))] async fn dispatch_validation_events_to_all( events: I, ctx: &mut impl SubsystemContext, @@ -552,6 +559,7 @@ async fn dispatch_validation_events_to_all( ctx.send_messages(events.into_iter().flat_map(messages_for)).await } +#[tracing::instrument(level = "trace", skip(events, ctx), fields(subsystem = LOG_TARGET))] async fn dispatch_collation_events_to_all( events: I, ctx: &mut impl SubsystemContext, @@ -569,7 +577,7 @@ async fn dispatch_collation_events_to_all( ctx.send_messages(events.into_iter().flat_map(messages_for)).await } -#[tracing::instrument(skip(network_service, authority_discovery_service, ctx), fields(subsystem = "NetworkBridge"))] +#[tracing::instrument(skip(network_service, authority_discovery_service, ctx), fields(subsystem = LOG_TARGET))] async fn run_network( mut network_service: N, mut authority_discovery_service: AD, @@ -720,7 +728,7 @@ where if let Err(e) = res { tracing::warn!( - target: TARGET, + target: 
LOG_TARGET, err = ?e, "Aborting - Failure to dispatch messages to overseer", ); @@ -742,7 +750,7 @@ where &mut ctx, ).await { tracing::warn!( - target: TARGET, + target: LOG_TARGET, err = ?e, "Aborting - Failure to dispatch messages to overseer", ); @@ -763,7 +771,7 @@ where &mut ctx, ).await { tracing::warn!( - target: TARGET, + target: LOG_TARGET, err = ?e, "Aborting - Failure to dispatch messages to overseer", ); diff --git a/node/network/bridge/src/validator_discovery.rs b/node/network/bridge/src/validator_discovery.rs index bed6fee264ab..d0cc14619120 100644 --- a/node/network/bridge/src/validator_discovery.rs +++ b/node/network/bridge/src/validator_discovery.rs @@ -29,6 +29,7 @@ use polkadot_node_network_protocol::PeerId; use polkadot_primitives::v1::{AuthorityDiscoveryId, Block, Hash}; const PRIORITY_GROUP: &'static str = "parachain_validators"; +const LOG_TARGET: &str = "ValidatorDiscovery"; /// An abstraction over networking for the purposes of validator discovery service. #[async_trait] @@ -163,6 +164,7 @@ impl Service { /// Find connected validators using the given `validator_ids`. /// /// Returns a [`HashMap`] that contains the found [`AuthorityDiscoveryId`]'s and their associated [`PeerId`]'s. + #[tracing::instrument(level = "trace", skip(self, authority_discovery_service), fields(subsystem = LOG_TARGET))] async fn find_connected_validators( &mut self, validator_ids: &[AuthorityDiscoveryId], @@ -201,6 +203,7 @@ impl Service { /// This method will also clean up all previously revoked requests. /// it takes `network_service` and `authority_discovery_service` by value /// and returns them as a workaround for the Future: Send requirement imposed by async fn impl. 
+ #[tracing::instrument(level = "trace", skip(self, connected, revoke, network_service, authority_discovery_service), fields(subsystem = LOG_TARGET))] pub async fn on_request( &mut self, validator_ids: Vec, @@ -283,7 +286,7 @@ impl Service { PRIORITY_GROUP.to_owned(), multiaddr_to_add, ).await { - tracing::warn!(target: super::TARGET, err = ?e, "AuthorityDiscoveryService returned an invalid multiaddress"); + tracing::warn!(target: LOG_TARGET, err = ?e, "AuthorityDiscoveryService returned an invalid multiaddress"); } // the addresses are known to be valid let _ = network_service.remove_from_priority_group(PRIORITY_GROUP.to_owned(), multiaddr_to_remove).await; @@ -304,6 +307,7 @@ impl Service { } /// Should be called when a peer connected. + #[tracing::instrument(level = "trace", skip(self, authority_discovery_service), fields(subsystem = LOG_TARGET))] pub async fn on_peer_connected(&mut self, peer_id: &PeerId, authority_discovery_service: &mut AD) { // check if it's an authority we've been waiting for let maybe_authority = authority_discovery_service.get_authority_id_by_peer_id(peer_id.clone()).await; From 3503fdfa45e0dab3be242163c022fd3fd8d057cb Mon Sep 17 00:00:00 2001 From: Peter Goodspeed-Niklaus Date: Thu, 12 Nov 2020 14:59:43 +0100 Subject: [PATCH 29/33] add tracing spans to collator-protocol --- .../collator-protocol/src/collator_side.rs | 15 ++++++++++++++- node/network/collator-protocol/src/lib.rs | 3 ++- .../collator-protocol/src/validator_side.rs | 15 ++++++++++++++- 3 files changed, 30 insertions(+), 3 deletions(-) diff --git a/node/network/collator-protocol/src/collator_side.rs b/node/network/collator-protocol/src/collator_side.rs index 9df0b5ffd8bc..9267ab0916e7 100644 --- a/node/network/collator-protocol/src/collator_side.rs +++ b/node/network/collator-protocol/src/collator_side.rs @@ -137,6 +137,7 @@ struct State { /// or the relay-parent isn't in the active-leaves set, we ignore the message /// as it must be invalid in that case - although this 
indicates a logic error /// elsewhere in the node. +#[tracing::instrument(level = "trace", skip(ctx, state, pov), fields(subsystem = LOG_TARGET))] async fn distribute_collation( ctx: &mut Context, state: &mut State, @@ -219,6 +220,7 @@ where /// Get the Id of the Core that is assigned to the para being collated on if any /// and the total number of cores. +#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))] async fn determine_core( ctx: &mut Context, para_id: ParaId, @@ -243,6 +245,7 @@ where /// Figure out a group of validators assigned to the para being collated on. /// /// This returns validators for the current group and the next group. +#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))] async fn determine_our_validators( ctx: &mut Context, core_index: CoreIndex, @@ -282,6 +285,7 @@ where } /// Issue a `Declare` collation message to a set of peers. +#[tracing::instrument(level = "trace", skip(ctx, state), fields(subsystem = LOG_TARGET))] async fn declare( ctx: &mut Context, state: &mut State, @@ -304,6 +308,7 @@ where /// Issue a connection request to a set of validators and /// revoke the previous connection request. +#[tracing::instrument(level = "trace", skip(ctx, state), fields(subsystem = LOG_TARGET))] async fn connect_to_validators( ctx: &mut Context, relay_parent: Hash, @@ -329,6 +334,7 @@ where } /// Advertise collation to a set of relay chain validators. +#[tracing::instrument(level = "trace", skip(ctx, state), fields(subsystem = LOG_TARGET))] async fn advertise_collation( ctx: &mut Context, state: &mut State, @@ -360,6 +366,7 @@ where } /// The main incoming message dispatching switch. +#[tracing::instrument(level = "trace", skip(ctx, state), fields(subsystem = LOG_TARGET))] async fn process_msg( ctx: &mut Context, state: &mut State, @@ -435,6 +442,7 @@ where } /// Issue a response to a previously requested collation. 
+#[tracing::instrument(level = "trace", skip(ctx, state, pov), fields(subsystem = LOG_TARGET))] async fn send_collation( ctx: &mut Context, state: &mut State, @@ -465,6 +473,7 @@ where } /// A networking messages switch. +#[tracing::instrument(level = "trace", skip(ctx, state), fields(subsystem = LOG_TARGET))] async fn handle_incoming_peer_message( ctx: &mut Context, state: &mut State, @@ -526,6 +535,7 @@ where } /// Our view has changed. +#[tracing::instrument(level = "trace", skip(ctx, state), fields(subsystem = LOG_TARGET))] async fn handle_peer_view_change( ctx: &mut Context, state: &mut State, @@ -553,6 +563,7 @@ where /// A validator is connected. /// /// `Declare` that we are a collator with a given `CollatorId`. +#[tracing::instrument(level = "trace", skip(ctx, state), fields(subsystem = LOG_TARGET))] async fn handle_validator_connected( ctx: &mut Context, state: &mut State, @@ -575,6 +586,7 @@ where } /// Bridge messages switch. +#[tracing::instrument(level = "trace", skip(ctx, state), fields(subsystem = LOG_TARGET))] async fn handle_network_msg( ctx: &mut Context, state: &mut State, @@ -609,6 +621,7 @@ where } /// Handles our view changes. +#[tracing::instrument(level = "trace", skip(state), fields(subsystem = LOG_TARGET))] async fn handle_our_view_change( state: &mut State, view: View, @@ -628,7 +641,7 @@ async fn handle_our_view_change( } /// The collator protocol collator side main loop. 
-#[tracing::instrument(skip(ctx, metrics), fields(subsystem = "Collator Protocol: Collator Side"))] +#[tracing::instrument(skip(ctx, metrics), fields(subsystem = LOG_TARGET))] pub(crate) async fn run( mut ctx: Context, our_id: CollatorId, diff --git a/node/network/collator-protocol/src/lib.rs b/node/network/collator-protocol/src/lib.rs index 0f12ae174452..5909c0b8e61d 100644 --- a/node/network/collator-protocol/src/lib.rs +++ b/node/network/collator-protocol/src/lib.rs @@ -95,7 +95,7 @@ impl CollatorProtocolSubsystem { } } - #[tracing::instrument(skip(self, ctx), fields(subsystem = std::any::type_name::()))] + #[tracing::instrument(skip(self, ctx), fields(subsystem = LOG_TARGET))] async fn run(self, ctx: Context) -> Result<()> where Context: SubsystemContext, @@ -135,6 +135,7 @@ where } /// Modify the reputation of a peer based on its behavior. +#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))] async fn modify_reputation(ctx: &mut Context, peer: PeerId, rep: Rep) -> Result<()> where Context: SubsystemContext, diff --git a/node/network/collator-protocol/src/validator_side.rs b/node/network/collator-protocol/src/validator_side.rs index 00a2dcec557f..f3a5958a6722 100644 --- a/node/network/collator-protocol/src/validator_side.rs +++ b/node/network/collator-protocol/src/validator_side.rs @@ -187,6 +187,7 @@ struct State { } /// Another subsystem has requested to fetch collations on a particular leaf for some para. +#[tracing::instrument(level = "trace", skip(ctx, state, tx), fields(subsystem = LOG_TARGET))] async fn fetch_collation( ctx: &mut Context, state: &mut State, @@ -238,6 +239,7 @@ where } /// Report a collator for some malicious actions. +#[tracing::instrument(level = "trace", skip(ctx, state), fields(subsystem = LOG_TARGET))] async fn report_collator( ctx: &mut Context, state: &mut State, @@ -259,6 +261,7 @@ where } /// Some other subsystem has reported a collator as a good one, bump reputation. 
+#[tracing::instrument(level = "trace", skip(ctx, state), fields(subsystem = LOG_TARGET))] async fn note_good_collation( ctx: &mut Context, state: &mut State, @@ -279,6 +282,7 @@ where /// A peer's view has changed. A number of things should be done: /// - Ongoing collation requests have to be cancelled. /// - Advertisements by this peer that are no longer relevant have to be removed. +#[tracing::instrument(level = "trace", skip(state), fields(subsystem = LOG_TARGET))] async fn handle_peer_view_change( state: &mut State, peer_id: PeerId, @@ -320,6 +324,7 @@ async fn handle_peer_view_change( /// - Cancel all ongoing requests /// - Reply to interested parties if any /// - Store collation. +#[tracing::instrument(level = "trace", skip(ctx, state, pov), fields(subsystem = LOG_TARGET))] async fn received_collation( ctx: &mut Context, state: &mut State, @@ -368,6 +373,7 @@ where /// - Check if the requested collation is in our view. /// - Update PerRequest records with the `result` field if necessary. /// And as such invocations of this function may rely on that. +#[tracing::instrument(level = "trace", skip(ctx, state, result), fields(subsystem = LOG_TARGET))] async fn request_collation( ctx: &mut Context, state: &mut State, @@ -440,6 +446,7 @@ where } /// Notify `CandidateSelectionSubsystem` that a collation has been advertised. +#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))] async fn notify_candidate_selection( ctx: &mut Context, collator: CollatorId, @@ -461,6 +468,7 @@ where } /// Networking message has been received. +#[tracing::instrument(level = "trace", skip(ctx, state), fields(subsystem = LOG_TARGET))] async fn process_incoming_peer_message( ctx: &mut Context, state: &mut State, @@ -499,6 +507,7 @@ where /// A leaf has become inactive so we want to /// - Cancel all ongoing collation requests that are on top of that leaf. /// - Remove all stored collations relevant to that leaf. 
+#[tracing::instrument(level = "trace", skip(state), fields(subsystem = LOG_TARGET))] async fn remove_relay_parent( state: &mut State, relay_parent: Hash, @@ -524,6 +533,7 @@ async fn remove_relay_parent( } /// Our view has changed. +#[tracing::instrument(level = "trace", skip(state), fields(subsystem = LOG_TARGET))] async fn handle_our_view_change( state: &mut State, view: View, @@ -547,6 +557,7 @@ async fn handle_our_view_change( } /// A request has timed out. +#[tracing::instrument(level = "trace", skip(ctx, state), fields(subsystem = LOG_TARGET))] async fn request_timed_out( ctx: &mut Context, state: &mut State, @@ -572,6 +583,7 @@ where } /// Bridge event switch. +#[tracing::instrument(level = "trace", skip(ctx, state), fields(subsystem = LOG_TARGET))] async fn handle_network_msg( ctx: &mut Context, state: &mut State, @@ -605,6 +617,7 @@ where } /// The main message receiver switch. +#[tracing::instrument(level = "trace", skip(ctx, state), fields(subsystem = LOG_TARGET))] async fn process_msg( ctx: &mut Context, msg: CollatorProtocolMessage, @@ -657,7 +670,7 @@ where } /// The main run loop. 
-#[tracing::instrument(skip(ctx, metrics), fields(subsystem = "Collator Protocol: Validator Side"))] +#[tracing::instrument(skip(ctx, metrics), fields(subsystem = LOG_TARGET))] pub(crate) async fn run( mut ctx: Context, request_timeout: Duration, From 54d083bb7712d5aceef12a56b15f565380bab595 Mon Sep 17 00:00:00 2001 From: Peter Goodspeed-Niklaus Date: Thu, 12 Nov 2020 15:05:10 +0100 Subject: [PATCH 30/33] add tracing spans to pov-distribution --- node/network/pov-distribution/src/lib.rs | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/node/network/pov-distribution/src/lib.rs b/node/network/pov-distribution/src/lib.rs index 1c7082fbc080..b1d7a8367c4c 100644 --- a/node/network/pov-distribution/src/lib.rs +++ b/node/network/pov-distribution/src/lib.rs @@ -116,6 +116,7 @@ fn send_pov_message(relay_parent: Hash, pov_hash: Hash, pov: PoV) /// Handles the signal. If successful, returns `true` if the subsystem should conclude, /// `false` otherwise. +#[tracing::instrument(level = "trace", skip(ctx, state), fields(subsystem = LOG_TARGET))] async fn handle_signal( state: &mut State, ctx: &mut impl SubsystemContext, @@ -169,6 +170,7 @@ async fn handle_signal( /// Notify peers that we are awaiting a given PoV hash. /// /// This only notifies peers who have the relay parent in their view. +#[tracing::instrument(level = "trace", skip(peers, ctx), fields(subsystem = LOG_TARGET))] async fn notify_all_we_are_awaiting( peers: &mut HashMap, ctx: &mut impl SubsystemContext, @@ -195,6 +197,7 @@ async fn notify_all_we_are_awaiting( } /// Notify one peer about everything we're awaiting at a given relay-parent. +#[tracing::instrument(level = "trace", skip(ctx, relay_parent_state), fields(subsystem = LOG_TARGET))] async fn notify_one_we_are_awaiting_many( peer: &PeerId, ctx: &mut impl SubsystemContext, @@ -219,6 +222,7 @@ async fn notify_one_we_are_awaiting_many( } /// Distribute a PoV to peers who are awaiting it. 
+#[tracing::instrument(level = "trace", skip(peers, ctx, metrics, pov), fields(subsystem = LOG_TARGET))] async fn distribute_to_awaiting( peers: &mut HashMap, ctx: &mut impl SubsystemContext, @@ -255,6 +259,7 @@ async fn distribute_to_awaiting( } /// Handles a `FetchPoV` message. +#[tracing::instrument(level = "trace", skip(ctx, state, response_sender), fields(subsystem = LOG_TARGET))] async fn handle_fetch( state: &mut State, ctx: &mut impl SubsystemContext, @@ -303,6 +308,7 @@ async fn handle_fetch( } /// Handles a `DistributePoV` message. +#[tracing::instrument(level = "trace", skip(ctx, state, pov), fields(subsystem = LOG_TARGET))] async fn handle_distribute( state: &mut State, ctx: &mut impl SubsystemContext, @@ -338,6 +344,7 @@ async fn handle_distribute( } /// Report a reputation change for a peer. +#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))] async fn report_peer( ctx: &mut impl SubsystemContext, peer: PeerId, @@ -347,6 +354,7 @@ async fn report_peer( } /// Handle a notification from a peer that they are awaiting some PoVs. +#[tracing::instrument(level = "trace", skip(ctx, state), fields(subsystem = LOG_TARGET))] async fn handle_awaiting( state: &mut State, ctx: &mut impl SubsystemContext, @@ -401,6 +409,7 @@ async fn handle_awaiting( /// Handle an incoming PoV from our peer. Reports them if unexpected, rewards them if not. /// /// Completes any requests awaiting that PoV. +#[tracing::instrument(level = "trace", skip(ctx, state, pov), fields(subsystem = LOG_TARGET))] async fn handle_incoming_pov( state: &mut State, ctx: &mut impl SubsystemContext, @@ -468,6 +477,7 @@ async fn handle_incoming_pov( } /// Handles a network bridge update. 
+#[tracing::instrument(level = "trace", skip(ctx, state), fields(subsystem = LOG_TARGET))] async fn handle_network_update( state: &mut State, ctx: &mut impl SubsystemContext, @@ -539,7 +549,7 @@ impl PoVDistribution { Self { metrics } } - #[tracing::instrument(skip(self, ctx), fields(subsystem = std::any::type_name::()))] + #[tracing::instrument(skip(self, ctx), fields(subsystem = LOG_TARGET))] async fn run( self, mut ctx: impl SubsystemContext, From 8208d9c9259e493403cc4cc50430699d4ec97032 Mon Sep 17 00:00:00 2001 From: Peter Goodspeed-Niklaus Date: Thu, 12 Nov 2020 15:15:04 +0100 Subject: [PATCH 31/33] add tracing spans to statement-distribution --- node/network/statement-distribution/src/lib.rs | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/node/network/statement-distribution/src/lib.rs b/node/network/statement-distribution/src/lib.rs index c74d6fa4c5ab..383f34a6fc90 100644 --- a/node/network/statement-distribution/src/lib.rs +++ b/node/network/statement-distribution/src/lib.rs @@ -164,6 +164,7 @@ impl PeerRelayParentKnowledge { /// /// This returns `Some(true)` if this is the first time the peer has become aware of a /// candidate with the given hash. + #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))] fn send(&mut self, fingerprint: &(CompactStatement, ValidatorIndex)) -> Option { let already_known = self.sent_statements.contains(fingerprint) || self.received_statements.contains(fingerprint); @@ -212,6 +213,7 @@ impl PeerRelayParentKnowledge { /// /// This returns `Ok(true)` if this is the first time the peer has become aware of a /// candidate with given hash. + #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))] fn receive( &mut self, fingerprint: &(CompactStatement, ValidatorIndex), @@ -278,6 +280,7 @@ impl PeerData { /// /// This returns `Some(true)` if this is the first time the peer has become aware of a /// candidate with the given hash. 
+ #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))] fn send( &mut self, relay_parent: &Hash, @@ -302,6 +305,7 @@ impl PeerData { /// /// This returns `Ok(true)` if this is the first time the peer has become aware of a /// candidate with given hash. + #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))] fn receive( &mut self, relay_parent: &Hash, @@ -411,6 +415,7 @@ impl ActiveHeadData { /// /// Any other statements or those that reference a candidate we are not aware of cannot be accepted /// and will return `NotedStatement::NotUseful`. + #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))] fn note_statement(&mut self, statement: SignedFullStatement) -> NotedStatement { let validator_index = statement.validator_index(); let comparator = StoredStatementComparator { @@ -490,6 +495,7 @@ fn check_statement_signature( /// Informs all registered listeners about a newly received statement. /// /// Removes all closed listeners. +#[tracing::instrument(level = "trace", skip(listeners), fields(subsystem = LOG_TARGET))] async fn inform_statement_listeners( statement: &SignedFullStatement, listeners: &mut Vec>, @@ -509,6 +515,7 @@ async fn inform_statement_listeners( /// circulates the statement to all peers who have not seen it yet, and /// sends all statements dependent on that statement to peers who could previously not receive /// them but now can. +#[tracing::instrument(level = "trace", skip(peers, ctx, active_heads, metrics), fields(subsystem = LOG_TARGET))] async fn circulate_statement_and_dependents( peers: &mut HashMap, active_heads: &mut HashMap, @@ -564,6 +571,7 @@ fn statement_message(relay_parent: Hash, statement: SignedFullStatement) /// Circulates a statement to all peers who have not seen it yet, and returns /// an iterator over peers who need to have dependent statements sent. 
+#[tracing::instrument(level = "trace", skip(peers, ctx), fields(subsystem = LOG_TARGET))] async fn circulate_statement( peers: &mut HashMap, ctx: &mut impl SubsystemContext, @@ -597,6 +605,7 @@ async fn circulate_statement( } /// Send all statements about a given candidate hash to a peer. +#[tracing::instrument(level = "trace", skip(peer_data, ctx, active_head, metrics), fields(subsystem = LOG_TARGET))] async fn send_statements_about( peer: PeerId, peer_data: &mut PeerData, @@ -625,6 +634,7 @@ async fn send_statements_about( } /// Send all statements at a given relay-parent to a peer. +#[tracing::instrument(level = "trace", skip(peer_data, ctx, active_head, metrics), fields(subsystem = LOG_TARGET))] async fn send_statements( peer: PeerId, peer_data: &mut PeerData, @@ -666,6 +676,7 @@ async fn report_peer( // // This function checks the signature and ensures the statement is compatible with our // view. +#[tracing::instrument(level = "trace", skip(peer_data, ctx, active_heads, metrics), fields(subsystem = LOG_TARGET))] async fn handle_incoming_message<'a>( peer: PeerId, peer_data: &mut PeerData, @@ -744,6 +755,7 @@ async fn handle_incoming_message<'a>( } /// Update a peer's view. 
Sends all newly unlocked statements based on the previous +#[tracing::instrument(level = "trace", skip(peer_data, ctx, active_heads, metrics), fields(subsystem = LOG_TARGET))] async fn update_peer_view_and_send_unlocked( peer: PeerId, peer_data: &mut PeerData, @@ -780,6 +792,7 @@ async fn update_peer_view_and_send_unlocked( Ok(()) } +#[tracing::instrument(level = "trace", skip(peers, active_heads, ctx, metrics), fields(subsystem = LOG_TARGET))] async fn handle_network_update( peers: &mut HashMap, active_heads: &mut HashMap, @@ -867,7 +880,7 @@ async fn handle_network_update( } impl StatementDistribution { - #[tracing::instrument(skip(self, ctx), fields(subsystem = std::any::type_name::()))] + #[tracing::instrument(skip(self, ctx), fields(subsystem = LOG_TARGET))] async fn run( self, mut ctx: impl SubsystemContext, From 54d2641b62b93ea737ecf1d758ee86afd459af08 Mon Sep 17 00:00:00 2001 From: Peter Goodspeed-Niklaus Date: Thu, 12 Nov 2020 15:23:28 +0100 Subject: [PATCH 32/33] add tracing spans to overseer --- node/overseer/src/lib.rs | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/node/overseer/src/lib.rs b/node/overseer/src/lib.rs index c7bb807d53dc..b70144f69d30 100644 --- a/node/overseer/src/lib.rs +++ b/node/overseer/src/lib.rs @@ -135,6 +135,7 @@ enum ToOverseer { /// This structure exists solely for the purposes of decoupling /// `Overseer` code from the client code and the necessity to call /// `HeaderBackend::block_number_from_id()`. +#[derive(Debug)] pub struct BlockInfo { /// hash of the block. pub hash: Hash, @@ -191,16 +192,19 @@ pub struct OverseerHandler { impl OverseerHandler { /// Inform the `Overseer` that that some block was imported. 
+ #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))] pub async fn block_imported(&mut self, block: BlockInfo) -> SubsystemResult<()> { self.events_tx.send(Event::BlockImported(block)).await.map_err(Into::into) } /// Send some message to one of the `Subsystem`s. + #[tracing::instrument(level = "trace", skip(self, msg), fields(subsystem = LOG_TARGET))] pub async fn send_msg(&mut self, msg: impl Into) -> SubsystemResult<()> { self.events_tx.send(Event::MsgToSubsystem(msg.into())).await.map_err(Into::into) } /// Inform the `Overseer` that that some block was finalized. + #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))] pub async fn block_finalized(&mut self, block: BlockInfo) -> SubsystemResult<()> { self.events_tx.send(Event::BlockFinalized(block)).await.map_err(Into::into) } @@ -212,6 +216,7 @@ impl OverseerHandler { /// Note that due the fact the overseer doesn't store the whole active-leaves set, only deltas, /// the response channel may never return if the hash was deactivated before this call. /// In this case, it's the caller's responsibility to ensure a timeout is set. + #[tracing::instrument(level = "trace", skip(self, response_channel), fields(subsystem = LOG_TARGET))] pub async fn wait_for_activation(&mut self, hash: Hash, response_channel: oneshot::Sender>) -> SubsystemResult<()> { self.events_tx.send(Event::ExternalRequest(ExternalRequest::WaitForActivation { hash, @@ -220,6 +225,7 @@ impl OverseerHandler { } /// Tell `Overseer` to shutdown. + #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))] pub async fn stop(&mut self) -> SubsystemResult<()> { self.events_tx.send(Event::Stop).await.map_err(Into::into) } @@ -1287,7 +1293,7 @@ where } /// Run the `Overseer`. 
- #[tracing::instrument(skip(self), fields(subsystem = std::any::type_name::()))] + #[tracing::instrument(skip(self), fields(subsystem = LOG_TARGET))] pub async fn run(mut self) -> SubsystemResult<()> { let leaves = std::mem::take(&mut self.leaves); let mut update = ActiveLeavesUpdate::default(); @@ -1348,6 +1354,7 @@ where } } + #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))] async fn block_imported(&mut self, block: BlockInfo) -> SubsystemResult<()> { let mut update = ActiveLeavesUpdate::default(); @@ -1377,6 +1384,7 @@ where Ok(()) } + #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))] async fn block_finalized(&mut self, block: BlockInfo) -> SubsystemResult<()> { let mut update = ActiveLeavesUpdate::default(); @@ -1400,6 +1408,7 @@ where Ok(()) } + #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))] async fn broadcast_signal(&mut self, signal: OverseerSignal) -> SubsystemResult<()> { if let Some(ref mut s) = self.candidate_validation_subsystem.instance { s.tx.send(FromOverseer::Signal(signal.clone())).await?; @@ -1464,6 +1473,7 @@ where Ok(()) } + #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))] async fn route_message(&mut self, msg: AllMessages) { self.metrics.on_message_relayed(); match msg { @@ -1545,6 +1555,7 @@ where } } + #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))] fn on_head_activated(&mut self, hash: &Hash) { self.metrics.on_head_activated(); if let Some(listeners) = self.activation_external_listeners.remove(hash) { @@ -1555,6 +1566,7 @@ where } } + #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))] fn on_head_deactivated(&mut self, hash: &Hash) { self.metrics.on_head_deactivated(); if let Some(listeners) = self.activation_external_listeners.remove(hash) { @@ -1563,6 +1575,7 @@ where } } + #[tracing::instrument(level = "trace", skip(self), 
fields(subsystem = LOG_TARGET))] fn clean_up_external_listeners(&mut self) { self.activation_external_listeners.retain(|_, v| { // remove dead listeners @@ -1571,6 +1584,7 @@ where }) } + #[tracing::instrument(level = "trace", skip(self, request), fields(subsystem = LOG_TARGET))] fn handle_external_request(&mut self, request: ExternalRequest) { match request { ExternalRequest::WaitForActivation { hash, response_channel } => { From b4aeb832f22832ef727ec100a4dda7c5ecd85cf7 Mon Sep 17 00:00:00 2001 From: Peter Goodspeed-Niklaus Date: Thu, 12 Nov 2020 16:17:49 +0100 Subject: [PATCH 33/33] cleanup --- Cargo.lock | 1 + primitives/src/v1.rs | 12 ++++++------ 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0df3553a4236..c6e6a4e2916c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5027,6 +5027,7 @@ dependencies = [ "sp-keystore", "thiserror", "tracing", + "tracing-futures", "wasm-timer", ] diff --git a/primitives/src/v1.rs b/primitives/src/v1.rs index a318871a4ca3..c4bc3a6d7347 100644 --- a/primitives/src/v1.rs +++ b/primitives/src/v1.rs @@ -467,8 +467,8 @@ impl From for CoreIndex { } /// The unique (during session) index of a validator group. -#[derive(Encode, Decode, Default, Clone, Copy)] -#[cfg_attr(feature = "std", derive(Eq, Hash, PartialEq, Debug))] +#[derive(Encode, Decode, Default, Clone, Copy, Debug)] +#[cfg_attr(feature = "std", derive(Eq, Hash, PartialEq))] pub struct GroupIndex(pub u32); impl From for GroupIndex { @@ -569,8 +569,8 @@ impl GroupRotationInfo { } /// Information about a core which is currently occupied. -#[derive(Clone, Encode, Decode)] -#[cfg_attr(feature = "std", derive(PartialEq, Debug))] +#[derive(Clone, Encode, Decode, Debug)] +#[cfg_attr(feature = "std", derive(PartialEq))] pub struct OccupiedCore { /// The ID of the para occupying the core. pub para_id: Id, @@ -594,8 +594,8 @@ pub struct OccupiedCore { } /// Information about a core which is currently occupied. 
-#[derive(Clone, Encode, Decode)] -#[cfg_attr(feature = "std", derive(PartialEq, Debug, Default))] +#[derive(Clone, Encode, Decode, Debug)] +#[cfg_attr(feature = "std", derive(PartialEq, Default))] pub struct ScheduledCore { /// The ID of a para scheduled. pub para_id: Id,