From 89ebe84183d345aa0453e49205f39fc9daeba53a Mon Sep 17 00:00:00 2001 From: Robert Klotzner Date: Thu, 4 Mar 2021 14:58:17 +0100 Subject: [PATCH 01/13] Logging functionality for spans. --- node/jaeger/src/lib.rs | 35 +++++++++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/node/jaeger/src/lib.rs b/node/jaeger/src/lib.rs index 133fa7ce7d50..fc6ecadf498a 100644 --- a/node/jaeger/src/lib.rs +++ b/node/jaeger/src/lib.rs @@ -321,6 +321,14 @@ impl Span { } } + /// Add logs to this span. + pub fn log(&mut self) -> Log<'_> { + match self { + Self::Enabled(inner) => Log { inner: Some(inner.log()) }, + Self::Disabled => Log { inner: None }, + } + } + /// Helper to check whether jaeger is enabled /// in order to avoid computational overhead. pub const fn is_enabled(&self) -> bool { @@ -331,6 +339,33 @@ impl Span { } } +/// Wrapper around jaeger logs +/// +/// Supporting disabled spans by not logging anything. +pub struct Log<'a> { + inner: Option>, +} + +impl<'a> Log<'a> { + /// Just a wrapper around `mick_jaeger::Log::with_int`. + pub fn with_int(self, key: &str, value: i64) -> Self { + if let Some(log) = self.inner { + Self{inner: Some(log.with_int(key, value))} + } else { + self + } + } + + /// Just a wrapper around `mick_jaeger::Log::with_string`. + pub fn with_string(self, key: &str, value: &str) -> Self { + if let Some(log) = self.inner { + Self{inner: Some(log.with_string(key, value))} + } else { + self + } + } +} + impl std::fmt::Debug for Span { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { write!(f, "") From 78e6a1cdec402cb62942b422d18c2d2b83c7d450 Mon Sep 17 00:00:00 2001 From: Robert Klotzner Date: Thu, 4 Mar 2021 14:59:15 +0100 Subject: [PATCH 02/13] Jaeger spans for availability distribution. --- .../src/requester/fetch_task/mod.rs | 16 +++++++++++++++- .../src/requester/fetch_task/tests.rs | 1 + 2 files changed, 16 insertions(+), 1 deletion(-) diff --git a/node/network/availability-distribution/src/requester/fetch_task/mod.rs b/node/network/availability-distribution/src/requester/fetch_task/mod.rs index 3e187f9502e8..c976c4cbbbe9 100644 --- a/node/network/availability-distribution/src/requester/fetch_task/mod.rs +++ b/node/network/availability-distribution/src/requester/fetch_task/mod.rs @@ -33,7 +33,7 @@ use polkadot_primitives::v1::{ use polkadot_subsystem::messages::{ AllMessages, AvailabilityStoreMessage, NetworkBridgeMessage, }; -use polkadot_subsystem::SubsystemContext; +use polkadot_subsystem::{SubsystemContext, jaeger}; use crate::{ error::{Error, Result}, @@ -119,6 +119,9 @@ struct RunningTask { /// Prometheues metrics for reporting results. metrics: Metrics, + + /// Span tracking the fetching of this chunk. 
+ span: jaeger::Span, } impl FetchTaskConfig { @@ -142,6 +145,9 @@ impl FetchTaskConfig { }; } + let mut span = jaeger::candidate_hash_span(&core.candidate_hash, "availability-distribution"); + span.add_stage(jaeger::Stage::AvailabilityDistribution); + let prepared_running = RunningTask { session_index: session_info.session_index, group_index: core.group_responsible, @@ -156,6 +162,7 @@ impl FetchTaskConfig { relay_parent: core.candidate_descriptor.relay_parent, metrics, sender, + span, }; FetchTaskConfig { live_in, @@ -254,8 +261,14 @@ impl RunningTask { let mut bad_validators = Vec::new(); let mut label = FAILED; let mut count: u32 = 0; + let mut _span = self.span.child_builder("fetch-task") + .with_chunk_index(self.request.index.0) + .with_relay_parent(&self.relay_parent) + .build(); + let mut span_log = _span.log(); // Try validators in reverse order: while let Some(validator) = self.group.pop() { + span_log = span_log.with_int("Try", count as _); // Report retries: if count > 0 { self.metrics.on_retry(); @@ -302,6 +315,7 @@ impl RunningTask { // Ok, let's store it and be happy: self.store_chunk(chunk).await; label = SUCCEEDED; + span_log.with_string("success", "true"); break; } self.metrics.on_fetch(label); diff --git a/node/network/availability-distribution/src/requester/fetch_task/tests.rs b/node/network/availability-distribution/src/requester/fetch_task/tests.rs index 1f12000621c2..b8c70a324d3d 100644 --- a/node/network/availability-distribution/src/requester/fetch_task/tests.rs +++ b/node/network/availability-distribution/src/requester/fetch_task/tests.rs @@ -291,6 +291,7 @@ fn get_test_running_task() -> (RunningTask, mpsc::Receiver) { relay_parent: Hash::repeat_byte(71), sender: tx, metrics: Metrics::new_dummy(), + span: jaeger::Span::Disabled, }, rx ) From 7caa0bd1acc6fe9727bb3a91851560d756c40ab8 Mon Sep 17 00:00:00 2001 From: Robert Klotzner Date: Thu, 4 Mar 2021 15:22:17 +0100 Subject: [PATCH 03/13] Fix instrumentation to use log target properly. --- node/collation-generation/src/lib.rs | 8 ++--- node/core/av-store/src/lib.rs | 4 +-- node/core/backing/src/lib.rs | 8 ++--- node/core/candidate-selection/src/lib.rs | 8 ++--- node/core/candidate-validation/src/lib.rs | 14 ++++----- node/core/chain-api/src/lib.rs | 2 +- node/core/provisioner/src/lib.rs | 14 ++++----- node/core/runtime-api/src/lib.rs | 4 +-- node/network/approval-distribution/src/lib.rs | 6 ++-- .../src/requester/mod.rs | 2 +- .../src/responder.rs | 3 +- .../src/session_cache.rs | 2 +- node/network/availability-recovery/src/lib.rs | 12 ++++---- node/network/bridge/src/lib.rs | 14 ++++----- node/network/bridge/src/network.rs | 2 +- .../network/bridge/src/validator_discovery.rs | 6 ++-- .../collator-protocol/src/collator_side.rs | 28 ++++++++--------- node/network/collator-protocol/src/lib.rs | 4 +-- .../collator-protocol/src/validator_side.rs | 30 +++++++++---------- node/network/gossip-support/src/lib.rs | 2 +- node/network/pov-distribution/src/lib.rs | 22 +++++++------- .../network/statement-distribution/src/lib.rs | 26 ++++++++-------- node/overseer/src/lib.rs | 28 ++++++++--------- 23 files changed, 125 insertions(+), 124 deletions(-) diff --git a/node/collation-generation/src/lib.rs b/node/collation-generation/src/lib.rs index 4a93152f9787..fd313f9f3b7f 100644 --- a/node/collation-generation/src/lib.rs +++ b/node/collation-generation/src/lib.rs @@ -74,7 +74,7 @@ impl CollationGenerationSubsystem { /// /// If `err_tx` is not `None`, errors are forwarded onto that channel as they occur. 
/// Otherwise, most are logged and then discarded. - #[tracing::instrument(skip(self, ctx), fields(subsystem = LOG_TARGET))] + #[tracing::instrument(skip(self, ctx), fields(target = LOG_TARGET))] async fn run(mut self, mut ctx: Context) where Context: SubsystemContext, @@ -107,7 +107,7 @@ impl CollationGenerationSubsystem { // note: this doesn't strictly need to be a separate function; it's more an administrative function // so that we don't clutter the run loop. It could in principle be inlined directly into there. // it should hopefully therefore be ok that it's an async function mutably borrowing self. - #[tracing::instrument(level = "trace", skip(self, ctx, sender), fields(subsystem = LOG_TARGET))] + #[tracing::instrument(level = "trace", skip(self, ctx, sender), fields(target = LOG_TARGET))] async fn handle_incoming( &mut self, incoming: SubsystemResult>, @@ -181,7 +181,7 @@ where } } -#[tracing::instrument(level = "trace", skip(ctx, metrics, sender, activated), fields(subsystem = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx, metrics, sender, activated), fields(target = LOG_TARGET))] async fn handle_new_activations( config: Arc, activated: impl IntoIterator, @@ -364,7 +364,7 @@ async fn handle_new_activations( Ok(()) } -#[tracing::instrument(level = "trace", fields(subsystem = LOG_TARGET))] +#[tracing::instrument(level = "trace", fields(target = LOG_TARGET))] fn erasure_root( n_validators: usize, persisted_validation: PersistedValidationData, diff --git a/node/core/av-store/src/lib.rs b/node/core/av-store/src/lib.rs index 4a624a1621ae..488b20f9a9d3 100644 --- a/node/core/av-store/src/lib.rs +++ b/node/core/av-store/src/lib.rs @@ -508,7 +508,7 @@ where } } -#[tracing::instrument(skip(subsystem, ctx), fields(subsystem = LOG_TARGET))] +#[tracing::instrument(skip(subsystem, ctx), fields(target = LOG_TARGET))] async fn run(mut subsystem: AvailabilityStoreSubsystem, mut ctx: Context) where Context: SubsystemContext, @@ -534,7 +534,7 @@ where } } -#[tracing::instrument(level = "trace", skip(subsystem, ctx), fields(subsystem = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(subsystem, ctx), fields(target = LOG_TARGET))] async fn run_iteration( ctx: &mut Context, subsystem: &mut AvailabilityStoreSubsystem, diff --git a/node/core/backing/src/lib.rs b/node/core/backing/src/lib.rs index b135458ab99f..88c6f9383c8f 100644 --- a/node/core/backing/src/lib.rs +++ b/node/core/backing/src/lib.rs @@ -865,7 +865,7 @@ impl CandidateBackingJob { } /// Import the statement and kick off validation work if it is a part of our assignment. 
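The change this patch repeats across all of the touched files is mechanical: every `#[tracing::instrument]` attribute that used to record the subsystem's `LOG_TARGET` constant under a span field named `subsystem` now records it under a field named `target`, as in the `-`/`+` pair directly below. A minimal sketch of the pattern, using a stand-in constant and function rather than code from this diff:

const LOG_TARGET: &str = "parachain::example";

// Before: the constant was recorded as a span field named `subsystem`.
// #[tracing::instrument(level = "trace", fields(subsystem = LOG_TARGET))]
// After: the same value is recorded under the `target` field instead.
#[tracing::instrument(level = "trace", fields(target = LOG_TARGET))]
fn process_item(index: u32) {
    tracing::trace!(index, "processing");
}
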
- #[tracing::instrument(level = "trace", skip(self, parent_span), fields(subsystem = LOG_TARGET))] + #[tracing::instrument(level = "trace", skip(self, parent_span), fields(target = LOG_TARGET))] async fn maybe_validate_and_import( &mut self, parent_span: &jaeger::Span, @@ -884,7 +884,7 @@ impl CandidateBackingJob { Ok(()) } - #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))] + #[tracing::instrument(level = "trace", skip(self), fields(target = LOG_TARGET))] async fn sign_statement(&self, statement: Statement) -> Option { let signed = self.table_context .validator @@ -897,7 +897,7 @@ impl CandidateBackingJob { Some(signed) } - #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))] + #[tracing::instrument(level = "trace", skip(self), fields(target = LOG_TARGET))] fn check_statement_signature(&self, statement: &SignedFullStatement) -> Result<(), Error> { let idx = statement.validator_index().0 as usize; @@ -987,7 +987,7 @@ impl util::JobTrait for CandidateBackingJob { const NAME: &'static str = "CandidateBackingJob"; - #[tracing::instrument(skip(span, keystore, metrics, rx_to, tx_from), fields(subsystem = LOG_TARGET))] + #[tracing::instrument(skip(span, keystore, metrics, rx_to, tx_from), fields(target = LOG_TARGET))] fn run( parent: Hash, span: Arc, diff --git a/node/core/candidate-selection/src/lib.rs b/node/core/candidate-selection/src/lib.rs index 58ef93fb9ac9..cc62c5d9bea2 100644 --- a/node/core/candidate-selection/src/lib.rs +++ b/node/core/candidate-selection/src/lib.rs @@ -93,7 +93,7 @@ impl JobTrait for CandidateSelectionJob { const NAME: &'static str = "CandidateSelectionJob"; - #[tracing::instrument(skip(keystore, metrics, receiver, sender), fields(subsystem = LOG_TARGET))] + #[tracing::instrument(skip(keystore, metrics, receiver, sender), fields(target = LOG_TARGET))] fn run( relay_parent: Hash, span: Arc, @@ -222,7 +222,7 @@ impl CandidateSelectionJob { Ok(()) } - #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))] + #[tracing::instrument(level = "trace", skip(self), fields(target = LOG_TARGET))] async fn handle_collation( &mut self, relay_parent: Hash, @@ -280,7 +280,7 @@ impl CandidateSelectionJob { } } - #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))] + #[tracing::instrument(level = "trace", skip(self), fields(target = LOG_TARGET))] async fn handle_invalid(&mut self, candidate_receipt: CandidateReceipt) { let _timer = self.metrics.time_handle_invalid(); @@ -358,7 +358,7 @@ impl CandidateSelectionJob { // get a collation from the Collator Protocol subsystem // // note that this gets an owned clone of the sender; that's becuase unlike `forward_invalidity_note`, it's expected to take a while longer -#[tracing::instrument(level = "trace", skip(sender), fields(subsystem = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(sender), fields(target = LOG_TARGET))] async fn get_collation( relay_parent: Hash, para_id: ParaId, diff --git a/node/core/candidate-validation/src/lib.rs b/node/core/candidate-validation/src/lib.rs index f6ca38437bfa..f960226ae1e0 100644 --- a/node/core/candidate-validation/src/lib.rs +++ b/node/core/candidate-validation/src/lib.rs @@ -85,7 +85,7 @@ impl Subsystem for CandidateValidationSubsystem where } } -#[tracing::instrument(skip(ctx, spawn, metrics), fields(subsystem = LOG_TARGET))] +#[tracing::instrument(skip(ctx, spawn, metrics), fields(target = LOG_TARGET))] async fn run( mut ctx: impl SubsystemContext, spawn: impl 
SpawnNamed + Clone + 'static, @@ -183,7 +183,7 @@ enum AssumptionCheckOutcome { BadRequest, } -#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx), fields(target = LOG_TARGET))] async fn check_assumption_validation_data( ctx: &mut impl SubsystemContext, descriptor: &CandidateDescriptor, @@ -234,7 +234,7 @@ async fn check_assumption_validation_data( }) } -#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx), fields(target = LOG_TARGET))] async fn find_assumed_validation_data( ctx: &mut impl SubsystemContext, descriptor: &CandidateDescriptor, @@ -266,7 +266,7 @@ async fn find_assumed_validation_data( Ok(AssumptionCheckOutcome::DoesNotMatch) } -#[tracing::instrument(level = "trace", skip(ctx, pov, spawn, metrics), fields(subsystem = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx, pov, spawn, metrics), fields(target = LOG_TARGET))] async fn spawn_validate_from_chain_state( ctx: &mut impl SubsystemContext, isolation_strategy: IsolationStrategy, @@ -328,7 +328,7 @@ async fn spawn_validate_from_chain_state( validation_result } -#[tracing::instrument(level = "trace", skip(ctx, validation_code, pov, spawn, metrics), fields(subsystem = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx, validation_code, pov, spawn, metrics), fields(target = LOG_TARGET))] async fn spawn_validate_exhaustive( ctx: &mut impl SubsystemContext, isolation_strategy: IsolationStrategy, @@ -361,7 +361,7 @@ async fn spawn_validate_exhaustive( /// Does basic checks of a candidate. Provide the encoded PoV-block. Returns `Ok` if basic checks /// are passed, `Err` otherwise. -#[tracing::instrument(level = "trace", skip(pov), fields(subsystem = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(pov), fields(target = LOG_TARGET))] fn perform_basic_checks( candidate: &CandidateDescriptor, max_pov_size: u32, @@ -419,7 +419,7 @@ impl ValidationBackend for RealValidationBackend { /// Validates the candidate from exhaustive parameters. /// /// Sends the result of validation on the channel once complete. 
-#[tracing::instrument(level = "trace", skip(backend_arg, validation_code, pov, spawn, metrics), fields(subsystem = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(backend_arg, validation_code, pov, spawn, metrics), fields(target = LOG_TARGET))] fn validate_candidate_exhaustive( backend_arg: B::Arg, persisted_validation_data: PersistedValidationData, diff --git a/node/core/chain-api/src/lib.rs b/node/core/chain-api/src/lib.rs index 534c41e3ef85..262239c81374 100644 --- a/node/core/chain-api/src/lib.rs +++ b/node/core/chain-api/src/lib.rs @@ -77,7 +77,7 @@ impl Subsystem for ChainApiSubsystem where } } -#[tracing::instrument(skip(ctx, subsystem), fields(subsystem = LOG_TARGET))] +#[tracing::instrument(skip(ctx, subsystem), fields(target = LOG_TARGET))] async fn run( mut ctx: impl SubsystemContext, subsystem: ChainApiSubsystem, diff --git a/node/core/provisioner/src/lib.rs b/node/core/provisioner/src/lib.rs index f773ec27a8dc..dff23e254cef 100644 --- a/node/core/provisioner/src/lib.rs +++ b/node/core/provisioner/src/lib.rs @@ -138,7 +138,7 @@ impl JobTrait for ProvisioningJob { /// Run a job for the parent block indicated // // this function is in charge of creating and executing the job's main loop - #[tracing::instrument(skip(span, _run_args, metrics, receiver, sender), fields(subsystem = LOG_TARGET))] + #[tracing::instrument(skip(span, _run_args, metrics, receiver, sender), fields(target = LOG_TARGET))] fn run( relay_parent: Hash, span: Arc, @@ -238,7 +238,7 @@ impl ProvisioningJob { } } - #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))] + #[tracing::instrument(level = "trace", skip(self), fields(target = LOG_TARGET))] fn note_provisionable_data(&mut self, provisionable_data: ProvisionableData) { match provisionable_data { ProvisionableData::Bitfield(_, signed_bitfield) => { @@ -271,7 +271,7 @@ type CoreAvailability = BitVec; /// When we're choosing bitfields to include, the rule should be simple: /// maximize availability. So basically, include all bitfields. And then /// choose a coherent set of candidates along with that. -#[tracing::instrument(level = "trace", skip(return_senders, from_job), fields(subsystem = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(return_senders, from_job), fields(target = LOG_TARGET))] async fn send_inherent_data( relay_parent: Hash, bitfields: &[SignedAvailabilityBitfield], @@ -311,7 +311,7 @@ async fn send_inherent_data( /// /// Note: This does not enforce any sorting precondition on the output; the ordering there will be unrelated /// to the sorting of the input. -#[tracing::instrument(level = "trace", fields(subsystem = LOG_TARGET))] +#[tracing::instrument(level = "trace", fields(target = LOG_TARGET))] fn select_availability_bitfields( cores: &[CoreState], bitfields: &[SignedAvailabilityBitfield], @@ -343,7 +343,7 @@ fn select_availability_bitfields( } /// Determine which cores are free, and then to the degree possible, pick a candidate appropriate to each free core. 
-#[tracing::instrument(level = "trace", skip(sender), fields(subsystem = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(sender), fields(target = LOG_TARGET))] async fn select_candidates( availability_cores: &[CoreState], bitfields: &[SignedAvailabilityBitfield], @@ -465,7 +465,7 @@ async fn select_candidates( /// Produces a block number 1 higher than that of the relay parent /// in the event of an invalid `relay_parent`, returns `Ok(0)` -#[tracing::instrument(level = "trace", skip(sender), fields(subsystem = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(sender), fields(target = LOG_TARGET))] async fn get_block_number_under_construction( relay_parent: Hash, sender: &mut mpsc::Sender, @@ -491,7 +491,7 @@ async fn get_block_number_under_construction( /// - construct a transverse slice along `core_idx` /// - bitwise-or it with the availability slice /// - count the 1 bits, compare to the total length; true on 2/3+ -#[tracing::instrument(level = "trace", fields(subsystem = LOG_TARGET))] +#[tracing::instrument(level = "trace", fields(target = LOG_TARGET))] fn bitfields_indicate_availability( core_idx: usize, bitfields: &[SignedAvailabilityBitfield], diff --git a/node/core/runtime-api/src/lib.rs b/node/core/runtime-api/src/lib.rs index cb32dd57728a..e9f9adbbd243 100644 --- a/node/core/runtime-api/src/lib.rs +++ b/node/core/runtime-api/src/lib.rs @@ -257,7 +257,7 @@ impl RuntimeApiSubsystem where } } -#[tracing::instrument(skip(ctx, subsystem), fields(subsystem = LOG_TARGET))] +#[tracing::instrument(skip(ctx, subsystem), fields(target = LOG_TARGET))] async fn run( mut ctx: impl SubsystemContext, mut subsystem: RuntimeApiSubsystem, @@ -282,7 +282,7 @@ async fn run( } } -#[tracing::instrument(level = "trace", skip(client, metrics), fields(subsystem = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(client, metrics), fields(target = LOG_TARGET))] fn make_runtime_api_request( client: Arc, metrics: Metrics, diff --git a/node/network/approval-distribution/src/lib.rs b/node/network/approval-distribution/src/lib.rs index 4d98d58ba79c..9bdee6d5eff2 100644 --- a/node/network/approval-distribution/src/lib.rs +++ b/node/network/approval-distribution/src/lib.rs @@ -830,7 +830,7 @@ impl State { /// Modify the reputation of a peer based on its behavior. -#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx), fields(target = LOG_TARGET))] async fn modify_reputation( ctx: &mut impl SubsystemContext, peer_id: PeerId, @@ -854,7 +854,7 @@ impl ApprovalDistribution { Self { metrics } } - #[tracing::instrument(skip(self, ctx), fields(subsystem = LOG_TARGET))] + #[tracing::instrument(skip(self, ctx), fields(target = LOG_TARGET))] async fn run(self, ctx: Context) where Context: SubsystemContext, @@ -864,7 +864,7 @@ impl ApprovalDistribution { } /// Used for testing. 
- #[tracing::instrument(skip(self, ctx, state), fields(subsystem = LOG_TARGET))] + #[tracing::instrument(skip(self, ctx, state), fields(target = LOG_TARGET))] async fn run_inner(self, mut ctx: Context, state: &mut State) where Context: SubsystemContext, diff --git a/node/network/availability-distribution/src/requester/mod.rs b/node/network/availability-distribution/src/requester/mod.rs index 914a86ef7def..a2bf7d110823 100644 --- a/node/network/availability-distribution/src/requester/mod.rs +++ b/node/network/availability-distribution/src/requester/mod.rs @@ -213,7 +213,7 @@ impl Stream for Requester { } /// Query all hashes and descriptors of candidates pending availability at a particular block. -#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx), fields(target = LOG_TARGET))] async fn query_occupied_cores( ctx: &mut Context, relay_parent: Hash, diff --git a/node/network/availability-distribution/src/responder.rs b/node/network/availability-distribution/src/responder.rs index c094b17fd666..83dd9ceef258 100644 --- a/node/network/availability-distribution/src/responder.rs +++ b/node/network/availability-distribution/src/responder.rs @@ -57,6 +57,7 @@ where /// Answer an incoming chunk request by querying the av store. /// /// Returns: Ok(true) if chunk was found and served. +#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))] pub async fn answer_request( ctx: &mut Context, req: IncomingRequest, @@ -78,7 +79,7 @@ where } /// Query chunk from the availability store. -#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx), fields(target = LOG_TARGET))] async fn query_chunk( ctx: &mut Context, candidate_hash: CandidateHash, diff --git a/node/network/availability-distribution/src/session_cache.rs b/node/network/availability-distribution/src/session_cache.rs index 15f5cad3c757..8e4ff9ec05f3 100644 --- a/node/network/availability-distribution/src/session_cache.rs +++ b/node/network/availability-distribution/src/session_cache.rs @@ -182,7 +182,7 @@ impl SessionCache { /// /// We assume validators in a group are tried in reverse order, so the reported bad validators /// will be put at the beginning of the group. - #[tracing::instrument(level = "trace", skip(self, report), fields(subsystem = LOG_TARGET))] + #[tracing::instrument(level = "trace", skip(self, report), fields(target = LOG_TARGET))] pub fn report_bad(&mut self, report: BadValidators) -> Result<()> { let session = self .session_info_cache diff --git a/node/network/availability-recovery/src/lib.rs b/node/network/availability-recovery/src/lib.rs index a18fe1eda96d..a3ffb5c3d1f1 100644 --- a/node/network/availability-recovery/src/lib.rs +++ b/node/network/availability-recovery/src/lib.rs @@ -589,7 +589,7 @@ async fn report_peer( } /// Machinery around launching interactions into the background. -#[tracing::instrument(level = "trace", skip(ctx, state), fields(subsystem = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx, state), fields(target = LOG_TARGET))] async fn launch_interaction( state: &mut State, ctx: &mut impl SubsystemContext, @@ -654,7 +654,7 @@ async fn launch_interaction( } /// Handles an availability recovery request. 
-#[tracing::instrument(level = "trace", skip(ctx, state), fields(subsystem = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx, state), fields(target = LOG_TARGET))] async fn handle_recover( state: &mut State, ctx: &mut impl SubsystemContext, @@ -718,7 +718,7 @@ async fn handle_recover( } /// Queries a chunk from av-store. -#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx), fields(target = LOG_TARGET))] async fn query_chunk( ctx: &mut impl SubsystemContext, candidate_hash: CandidateHash, @@ -733,7 +733,7 @@ async fn query_chunk( } /// Queries a chunk from av-store. -#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx), fields(target = LOG_TARGET))] async fn query_full_data( ctx: &mut impl SubsystemContext, candidate_hash: CandidateHash, @@ -747,7 +747,7 @@ async fn query_full_data( } /// Handles message from interaction. -#[tracing::instrument(level = "trace", skip(ctx, state), fields(subsystem = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx, state), fields(target = LOG_TARGET))] async fn handle_from_interaction( state: &mut State, ctx: &mut impl SubsystemContext, @@ -827,7 +827,7 @@ async fn handle_from_interaction( } /// Handles a network bridge update. -#[tracing::instrument(level = "trace", skip(ctx, state), fields(subsystem = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx, state), fields(target = LOG_TARGET))] async fn handle_network_update( state: &mut State, ctx: &mut impl SubsystemContext, diff --git a/node/network/bridge/src/lib.rs b/node/network/bridge/src/lib.rs index a49363846cee..e2ce2eaf85b7 100644 --- a/node/network/bridge/src/lib.rs +++ b/node/network/bridge/src/lib.rs @@ -142,7 +142,7 @@ struct PeerData { } /// Main driver, processing network events and messages from other subsystems. -#[tracing::instrument(skip(bridge, ctx), fields(subsystem = LOG_TARGET))] +#[tracing::instrument(skip(bridge, ctx), fields(target = LOG_TARGET))] async fn run_network( mut bridge: NetworkBridge, mut ctx: impl SubsystemContext, @@ -417,7 +417,7 @@ fn construct_view(live_heads: impl DoubleEndedIterator, finalized_n ) } -#[tracing::instrument(level = "trace", skip(net, ctx, validation_peers, collation_peers), fields(subsystem = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(net, ctx, validation_peers, collation_peers), fields(target = LOG_TARGET))] async fn update_our_view( net: &mut impl Network, ctx: &mut impl SubsystemContext, @@ -460,7 +460,7 @@ async fn update_our_view( // Handle messages on a specific peer-set. The peer is expected to be connected on that // peer-set. 
-#[tracing::instrument(level = "trace", skip(peers, messages, net), fields(subsystem = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(peers, messages, net), fields(target = LOG_TARGET))] async fn handle_peer_messages( peer: PeerId, peers: &mut HashMap, @@ -516,7 +516,7 @@ async fn handle_peer_messages( Ok(outgoing_messages) } -#[tracing::instrument(level = "trace", skip(net, peers), fields(subsystem = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(net, peers), fields(target = LOG_TARGET))] async fn send_validation_message( net: &mut impl Network, peers: I, @@ -529,7 +529,7 @@ async fn send_validation_message( send_message(net, peers, PeerSet::Validation, message).await } -#[tracing::instrument(level = "trace", skip(net, peers), fields(subsystem = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(net, peers), fields(target = LOG_TARGET))] async fn send_collation_message( net: &mut impl Network, peers: I, @@ -557,7 +557,7 @@ async fn dispatch_collation_event_to_all( dispatch_collation_events_to_all(std::iter::once(event), ctx).await } -#[tracing::instrument(level = "trace", skip(events, ctx), fields(subsystem = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(events, ctx), fields(target = LOG_TARGET))] async fn dispatch_validation_events_to_all( events: I, ctx: &mut impl SubsystemContext, @@ -569,7 +569,7 @@ async fn dispatch_validation_events_to_all( ctx.send_messages(events.into_iter().flat_map(AllMessages::dispatch_iter)).await } -#[tracing::instrument(level = "trace", skip(events, ctx), fields(subsystem = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(events, ctx), fields(target = LOG_TARGET))] async fn dispatch_collation_events_to_all( events: I, ctx: &mut impl SubsystemContext, diff --git a/node/network/bridge/src/network.rs b/node/network/bridge/src/network.rs index ed25f9f36827..4700a0549f52 100644 --- a/node/network/bridge/src/network.rs +++ b/node/network/bridge/src/network.rs @@ -151,7 +151,7 @@ impl Network for Arc> { NetworkService::event_stream(self, "polkadot-network-bridge").boxed() } - #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))] + #[tracing::instrument(level = "trace", skip(self), fields(target = LOG_TARGET))] fn action_sink<'a>( &'a mut self, ) -> Pin + Send + 'a>> { diff --git a/node/network/bridge/src/validator_discovery.rs b/node/network/bridge/src/validator_discovery.rs index 87ff378fd622..e4c3cbbb7d04 100644 --- a/node/network/bridge/src/validator_discovery.rs +++ b/node/network/bridge/src/validator_discovery.rs @@ -169,7 +169,7 @@ impl Service { /// Find connected validators using the given `validator_ids`. /// /// Returns a [`HashMap`] that contains the found [`AuthorityDiscoveryId`]'s and their associated [`PeerId`]'s. - #[tracing::instrument(level = "trace", skip(self, authority_discovery_service), fields(subsystem = LOG_TARGET))] + #[tracing::instrument(level = "trace", skip(self, authority_discovery_service), fields(target = LOG_TARGET))] async fn find_connected_validators( &mut self, validator_ids: &[AuthorityDiscoveryId], @@ -216,7 +216,7 @@ impl Service { /// This method will also clean up all previously revoked requests. /// it takes `network_service` and `authority_discovery_service` by value /// and returns them as a workaround for the Future: Send requirement imposed by async fn impl. 
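Most of the instrumented functions in this area are `async`, which is one reason the span is attached via the attribute macro rather than a manual guard: a guard returned by `Span::enter` and held across an `.await` would keep the span entered while the future is parked, whereas `#[tracing::instrument]` enters and exits the span around each poll. A hedged sketch of the equivalence, with stand-in names throughout:

use tracing::Instrument;

async fn connect(peer: u32) {
    // Work that may await would go here.
    let _ = peer;
}

// Annotating `connect` with `#[tracing::instrument]` is roughly equivalent
// to instrumenting the call site like this:
async fn caller() {
    let span = tracing::trace_span!("connect", peer = 7u32);
    connect(7).instrument(span).await;
}
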
- #[tracing::instrument(level = "trace", skip(self, connected, network_service, authority_discovery_service), fields(subsystem = LOG_TARGET))] + #[tracing::instrument(level = "trace", skip(self, connected, network_service, authority_discovery_service), fields(target = LOG_TARGET))] pub async fn on_request( &mut self, validator_ids: Vec, @@ -335,7 +335,7 @@ impl Service { } /// Should be called when a peer connected. - #[tracing::instrument(level = "trace", skip(self, authority_discovery_service), fields(subsystem = LOG_TARGET))] + #[tracing::instrument(level = "trace", skip(self, authority_discovery_service), fields(target = LOG_TARGET))] pub async fn on_peer_connected( &mut self, peer_id: PeerId, diff --git a/node/network/collator-protocol/src/collator_side.rs b/node/network/collator-protocol/src/collator_side.rs index 91d6768dc665..7f28e58697f0 100644 --- a/node/network/collator-protocol/src/collator_side.rs +++ b/node/network/collator-protocol/src/collator_side.rs @@ -260,7 +260,7 @@ impl State { /// or the relay-parent isn't in the active-leaves set, we ignore the message /// as it must be invalid in that case - although this indicates a logic error /// elsewhere in the node. -#[tracing::instrument(level = "trace", skip(ctx, state, pov), fields(subsystem = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx, state, pov), fields(target = LOG_TARGET))] async fn distribute_collation( ctx: &mut impl SubsystemContext, state: &mut State, @@ -338,7 +338,7 @@ async fn distribute_collation( /// Get the Id of the Core that is assigned to the para being collated on if any /// and the total number of cores. -#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx), fields(target = LOG_TARGET))] async fn determine_core( ctx: &mut impl SubsystemContext, para_id: ParaId, @@ -360,7 +360,7 @@ async fn determine_core( /// Figure out current and next group of validators assigned to the para being collated on. /// /// Returns [`ValidatorId`]'s of current and next group as determined based on the `relay_parent`. -#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx), fields(target = LOG_TARGET))] async fn determine_our_validators( ctx: &mut impl SubsystemContext, core_index: CoreIndex, @@ -386,7 +386,7 @@ async fn determine_our_validators( } /// Issue a `Declare` collation message to the given `peer`. -#[tracing::instrument(level = "trace", skip(ctx, state), fields(subsystem = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx, state), fields(target = LOG_TARGET))] async fn declare( ctx: &mut impl SubsystemContext, state: &mut State, @@ -404,7 +404,7 @@ async fn declare( /// Issue a connection request to a set of validators and /// revoke the previous connection request. -#[tracing::instrument(level = "trace", skip(ctx, state), fields(subsystem = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx, state), fields(target = LOG_TARGET))] async fn connect_to_validators( ctx: &mut impl SubsystemContext, relay_parent: Hash, @@ -428,7 +428,7 @@ async fn connect_to_validators( /// /// This will only advertise a collation if there exists one for the given `relay_parent` and the given `peer` is /// set as validator for our para at the given `relay_parent`. 
-#[tracing::instrument(level = "trace", skip(ctx, state), fields(subsystem = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx, state), fields(target = LOG_TARGET))] async fn advertise_collation( ctx: &mut impl SubsystemContext, state: &mut State, @@ -484,7 +484,7 @@ async fn advertise_collation( } /// The main incoming message dispatching switch. -#[tracing::instrument(level = "trace", skip(ctx, state), fields(subsystem = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx, state), fields(target = LOG_TARGET))] async fn process_msg( ctx: &mut impl SubsystemContext, state: &mut State, @@ -568,7 +568,7 @@ async fn process_msg( } /// Issue a response to a previously requested collation. -#[tracing::instrument(level = "trace", skip(ctx, state, pov), fields(subsystem = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx, state, pov), fields(target = LOG_TARGET))] async fn send_collation( ctx: &mut impl SubsystemContext, state: &mut State, @@ -602,7 +602,7 @@ async fn send_collation( } /// A networking messages switch. -#[tracing::instrument(level = "trace", skip(ctx, state), fields(subsystem = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx, state), fields(target = LOG_TARGET))] async fn handle_incoming_peer_message( ctx: &mut impl SubsystemContext, state: &mut State, @@ -685,7 +685,7 @@ async fn handle_incoming_peer_message( } /// Our view has changed. -#[tracing::instrument(level = "trace", skip(ctx, state), fields(subsystem = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx, state), fields(target = LOG_TARGET))] async fn handle_peer_view_change( ctx: &mut impl SubsystemContext, state: &mut State, @@ -706,7 +706,7 @@ async fn handle_peer_view_change( /// A validator is connected. /// /// `Declare` that we are a collator with a given `CollatorId`. -#[tracing::instrument(level = "trace", skip(ctx, state), fields(subsystem = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx, state), fields(target = LOG_TARGET))] async fn handle_validator_connected( ctx: &mut impl SubsystemContext, state: &mut State, @@ -735,7 +735,7 @@ async fn handle_validator_connected( } /// Bridge messages switch. -#[tracing::instrument(level = "trace", skip(ctx, state), fields(subsystem = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx, state), fields(target = LOG_TARGET))] async fn handle_network_msg( ctx: &mut impl SubsystemContext, state: &mut State, @@ -767,7 +767,7 @@ async fn handle_network_msg( } /// Handles our view changes. -#[tracing::instrument(level = "trace", skip(state), fields(subsystem = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(state), fields(target = LOG_TARGET))] async fn handle_our_view_change( state: &mut State, view: OurView, @@ -810,7 +810,7 @@ async fn handle_our_view_change( } /// The collator protocol collator side main loop. 
-#[tracing::instrument(skip(ctx, metrics), fields(subsystem = LOG_TARGET))] +#[tracing::instrument(skip(ctx, metrics), fields(target = LOG_TARGET))] pub(crate) async fn run( mut ctx: impl SubsystemContext, our_id: CollatorId, diff --git a/node/network/collator-protocol/src/lib.rs b/node/network/collator-protocol/src/lib.rs index a1201c597e9c..33037d736659 100644 --- a/node/network/collator-protocol/src/lib.rs +++ b/node/network/collator-protocol/src/lib.rs @@ -86,7 +86,7 @@ impl CollatorProtocolSubsystem { } } - #[tracing::instrument(skip(self, ctx), fields(subsystem = LOG_TARGET))] + #[tracing::instrument(skip(self, ctx), fields(target = LOG_TARGET))] async fn run(self, ctx: Context) -> Result<()> where Context: SubsystemContext, @@ -126,7 +126,7 @@ where } /// Modify the reputation of a peer based on its behavior. -#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx), fields(target = LOG_TARGET))] async fn modify_reputation(ctx: &mut Context, peer: PeerId, rep: Rep) where Context: SubsystemContext, diff --git a/node/network/collator-protocol/src/validator_side.rs b/node/network/collator-protocol/src/validator_side.rs index a3a8216e5fc7..a81bd9413e59 100644 --- a/node/network/collator-protocol/src/validator_side.rs +++ b/node/network/collator-protocol/src/validator_side.rs @@ -214,7 +214,7 @@ struct State { } /// Another subsystem has requested to fetch collations on a particular leaf for some para. -#[tracing::instrument(level = "trace", skip(ctx, state, tx), fields(subsystem = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx, state, tx), fields(target = LOG_TARGET))] async fn fetch_collation( ctx: &mut Context, state: &mut State, @@ -242,7 +242,7 @@ where } /// Report a collator for some malicious actions. -#[tracing::instrument(level = "trace", skip(ctx, state), fields(subsystem = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx, state), fields(target = LOG_TARGET))] async fn report_collator( ctx: &mut Context, state: &mut State, @@ -260,7 +260,7 @@ where } /// Some other subsystem has reported a collator as a good one, bump reputation. -#[tracing::instrument(level = "trace", skip(ctx, state), fields(subsystem = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx, state), fields(target = LOG_TARGET))] async fn note_good_collation( ctx: &mut Context, state: &mut State, @@ -275,7 +275,7 @@ where } /// Notify a collator that its collation got seconded. -#[tracing::instrument(level = "trace", skip(ctx, state), fields(subsystem = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx, state), fields(target = LOG_TARGET))] async fn notify_collation_seconded( ctx: &mut impl SubsystemContext, state: &mut State, @@ -310,7 +310,7 @@ async fn notify_collation_seconded( /// A peer's view has changed. A number of things should be done: /// - Ongoing collation requests have to be cancelled. /// - Advertisements by this peer that are no longer relevant have to be removed. -#[tracing::instrument(level = "trace", skip(state), fields(subsystem = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(state), fields(target = LOG_TARGET))] async fn handle_peer_view_change( state: &mut State, peer_id: PeerId, @@ -352,7 +352,7 @@ async fn handle_peer_view_change( /// - Cancel all ongoing requests /// - Reply to interested parties if any /// - Store collation. 
-#[tracing::instrument(level = "trace", skip(ctx, state, pov), fields(subsystem = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx, state, pov), fields(target = LOG_TARGET))] async fn received_collation( ctx: &mut Context, state: &mut State, @@ -418,7 +418,7 @@ where /// - Check if the requested collation is in our view. /// - Update PerRequest records with the `result` field if necessary. /// And as such invocations of this function may rely on that. -#[tracing::instrument(level = "trace", skip(ctx, state, result), fields(subsystem = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx, state, result), fields(target = LOG_TARGET))] async fn request_collation( ctx: &mut Context, state: &mut State, @@ -498,7 +498,7 @@ where } /// Notify `CandidateSelectionSubsystem` that a collation has been advertised. -#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx), fields(target = LOG_TARGET))] async fn notify_candidate_selection( ctx: &mut Context, collator: CollatorId, @@ -518,7 +518,7 @@ where } /// Networking message has been received. -#[tracing::instrument(level = "trace", skip(ctx, state), fields(subsystem = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx, state), fields(target = LOG_TARGET))] async fn process_incoming_peer_message( ctx: &mut Context, state: &mut State, @@ -567,7 +567,7 @@ where /// A leaf has become inactive so we want to /// - Cancel all ongoing collation requests that are on top of that leaf. /// - Remove all stored collations relevant to that leaf. -#[tracing::instrument(level = "trace", skip(state), fields(subsystem = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(state), fields(target = LOG_TARGET))] async fn remove_relay_parent( state: &mut State, relay_parent: Hash, @@ -591,7 +591,7 @@ async fn remove_relay_parent( } /// Our view has changed. -#[tracing::instrument(level = "trace", skip(state), fields(subsystem = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(state), fields(target = LOG_TARGET))] async fn handle_our_view_change( state: &mut State, view: OurView, @@ -626,7 +626,7 @@ async fn handle_our_view_change( } /// A request has timed out. -#[tracing::instrument(level = "trace", skip(ctx, state), fields(subsystem = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx, state), fields(target = LOG_TARGET))] async fn request_timed_out( ctx: &mut Context, state: &mut State, @@ -650,7 +650,7 @@ where } /// Bridge event switch. -#[tracing::instrument(level = "trace", skip(ctx, state), fields(subsystem = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx, state), fields(target = LOG_TARGET))] async fn handle_network_msg( ctx: &mut Context, state: &mut State, @@ -685,7 +685,7 @@ where } /// The main message receiver switch. -#[tracing::instrument(level = "trace", skip(ctx, state), fields(subsystem = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx, state), fields(target = LOG_TARGET))] async fn process_msg( ctx: &mut Context, msg: CollatorProtocolMessage, @@ -742,7 +742,7 @@ where } /// The main run loop. 
-#[tracing::instrument(skip(ctx, metrics), fields(subsystem = LOG_TARGET))] +#[tracing::instrument(skip(ctx, metrics), fields(target = LOG_TARGET))] pub(crate) async fn run( mut ctx: Context, request_timeout: Duration, diff --git a/node/network/gossip-support/src/lib.rs b/node/network/gossip-support/src/lib.rs index 8ade01a0ced8..aa1a9ea07398 100644 --- a/node/network/gossip-support/src/lib.rs +++ b/node/network/gossip-support/src/lib.rs @@ -54,7 +54,7 @@ impl GossipSupport { Self {} } - #[tracing::instrument(skip(self, ctx), fields(subsystem = LOG_TARGET))] + #[tracing::instrument(skip(self, ctx), fields(target = LOG_TARGET))] async fn run(self, mut ctx: Context) where Context: SubsystemContext, diff --git a/node/network/pov-distribution/src/lib.rs b/node/network/pov-distribution/src/lib.rs index fc18fb8fb820..f98a470f4bc7 100644 --- a/node/network/pov-distribution/src/lib.rs +++ b/node/network/pov-distribution/src/lib.rs @@ -144,7 +144,7 @@ fn send_pov_message( /// Handles the signal. If successful, returns `true` if the subsystem should conclude, /// `false` otherwise. -#[tracing::instrument(level = "trace", skip(ctx, state), fields(subsystem = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx, state), fields(target = LOG_TARGET))] async fn handle_signal( state: &mut State, ctx: &mut impl SubsystemContext, @@ -211,7 +211,7 @@ async fn handle_signal( /// Notify peers that we are awaiting a given PoV hash. /// /// This only notifies peers who have the relay parent in their view. -#[tracing::instrument(level = "trace", skip(peers, ctx), fields(subsystem = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(peers, ctx), fields(target = LOG_TARGET))] async fn notify_all_we_are_awaiting( peers: &mut HashMap, ctx: &mut impl SubsystemContext, @@ -240,7 +240,7 @@ async fn notify_all_we_are_awaiting( } /// Notify one peer about everything we're awaiting at a given relay-parent. -#[tracing::instrument(level = "trace", skip(ctx, relay_parent_state), fields(subsystem = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx, relay_parent_state), fields(target = LOG_TARGET))] async fn notify_one_we_are_awaiting_many( peer: &PeerId, ctx: &mut impl SubsystemContext, @@ -267,7 +267,7 @@ async fn notify_one_we_are_awaiting_many( } /// Distribute a PoV to peers who are awaiting it. -#[tracing::instrument(level = "trace", skip(peers, ctx, metrics, pov), fields(subsystem = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(peers, ctx, metrics, pov), fields(target = LOG_TARGET))] async fn distribute_to_awaiting( peers: &mut HashMap, ctx: &mut impl SubsystemContext, @@ -408,7 +408,7 @@ async fn determine_relevant_validators( } /// Handles a `FetchPoV` message. -#[tracing::instrument(level = "trace", skip(ctx, state, response_sender), fields(subsystem = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx, state, response_sender), fields(target = LOG_TARGET))] async fn handle_fetch( state: &mut State, ctx: &mut impl SubsystemContext, @@ -460,7 +460,7 @@ async fn handle_fetch( } /// Handles a `DistributePoV` message. -#[tracing::instrument(level = "trace", skip(ctx, state, pov), fields(subsystem = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx, state, pov), fields(target = LOG_TARGET))] async fn handle_distribute( state: &mut State, ctx: &mut impl SubsystemContext, @@ -512,7 +512,7 @@ async fn handle_distribute( } /// Report a reputation change for a peer. 
-#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx), fields(target = LOG_TARGET))] async fn report_peer( ctx: &mut impl SubsystemContext, peer: PeerId, @@ -522,7 +522,7 @@ async fn report_peer( } /// Handle a notification from a peer that they are awaiting some PoVs. -#[tracing::instrument(level = "trace", skip(ctx, state), fields(subsystem = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx, state), fields(target = LOG_TARGET))] async fn handle_awaiting( state: &mut State, ctx: &mut impl SubsystemContext, @@ -576,7 +576,7 @@ async fn handle_awaiting( /// Handle an incoming PoV from our peer. Reports them if unexpected, rewards them if not. /// /// Completes any requests awaiting that PoV. -#[tracing::instrument(level = "trace", skip(ctx, state, encoded_pov), fields(subsystem = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx, state, encoded_pov), fields(target = LOG_TARGET))] async fn handle_incoming_pov( state: &mut State, ctx: &mut impl SubsystemContext, @@ -663,7 +663,7 @@ fn handle_validator_connected(state: &mut State, peer_id: PeerId) { } /// Handles a network bridge update. -#[tracing::instrument(level = "trace", skip(ctx, state), fields(subsystem = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx, state), fields(target = LOG_TARGET))] async fn handle_network_update( state: &mut State, ctx: &mut impl SubsystemContext, @@ -733,7 +733,7 @@ impl PoVDistribution { Self { metrics } } - #[tracing::instrument(skip(self, ctx), fields(subsystem = LOG_TARGET))] + #[tracing::instrument(skip(self, ctx), fields(target = LOG_TARGET))] async fn run( self, ctx: impl SubsystemContext, diff --git a/node/network/statement-distribution/src/lib.rs b/node/network/statement-distribution/src/lib.rs index 5527c6344ccb..9c0f1bf7bf42 100644 --- a/node/network/statement-distribution/src/lib.rs +++ b/node/network/statement-distribution/src/lib.rs @@ -162,7 +162,7 @@ impl PeerRelayParentKnowledge { /// /// This returns `Some(true)` if this is the first time the peer has become aware of a /// candidate with the given hash. - #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))] + #[tracing::instrument(level = "trace", skip(self), fields(target = LOG_TARGET))] fn send(&mut self, fingerprint: &(CompactStatement, ValidatorIndex)) -> Option { let already_known = self.sent_statements.contains(fingerprint) || self.received_statements.contains(fingerprint); @@ -211,7 +211,7 @@ impl PeerRelayParentKnowledge { /// /// This returns `Ok(true)` if this is the first time the peer has become aware of a /// candidate with given hash. - #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))] + #[tracing::instrument(level = "trace", skip(self), fields(target = LOG_TARGET))] fn receive( &mut self, fingerprint: &(CompactStatement, ValidatorIndex), @@ -278,7 +278,7 @@ impl PeerData { /// /// This returns `Some(true)` if this is the first time the peer has become aware of a /// candidate with the given hash. - #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))] + #[tracing::instrument(level = "trace", skip(self), fields(target = LOG_TARGET))] fn send( &mut self, relay_parent: &Hash, @@ -303,7 +303,7 @@ impl PeerData { /// /// This returns `Ok(true)` if this is the first time the peer has become aware of a /// candidate with given hash. 
- #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))] + #[tracing::instrument(level = "trace", skip(self), fields(target = LOG_TARGET))] fn receive( &mut self, relay_parent: &Hash, @@ -422,7 +422,7 @@ impl ActiveHeadData { /// /// Any other statements or those that reference a candidate we are not aware of cannot be accepted /// and will return `NotedStatement::NotUseful`. - #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))] + #[tracing::instrument(level = "trace", skip(self), fields(target = LOG_TARGET))] fn note_statement(&mut self, statement: SignedFullStatement) -> NotedStatement { let validator_index = statement.validator_index(); let comparator = StoredStatementComparator { @@ -503,7 +503,7 @@ fn check_statement_signature( /// circulates the statement to all peers who have not seen it yet, and /// sends all statements dependent on that statement to peers who could previously not receive /// them but now can. -#[tracing::instrument(level = "trace", skip(peers, ctx, active_heads, metrics), fields(subsystem = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(peers, ctx, active_heads, metrics), fields(target = LOG_TARGET))] async fn circulate_statement_and_dependents( peers: &mut HashMap, active_heads: &mut HashMap, @@ -568,7 +568,7 @@ fn statement_message(relay_parent: Hash, statement: SignedFullStatement) /// Circulates a statement to all peers who have not seen it yet, and returns /// an iterator over peers who need to have dependent statements sent. -#[tracing::instrument(level = "trace", skip(peers, ctx), fields(subsystem = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(peers, ctx), fields(target = LOG_TARGET))] async fn circulate_statement( peers: &mut HashMap, ctx: &mut impl SubsystemContext, @@ -602,7 +602,7 @@ async fn circulate_statement( } /// Send all statements about a given candidate hash to a peer. -#[tracing::instrument(level = "trace", skip(peer_data, ctx, active_head, metrics), fields(subsystem = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(peer_data, ctx, active_head, metrics), fields(target = LOG_TARGET))] async fn send_statements_about( peer: PeerId, peer_data: &mut PeerData, @@ -629,7 +629,7 @@ async fn send_statements_about( } /// Send all statements at a given relay-parent to a peer. -#[tracing::instrument(level = "trace", skip(peer_data, ctx, active_head, metrics), fields(subsystem = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(peer_data, ctx, active_head, metrics), fields(target = LOG_TARGET))] async fn send_statements( peer: PeerId, peer_data: &mut PeerData, @@ -669,7 +669,7 @@ async fn report_peer( // // This function checks the signature and ensures the statement is compatible with our // view. It also notifies candidate backing if the statement was previously unknown. -#[tracing::instrument(level = "trace", skip(peer_data, ctx, active_heads, metrics), fields(subsystem = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(peer_data, ctx, active_heads, metrics), fields(target = LOG_TARGET))] async fn handle_incoming_message<'a>( peer: PeerId, peer_data: &mut PeerData, @@ -766,7 +766,7 @@ async fn handle_incoming_message<'a>( } /// Update a peer's view. 
Sends all newly unlocked statements based on the previous -#[tracing::instrument(level = "trace", skip(peer_data, ctx, active_heads, metrics), fields(subsystem = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(peer_data, ctx, active_heads, metrics), fields(target = LOG_TARGET))] async fn update_peer_view_and_send_unlocked( peer: PeerId, peer_data: &mut PeerData, @@ -801,7 +801,7 @@ async fn update_peer_view_and_send_unlocked( } } -#[tracing::instrument(level = "trace", skip(peers, active_heads, ctx, metrics), fields(subsystem = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(peers, active_heads, ctx, metrics), fields(target = LOG_TARGET))] async fn handle_network_update( peers: &mut HashMap, active_heads: &mut HashMap, @@ -889,7 +889,7 @@ async fn handle_network_update( } impl StatementDistribution { - #[tracing::instrument(skip(self, ctx), fields(subsystem = LOG_TARGET))] + #[tracing::instrument(skip(self, ctx), fields(target = LOG_TARGET))] async fn run( self, mut ctx: impl SubsystemContext, diff --git a/node/overseer/src/lib.rs b/node/overseer/src/lib.rs index ae88e53805da..696d6db66c63 100644 --- a/node/overseer/src/lib.rs +++ b/node/overseer/src/lib.rs @@ -197,19 +197,19 @@ pub struct OverseerHandler { impl OverseerHandler { /// Inform the `Overseer` that that some block was imported. - #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))] + #[tracing::instrument(level = "trace", skip(self), fields(target = LOG_TARGET))] pub async fn block_imported(&mut self, block: BlockInfo) { self.send_and_log_error(Event::BlockImported(block)).await } /// Send some message to one of the `Subsystem`s. - #[tracing::instrument(level = "trace", skip(self, msg), fields(subsystem = LOG_TARGET))] + #[tracing::instrument(level = "trace", skip(self, msg), fields(target = LOG_TARGET))] pub async fn send_msg(&mut self, msg: impl Into) { self.send_and_log_error(Event::MsgToSubsystem(msg.into())).await } /// Inform the `Overseer` that some block was finalized. - #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))] + #[tracing::instrument(level = "trace", skip(self), fields(target = LOG_TARGET))] pub async fn block_finalized(&mut self, block: BlockInfo) { self.send_and_log_error(Event::BlockFinalized(block)).await } @@ -221,7 +221,7 @@ impl OverseerHandler { /// Note that due the fact the overseer doesn't store the whole active-leaves set, only deltas, /// the response channel may never return if the hash was deactivated before this call. /// In this case, it's the caller's responsibility to ensure a timeout is set. - #[tracing::instrument(level = "trace", skip(self, response_channel), fields(subsystem = LOG_TARGET))] + #[tracing::instrument(level = "trace", skip(self, response_channel), fields(target = LOG_TARGET))] pub async fn wait_for_activation(&mut self, hash: Hash, response_channel: oneshot::Sender>) { self.send_and_log_error(Event::ExternalRequest(ExternalRequest::WaitForActivation { hash, @@ -230,7 +230,7 @@ impl OverseerHandler { } /// Tell `Overseer` to shutdown. - #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))] + #[tracing::instrument(level = "trace", skip(self), fields(target = LOG_TARGET))] pub async fn stop(&mut self) { self.send_and_log_error(Event::Stop).await } @@ -1776,7 +1776,7 @@ where } /// Run the `Overseer`. 
- #[tracing::instrument(skip(self), fields(subsystem = LOG_TARGET))] + #[tracing::instrument(skip(self), fields(target = LOG_TARGET))] pub async fn run(mut self) -> SubsystemResult<()> { let mut update = ActiveLeavesUpdate::default(); @@ -1856,7 +1856,7 @@ where } } - #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))] + #[tracing::instrument(level = "trace", skip(self), fields(target = LOG_TARGET))] async fn block_imported(&mut self, block: BlockInfo) -> SubsystemResult<()> { match self.active_leaves.entry(block.hash) { hash_map::Entry::Vacant(entry) => entry.insert(block.number), @@ -1880,7 +1880,7 @@ where self.broadcast_signal(OverseerSignal::ActiveLeaves(update)).await } - #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))] + #[tracing::instrument(level = "trace", skip(self), fields(target = LOG_TARGET))] async fn block_finalized(&mut self, block: BlockInfo) -> SubsystemResult<()> { let mut update = ActiveLeavesUpdate::default(); @@ -1909,7 +1909,7 @@ where Ok(()) } - #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))] + #[tracing::instrument(level = "trace", skip(self), fields(target = LOG_TARGET))] async fn broadcast_signal(&mut self, signal: OverseerSignal) -> SubsystemResult<()> { self.candidate_validation_subsystem.send_signal(signal.clone()).await?; self.candidate_backing_subsystem.send_signal(signal.clone()).await?; @@ -1934,7 +1934,7 @@ where Ok(()) } - #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))] + #[tracing::instrument(level = "trace", skip(self), fields(target = LOG_TARGET))] async fn route_message(&mut self, msg: MaybeTimed) -> SubsystemResult<()> { let msg = msg.into_inner(); self.metrics.on_message_relayed(); @@ -2001,7 +2001,7 @@ where Ok(()) } - #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))] + #[tracing::instrument(level = "trace", skip(self), fields(target = LOG_TARGET))] fn on_head_activated(&mut self, hash: &Hash, parent_hash: Option) -> Arc { self.metrics.on_head_activated(); if let Some(listeners) = self.activation_external_listeners.remove(hash) { @@ -2022,14 +2022,14 @@ where span } - #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))] + #[tracing::instrument(level = "trace", skip(self), fields(target = LOG_TARGET))] fn on_head_deactivated(&mut self, hash: &Hash) { self.metrics.on_head_deactivated(); self.activation_external_listeners.remove(hash); self.span_per_active_leaf.remove(hash); } - #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))] + #[tracing::instrument(level = "trace", skip(self), fields(target = LOG_TARGET))] fn clean_up_external_listeners(&mut self) { self.activation_external_listeners.retain(|_, v| { // remove dead listeners @@ -2038,7 +2038,7 @@ where }) } - #[tracing::instrument(level = "trace", skip(self, request), fields(subsystem = LOG_TARGET))] + #[tracing::instrument(level = "trace", skip(self, request), fields(target = LOG_TARGET))] fn handle_external_request(&mut self, request: ExternalRequest) { match request { ExternalRequest::WaitForActivation { hash, response_channel } => { From fc9d82ad201edd0c98017a497046bed6cd1550db Mon Sep 17 00:00:00 2001 From: Robert Klotzner Date: Thu, 4 Mar 2021 15:43:06 +0100 Subject: [PATCH 04/13] Add some tracing instrumentation macros. 
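These attributes give the availability-distribution requester internals the same trace-level spans as the rest of the node. As a rough, self-contained sketch of what one such annotation does (the function, its arguments, and the target string below are illustrative, not taken from this diff): `level` sets the span's verbosity, `skip` omits arguments that are expensive to format or lack a `Debug` implementation, and the generated span covers the whole call, including `.await` points.

const LOG_TARGET: &str = "parachain::availability-distribution";

#[tracing::instrument(level = "trace", skip(payload), fields(target = LOG_TARGET))]
async fn fetch_chunk(index: u32, payload: Vec<u8>) -> usize {
    // Runs inside the generated `fetch_chunk` span; `payload` is not recorded.
    payload.len() + index as usize
}
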
--- .../availability-distribution/src/requester/fetch_task/mod.rs | 2 ++ node/network/availability-distribution/src/requester/mod.rs | 2 ++ node/network/availability-distribution/src/session_cache.rs | 1 + 3 files changed, 5 insertions(+) diff --git a/node/network/availability-distribution/src/requester/fetch_task/mod.rs b/node/network/availability-distribution/src/requester/fetch_task/mod.rs index c976c4cbbbe9..fb9d6fbb2dd8 100644 --- a/node/network/availability-distribution/src/requester/fetch_task/mod.rs +++ b/node/network/availability-distribution/src/requester/fetch_task/mod.rs @@ -175,6 +175,7 @@ impl FetchTask { /// Start fetching a chunk. /// /// A task handling the fetching of the configured chunk will be spawned. + #[tracing::instrument(level = "trace", skip(config, ctx), fields(target = LOG_TARGET))] pub async fn start(config: FetchTaskConfig, ctx: &mut Context) -> Result where Context: SubsystemContext, @@ -247,6 +248,7 @@ enum TaskError { } impl RunningTask { + #[tracing::instrument(level = "trace", skip(self, kill), fields(target = LOG_TARGET))] async fn run(self, kill: oneshot::Receiver<()>) { // Wait for completion/or cancel. let run_it = self.run_inner(); diff --git a/node/network/availability-distribution/src/requester/mod.rs b/node/network/availability-distribution/src/requester/mod.rs index a2bf7d110823..31b823f7367f 100644 --- a/node/network/availability-distribution/src/requester/mod.rs +++ b/node/network/availability-distribution/src/requester/mod.rs @@ -74,6 +74,7 @@ impl Requester { /// /// You must feed it with `ActiveLeavesUpdate` via `update_fetching_heads` and make it progress /// by advancing the stream. + #[tracing::instrument(level = "trace", skip(keystore, metrics), fields(target = LOG_TARGET))] pub fn new(keystore: SyncCryptoStorePtr, metrics: Metrics) -> Self { // All we do is forwarding messages, no need to make this big. // Each sender will get one slot, see @@ -90,6 +91,7 @@ impl Requester { /// Update heads that need availability distribution. /// /// For all active heads we will be fetching our chunks for availabilty distribution. + #[tracing::instrument(level = "trace", skip(self, ctx, update), fields(target = LOG_TARGET))] pub async fn update_fetching_heads( &mut self, ctx: &mut Context, diff --git a/node/network/availability-distribution/src/session_cache.rs b/node/network/availability-distribution/src/session_cache.rs index 8e4ff9ec05f3..471cffab8892 100644 --- a/node/network/availability-distribution/src/session_cache.rs +++ b/node/network/availability-distribution/src/session_cache.rs @@ -116,6 +116,7 @@ impl SessionCache { /// /// Use this function over any `fetch_session_info` if all you need is a reference to /// `SessionInfo`, as it avoids an expensive clone. + #[tracing::instrument(level = "trace", skip(self, ctx, with_info), fields(target = LOG_TARGET))] pub async fn with_session_info( &mut self, ctx: &mut Context, From cad2c6a7fc6a4f796c383a783e4a662d63f2cfda Mon Sep 17 00:00:00 2001 From: Robert Klotzner Date: Thu, 4 Mar 2021 16:41:42 +0100 Subject: [PATCH 05/13] Use int_tags instead of logs. --- node/jaeger/src/lib.rs | 8 ++++++++ .../src/requester/fetch_task/mod.rs | 5 ++--- 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/node/jaeger/src/lib.rs b/node/jaeger/src/lib.rs index fc6ecadf498a..48fc748d85fa 100644 --- a/node/jaeger/src/lib.rs +++ b/node/jaeger/src/lib.rs @@ -313,6 +313,14 @@ impl Span { } } + /// Add an additional int tag to the span. 
+ pub fn add_int_tag(&mut self, tag: &str, value: i64) { + match self { + Self::Enabled(ref mut inner) => inner.add_int_tag(tag, value), + Self::Disabled => {}, + } + } + /// Adds the `FollowsFrom` relationship to this span with respect to the given one. pub fn add_follows_from(&mut self, other: &Self) { match (self, other) { diff --git a/node/network/availability-distribution/src/requester/fetch_task/mod.rs b/node/network/availability-distribution/src/requester/fetch_task/mod.rs index fb9d6fbb2dd8..70e5fc2adc7e 100644 --- a/node/network/availability-distribution/src/requester/fetch_task/mod.rs +++ b/node/network/availability-distribution/src/requester/fetch_task/mod.rs @@ -267,10 +267,8 @@ impl RunningTask { .with_chunk_index(self.request.index.0) .with_relay_parent(&self.relay_parent) .build(); - let mut span_log = _span.log(); // Try validators in reverse order: while let Some(validator) = self.group.pop() { - span_log = span_log.with_int("Try", count as _); // Report retries: if count > 0 { self.metrics.on_retry(); @@ -317,9 +315,10 @@ impl RunningTask { // Ok, let's store it and be happy: self.store_chunk(chunk).await; label = SUCCEEDED; - span_log.with_string("success", "true"); + _span.add_string_tag("success", "true"); break; } + _span.add_int_tag("tries", count as _); self.metrics.on_fetch(label); self.conclude(bad_validators).await; } From 78dee7a82dabbc9600eeee0077f5154e3378efb4 Mon Sep 17 00:00:00 2001 From: Robert Klotzner Date: Thu, 4 Mar 2021 16:59:12 +0100 Subject: [PATCH 06/13] Add span per iteration. --- .../availability-distribution/src/requester/fetch_task/mod.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/node/network/availability-distribution/src/requester/fetch_task/mod.rs b/node/network/availability-distribution/src/requester/fetch_task/mod.rs index 70e5fc2adc7e..bba72ae6358d 100644 --- a/node/network/availability-distribution/src/requester/fetch_task/mod.rs +++ b/node/network/availability-distribution/src/requester/fetch_task/mod.rs @@ -269,6 +269,7 @@ impl RunningTask { .build(); // Try validators in reverse order: while let Some(validator) = self.group.pop() { + let _try_span = _span.child("try"); // Report retries: if count > 0 { self.metrics.on_retry(); From cee4f11bef6049b81a814a167f6df3c4b158e90d Mon Sep 17 00:00:00 2001 From: Robert Klotzner Date: Thu, 4 Mar 2021 16:59:38 +0100 Subject: [PATCH 07/13] Remove span::log functionality. --- node/jaeger/src/lib.rs | 35 ----------------------------------- 1 file changed, 35 deletions(-) diff --git a/node/jaeger/src/lib.rs b/node/jaeger/src/lib.rs index 48fc748d85fa..179734e84fe2 100644 --- a/node/jaeger/src/lib.rs +++ b/node/jaeger/src/lib.rs @@ -329,14 +329,6 @@ impl Span { } } - /// Add logs to this span. - pub fn log(&mut self) -> Log<'_> { - match self { - Self::Enabled(inner) => Log { inner: Some(inner.log()) }, - Self::Disabled => Log { inner: None }, - } - } - /// Helper to check whether jaeger is enabled /// in order to avoid computational overhead. pub const fn is_enabled(&self) -> bool { @@ -347,33 +339,6 @@ impl Span { } } -/// Wrapper around jaeger logs -/// -/// Supporting disabled spans by not logging anything. -pub struct Log<'a> { - inner: Option>, -} - -impl<'a> Log<'a> { - /// Just a wrapper around `mick_jaeger::Log::with_int`. - pub fn with_int(self, key: &str, value: i64) -> Self { - if let Some(log) = self.inner { - Self{inner: Some(log.with_int(key, value))} - } else { - self - } - } - - /// Just a wrapper around `mick_jaeger::Log::with_string`. 
- pub fn with_string(self, key: &str, value: &str) -> Self { - if let Some(log) = self.inner { - Self{inner: Some(log.with_string(key, value))} - } else { - self - } - } -} - impl std::fmt::Debug for Span { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { write!(f, "") From e1c2a2e6ff6f257e702f07d8a77c2668af92b0ef Mon Sep 17 00:00:00 2001 From: Robert Klotzner Date: Thu, 4 Mar 2021 17:02:59 +0100 Subject: [PATCH 08/13] Fix instrumentation log target for real. --- node/core/av-store/src/lib.rs | 4 +-- node/core/backing/src/lib.rs | 8 ++--- node/network/approval-distribution/src/lib.rs | 6 ++-- .../src/session_cache.rs | 4 +-- node/network/availability-recovery/src/lib.rs | 12 ++++---- node/network/bridge/src/lib.rs | 14 ++++----- node/network/bridge/src/network.rs | 2 +- .../network/bridge/src/validator_discovery.rs | 6 ++-- .../collator-protocol/src/collator_side.rs | 28 ++++++++--------- node/network/collator-protocol/src/lib.rs | 4 +-- .../collator-protocol/src/validator_side.rs | 30 +++++++++---------- node/network/gossip-support/src/lib.rs | 2 +- node/network/pov-distribution/src/lib.rs | 22 +++++++------- .../network/statement-distribution/src/lib.rs | 26 ++++++++-------- node/overseer/src/lib.rs | 28 ++++++++--------- 15 files changed, 98 insertions(+), 98 deletions(-) diff --git a/node/core/av-store/src/lib.rs b/node/core/av-store/src/lib.rs index 488b20f9a9d3..2b5dcbd9f809 100644 --- a/node/core/av-store/src/lib.rs +++ b/node/core/av-store/src/lib.rs @@ -508,7 +508,7 @@ where } } -#[tracing::instrument(skip(subsystem, ctx), fields(target = LOG_TARGET))] +#[tracing::instrument(skip(subsystem, ctx), target = LOG_TARGET)] async fn run(mut subsystem: AvailabilityStoreSubsystem, mut ctx: Context) where Context: SubsystemContext, @@ -534,7 +534,7 @@ where } } -#[tracing::instrument(level = "trace", skip(subsystem, ctx), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(subsystem, ctx), target = LOG_TARGET)] async fn run_iteration( ctx: &mut Context, subsystem: &mut AvailabilityStoreSubsystem, diff --git a/node/core/backing/src/lib.rs b/node/core/backing/src/lib.rs index 88c6f9383c8f..a9422e71e3bf 100644 --- a/node/core/backing/src/lib.rs +++ b/node/core/backing/src/lib.rs @@ -865,7 +865,7 @@ impl CandidateBackingJob { } /// Import the statement and kick off validation work if it is a part of our assignment. 
- #[tracing::instrument(level = "trace", skip(self, parent_span), fields(target = LOG_TARGET))] + #[tracing::instrument(level = "trace", skip(self, parent_span), target = LOG_TARGET)] async fn maybe_validate_and_import( &mut self, parent_span: &jaeger::Span, @@ -884,7 +884,7 @@ impl CandidateBackingJob { Ok(()) } - #[tracing::instrument(level = "trace", skip(self), fields(target = LOG_TARGET))] + #[tracing::instrument(level = "trace", skip(self), target = LOG_TARGET)] async fn sign_statement(&self, statement: Statement) -> Option { let signed = self.table_context .validator @@ -897,7 +897,7 @@ impl CandidateBackingJob { Some(signed) } - #[tracing::instrument(level = "trace", skip(self), fields(target = LOG_TARGET))] + #[tracing::instrument(level = "trace", skip(self), target = LOG_TARGET)] fn check_statement_signature(&self, statement: &SignedFullStatement) -> Result<(), Error> { let idx = statement.validator_index().0 as usize; @@ -987,7 +987,7 @@ impl util::JobTrait for CandidateBackingJob { const NAME: &'static str = "CandidateBackingJob"; - #[tracing::instrument(skip(span, keystore, metrics, rx_to, tx_from), fields(target = LOG_TARGET))] + #[tracing::instrument(skip(span, keystore, metrics, rx_to, tx_from), target = LOG_TARGET)] fn run( parent: Hash, span: Arc, diff --git a/node/network/approval-distribution/src/lib.rs b/node/network/approval-distribution/src/lib.rs index 9bdee6d5eff2..d839fa66c507 100644 --- a/node/network/approval-distribution/src/lib.rs +++ b/node/network/approval-distribution/src/lib.rs @@ -830,7 +830,7 @@ impl State { /// Modify the reputation of a peer based on its behavior. -#[tracing::instrument(level = "trace", skip(ctx), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx), target = LOG_TARGET)] async fn modify_reputation( ctx: &mut impl SubsystemContext, peer_id: PeerId, @@ -854,7 +854,7 @@ impl ApprovalDistribution { Self { metrics } } - #[tracing::instrument(skip(self, ctx), fields(target = LOG_TARGET))] + #[tracing::instrument(skip(self, ctx), target = LOG_TARGET)] async fn run(self, ctx: Context) where Context: SubsystemContext, @@ -864,7 +864,7 @@ impl ApprovalDistribution { } /// Used for testing. - #[tracing::instrument(skip(self, ctx, state), fields(target = LOG_TARGET))] + #[tracing::instrument(skip(self, ctx, state), target = LOG_TARGET)] async fn run_inner(self, mut ctx: Context, state: &mut State) where Context: SubsystemContext, diff --git a/node/network/availability-distribution/src/session_cache.rs b/node/network/availability-distribution/src/session_cache.rs index 471cffab8892..705dc6b7236e 100644 --- a/node/network/availability-distribution/src/session_cache.rs +++ b/node/network/availability-distribution/src/session_cache.rs @@ -116,7 +116,7 @@ impl SessionCache { /// /// Use this function over any `fetch_session_info` if all you need is a reference to /// `SessionInfo`, as it avoids an expensive clone. - #[tracing::instrument(level = "trace", skip(self, ctx, with_info), fields(target = LOG_TARGET))] + #[tracing::instrument(level = "trace", skip(self, ctx, with_info), target = LOG_TARGET)] pub async fn with_session_info( &mut self, ctx: &mut Context, @@ -183,7 +183,7 @@ impl SessionCache { /// /// We assume validators in a group are tried in reverse order, so the reported bad validators /// will be put at the beginning of the group. 
- #[tracing::instrument(level = "trace", skip(self, report), fields(target = LOG_TARGET))] + #[tracing::instrument(level = "trace", skip(self, report), target = LOG_TARGET)] pub fn report_bad(&mut self, report: BadValidators) -> Result<()> { let session = self .session_info_cache diff --git a/node/network/availability-recovery/src/lib.rs b/node/network/availability-recovery/src/lib.rs index a3ffb5c3d1f1..466922747c31 100644 --- a/node/network/availability-recovery/src/lib.rs +++ b/node/network/availability-recovery/src/lib.rs @@ -589,7 +589,7 @@ async fn report_peer( } /// Machinery around launching interactions into the background. -#[tracing::instrument(level = "trace", skip(ctx, state), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx, state), target = LOG_TARGET)] async fn launch_interaction( state: &mut State, ctx: &mut impl SubsystemContext, @@ -654,7 +654,7 @@ async fn launch_interaction( } /// Handles an availability recovery request. -#[tracing::instrument(level = "trace", skip(ctx, state), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx, state), target = LOG_TARGET)] async fn handle_recover( state: &mut State, ctx: &mut impl SubsystemContext, @@ -718,7 +718,7 @@ async fn handle_recover( } /// Queries a chunk from av-store. -#[tracing::instrument(level = "trace", skip(ctx), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx), target = LOG_TARGET)] async fn query_chunk( ctx: &mut impl SubsystemContext, candidate_hash: CandidateHash, @@ -733,7 +733,7 @@ async fn query_chunk( } /// Queries a chunk from av-store. -#[tracing::instrument(level = "trace", skip(ctx), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx), target = LOG_TARGET)] async fn query_full_data( ctx: &mut impl SubsystemContext, candidate_hash: CandidateHash, @@ -747,7 +747,7 @@ async fn query_full_data( } /// Handles message from interaction. -#[tracing::instrument(level = "trace", skip(ctx, state), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx, state), target = LOG_TARGET)] async fn handle_from_interaction( state: &mut State, ctx: &mut impl SubsystemContext, @@ -827,7 +827,7 @@ async fn handle_from_interaction( } /// Handles a network bridge update. -#[tracing::instrument(level = "trace", skip(ctx, state), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx, state), target = LOG_TARGET)] async fn handle_network_update( state: &mut State, ctx: &mut impl SubsystemContext, diff --git a/node/network/bridge/src/lib.rs b/node/network/bridge/src/lib.rs index e2ce2eaf85b7..7bbb61a1bba9 100644 --- a/node/network/bridge/src/lib.rs +++ b/node/network/bridge/src/lib.rs @@ -142,7 +142,7 @@ struct PeerData { } /// Main driver, processing network events and messages from other subsystems. 
-#[tracing::instrument(skip(bridge, ctx), fields(target = LOG_TARGET))] +#[tracing::instrument(skip(bridge, ctx), target = LOG_TARGET)] async fn run_network( mut bridge: NetworkBridge, mut ctx: impl SubsystemContext, @@ -417,7 +417,7 @@ fn construct_view(live_heads: impl DoubleEndedIterator, finalized_n ) } -#[tracing::instrument(level = "trace", skip(net, ctx, validation_peers, collation_peers), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(net, ctx, validation_peers, collation_peers), target = LOG_TARGET)] async fn update_our_view( net: &mut impl Network, ctx: &mut impl SubsystemContext, @@ -460,7 +460,7 @@ async fn update_our_view( // Handle messages on a specific peer-set. The peer is expected to be connected on that // peer-set. -#[tracing::instrument(level = "trace", skip(peers, messages, net), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(peers, messages, net), target = LOG_TARGET)] async fn handle_peer_messages( peer: PeerId, peers: &mut HashMap, @@ -516,7 +516,7 @@ async fn handle_peer_messages( Ok(outgoing_messages) } -#[tracing::instrument(level = "trace", skip(net, peers), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(net, peers), target = LOG_TARGET)] async fn send_validation_message( net: &mut impl Network, peers: I, @@ -529,7 +529,7 @@ async fn send_validation_message( send_message(net, peers, PeerSet::Validation, message).await } -#[tracing::instrument(level = "trace", skip(net, peers), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(net, peers), target = LOG_TARGET)] async fn send_collation_message( net: &mut impl Network, peers: I, @@ -557,7 +557,7 @@ async fn dispatch_collation_event_to_all( dispatch_collation_events_to_all(std::iter::once(event), ctx).await } -#[tracing::instrument(level = "trace", skip(events, ctx), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(events, ctx), target = LOG_TARGET)] async fn dispatch_validation_events_to_all( events: I, ctx: &mut impl SubsystemContext, @@ -569,7 +569,7 @@ async fn dispatch_validation_events_to_all( ctx.send_messages(events.into_iter().flat_map(AllMessages::dispatch_iter)).await } -#[tracing::instrument(level = "trace", skip(events, ctx), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(events, ctx), target = LOG_TARGET)] async fn dispatch_collation_events_to_all( events: I, ctx: &mut impl SubsystemContext, diff --git a/node/network/bridge/src/network.rs b/node/network/bridge/src/network.rs index 4700a0549f52..713b56c009c7 100644 --- a/node/network/bridge/src/network.rs +++ b/node/network/bridge/src/network.rs @@ -151,7 +151,7 @@ impl Network for Arc> { NetworkService::event_stream(self, "polkadot-network-bridge").boxed() } - #[tracing::instrument(level = "trace", skip(self), fields(target = LOG_TARGET))] + #[tracing::instrument(level = "trace", skip(self), target = LOG_TARGET)] fn action_sink<'a>( &'a mut self, ) -> Pin + Send + 'a>> { diff --git a/node/network/bridge/src/validator_discovery.rs b/node/network/bridge/src/validator_discovery.rs index e4c3cbbb7d04..36a6093d2ad5 100644 --- a/node/network/bridge/src/validator_discovery.rs +++ b/node/network/bridge/src/validator_discovery.rs @@ -169,7 +169,7 @@ impl Service { /// Find connected validators using the given `validator_ids`. /// /// Returns a [`HashMap`] that contains the found [`AuthorityDiscoveryId`]'s and their associated [`PeerId`]'s. 
- #[tracing::instrument(level = "trace", skip(self, authority_discovery_service), fields(target = LOG_TARGET))] + #[tracing::instrument(level = "trace", skip(self, authority_discovery_service), target = LOG_TARGET)] async fn find_connected_validators( &mut self, validator_ids: &[AuthorityDiscoveryId], @@ -216,7 +216,7 @@ impl Service { /// This method will also clean up all previously revoked requests. /// it takes `network_service` and `authority_discovery_service` by value /// and returns them as a workaround for the Future: Send requirement imposed by async fn impl. - #[tracing::instrument(level = "trace", skip(self, connected, network_service, authority_discovery_service), fields(target = LOG_TARGET))] + #[tracing::instrument(level = "trace", skip(self, connected, network_service, authority_discovery_service), target = LOG_TARGET)] pub async fn on_request( &mut self, validator_ids: Vec, @@ -335,7 +335,7 @@ impl Service { } /// Should be called when a peer connected. - #[tracing::instrument(level = "trace", skip(self, authority_discovery_service), fields(target = LOG_TARGET))] + #[tracing::instrument(level = "trace", skip(self, authority_discovery_service), target = LOG_TARGET)] pub async fn on_peer_connected( &mut self, peer_id: PeerId, diff --git a/node/network/collator-protocol/src/collator_side.rs b/node/network/collator-protocol/src/collator_side.rs index 7f28e58697f0..2c85c63404c2 100644 --- a/node/network/collator-protocol/src/collator_side.rs +++ b/node/network/collator-protocol/src/collator_side.rs @@ -260,7 +260,7 @@ impl State { /// or the relay-parent isn't in the active-leaves set, we ignore the message /// as it must be invalid in that case - although this indicates a logic error /// elsewhere in the node. -#[tracing::instrument(level = "trace", skip(ctx, state, pov), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx, state, pov), target = LOG_TARGET)] async fn distribute_collation( ctx: &mut impl SubsystemContext, state: &mut State, @@ -338,7 +338,7 @@ async fn distribute_collation( /// Get the Id of the Core that is assigned to the para being collated on if any /// and the total number of cores. -#[tracing::instrument(level = "trace", skip(ctx), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx), target = LOG_TARGET)] async fn determine_core( ctx: &mut impl SubsystemContext, para_id: ParaId, @@ -360,7 +360,7 @@ async fn determine_core( /// Figure out current and next group of validators assigned to the para being collated on. /// /// Returns [`ValidatorId`]'s of current and next group as determined based on the `relay_parent`. -#[tracing::instrument(level = "trace", skip(ctx), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx), target = LOG_TARGET)] async fn determine_our_validators( ctx: &mut impl SubsystemContext, core_index: CoreIndex, @@ -386,7 +386,7 @@ async fn determine_our_validators( } /// Issue a `Declare` collation message to the given `peer`. -#[tracing::instrument(level = "trace", skip(ctx, state), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx, state), target = LOG_TARGET)] async fn declare( ctx: &mut impl SubsystemContext, state: &mut State, @@ -404,7 +404,7 @@ async fn declare( /// Issue a connection request to a set of validators and /// revoke the previous connection request. 
-#[tracing::instrument(level = "trace", skip(ctx, state), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx, state), target = LOG_TARGET)] async fn connect_to_validators( ctx: &mut impl SubsystemContext, relay_parent: Hash, @@ -428,7 +428,7 @@ async fn connect_to_validators( /// /// This will only advertise a collation if there exists one for the given `relay_parent` and the given `peer` is /// set as validator for our para at the given `relay_parent`. -#[tracing::instrument(level = "trace", skip(ctx, state), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx, state), target = LOG_TARGET)] async fn advertise_collation( ctx: &mut impl SubsystemContext, state: &mut State, @@ -484,7 +484,7 @@ async fn advertise_collation( } /// The main incoming message dispatching switch. -#[tracing::instrument(level = "trace", skip(ctx, state), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx, state), target = LOG_TARGET)] async fn process_msg( ctx: &mut impl SubsystemContext, state: &mut State, @@ -568,7 +568,7 @@ async fn process_msg( } /// Issue a response to a previously requested collation. -#[tracing::instrument(level = "trace", skip(ctx, state, pov), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx, state, pov), target = LOG_TARGET)] async fn send_collation( ctx: &mut impl SubsystemContext, state: &mut State, @@ -602,7 +602,7 @@ async fn send_collation( } /// A networking messages switch. -#[tracing::instrument(level = "trace", skip(ctx, state), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx, state), target = LOG_TARGET)] async fn handle_incoming_peer_message( ctx: &mut impl SubsystemContext, state: &mut State, @@ -685,7 +685,7 @@ async fn handle_incoming_peer_message( } /// Our view has changed. -#[tracing::instrument(level = "trace", skip(ctx, state), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx, state), target = LOG_TARGET)] async fn handle_peer_view_change( ctx: &mut impl SubsystemContext, state: &mut State, @@ -706,7 +706,7 @@ async fn handle_peer_view_change( /// A validator is connected. /// /// `Declare` that we are a collator with a given `CollatorId`. -#[tracing::instrument(level = "trace", skip(ctx, state), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx, state), target = LOG_TARGET)] async fn handle_validator_connected( ctx: &mut impl SubsystemContext, state: &mut State, @@ -735,7 +735,7 @@ async fn handle_validator_connected( } /// Bridge messages switch. -#[tracing::instrument(level = "trace", skip(ctx, state), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx, state), target = LOG_TARGET)] async fn handle_network_msg( ctx: &mut impl SubsystemContext, state: &mut State, @@ -767,7 +767,7 @@ async fn handle_network_msg( } /// Handles our view changes. -#[tracing::instrument(level = "trace", skip(state), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(state), target = LOG_TARGET)] async fn handle_our_view_change( state: &mut State, view: OurView, @@ -810,7 +810,7 @@ async fn handle_our_view_change( } /// The collator protocol collator side main loop. 
-#[tracing::instrument(skip(ctx, metrics), fields(target = LOG_TARGET))] +#[tracing::instrument(skip(ctx, metrics), target = LOG_TARGET)] pub(crate) async fn run( mut ctx: impl SubsystemContext, our_id: CollatorId, diff --git a/node/network/collator-protocol/src/lib.rs b/node/network/collator-protocol/src/lib.rs index 33037d736659..c9b49dd436ab 100644 --- a/node/network/collator-protocol/src/lib.rs +++ b/node/network/collator-protocol/src/lib.rs @@ -86,7 +86,7 @@ impl CollatorProtocolSubsystem { } } - #[tracing::instrument(skip(self, ctx), fields(target = LOG_TARGET))] + #[tracing::instrument(skip(self, ctx), target = LOG_TARGET)] async fn run(self, ctx: Context) -> Result<()> where Context: SubsystemContext, @@ -126,7 +126,7 @@ where } /// Modify the reputation of a peer based on its behavior. -#[tracing::instrument(level = "trace", skip(ctx), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx), target = LOG_TARGET)] async fn modify_reputation(ctx: &mut Context, peer: PeerId, rep: Rep) where Context: SubsystemContext, diff --git a/node/network/collator-protocol/src/validator_side.rs b/node/network/collator-protocol/src/validator_side.rs index a81bd9413e59..561201518565 100644 --- a/node/network/collator-protocol/src/validator_side.rs +++ b/node/network/collator-protocol/src/validator_side.rs @@ -214,7 +214,7 @@ struct State { } /// Another subsystem has requested to fetch collations on a particular leaf for some para. -#[tracing::instrument(level = "trace", skip(ctx, state, tx), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx, state, tx), target = LOG_TARGET)] async fn fetch_collation( ctx: &mut Context, state: &mut State, @@ -242,7 +242,7 @@ where } /// Report a collator for some malicious actions. -#[tracing::instrument(level = "trace", skip(ctx, state), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx, state), target = LOG_TARGET)] async fn report_collator( ctx: &mut Context, state: &mut State, @@ -260,7 +260,7 @@ where } /// Some other subsystem has reported a collator as a good one, bump reputation. -#[tracing::instrument(level = "trace", skip(ctx, state), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx, state), target = LOG_TARGET)] async fn note_good_collation( ctx: &mut Context, state: &mut State, @@ -275,7 +275,7 @@ where } /// Notify a collator that its collation got seconded. -#[tracing::instrument(level = "trace", skip(ctx, state), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx, state), target = LOG_TARGET)] async fn notify_collation_seconded( ctx: &mut impl SubsystemContext, state: &mut State, @@ -310,7 +310,7 @@ async fn notify_collation_seconded( /// A peer's view has changed. A number of things should be done: /// - Ongoing collation requests have to be cancelled. /// - Advertisements by this peer that are no longer relevant have to be removed. -#[tracing::instrument(level = "trace", skip(state), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(state), target = LOG_TARGET)] async fn handle_peer_view_change( state: &mut State, peer_id: PeerId, @@ -352,7 +352,7 @@ async fn handle_peer_view_change( /// - Cancel all ongoing requests /// - Reply to interested parties if any /// - Store collation. 
-#[tracing::instrument(level = "trace", skip(ctx, state, pov), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx, state, pov), target = LOG_TARGET)] async fn received_collation( ctx: &mut Context, state: &mut State, @@ -418,7 +418,7 @@ where /// - Check if the requested collation is in our view. /// - Update PerRequest records with the `result` field if necessary. /// And as such invocations of this function may rely on that. -#[tracing::instrument(level = "trace", skip(ctx, state, result), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx, state, result), target = LOG_TARGET)] async fn request_collation( ctx: &mut Context, state: &mut State, @@ -498,7 +498,7 @@ where } /// Notify `CandidateSelectionSubsystem` that a collation has been advertised. -#[tracing::instrument(level = "trace", skip(ctx), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx), target = LOG_TARGET)] async fn notify_candidate_selection( ctx: &mut Context, collator: CollatorId, @@ -518,7 +518,7 @@ where } /// Networking message has been received. -#[tracing::instrument(level = "trace", skip(ctx, state), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx, state), target = LOG_TARGET)] async fn process_incoming_peer_message( ctx: &mut Context, state: &mut State, @@ -567,7 +567,7 @@ where /// A leaf has become inactive so we want to /// - Cancel all ongoing collation requests that are on top of that leaf. /// - Remove all stored collations relevant to that leaf. -#[tracing::instrument(level = "trace", skip(state), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(state), target = LOG_TARGET)] async fn remove_relay_parent( state: &mut State, relay_parent: Hash, @@ -591,7 +591,7 @@ async fn remove_relay_parent( } /// Our view has changed. -#[tracing::instrument(level = "trace", skip(state), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(state), target = LOG_TARGET)] async fn handle_our_view_change( state: &mut State, view: OurView, @@ -626,7 +626,7 @@ async fn handle_our_view_change( } /// A request has timed out. -#[tracing::instrument(level = "trace", skip(ctx, state), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx, state), target = LOG_TARGET)] async fn request_timed_out( ctx: &mut Context, state: &mut State, @@ -650,7 +650,7 @@ where } /// Bridge event switch. -#[tracing::instrument(level = "trace", skip(ctx, state), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx, state), target = LOG_TARGET)] async fn handle_network_msg( ctx: &mut Context, state: &mut State, @@ -685,7 +685,7 @@ where } /// The main message receiver switch. -#[tracing::instrument(level = "trace", skip(ctx, state), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx, state), target = LOG_TARGET)] async fn process_msg( ctx: &mut Context, msg: CollatorProtocolMessage, @@ -742,7 +742,7 @@ where } /// The main run loop. 
-#[tracing::instrument(skip(ctx, metrics), fields(target = LOG_TARGET))] +#[tracing::instrument(skip(ctx, metrics), target = LOG_TARGET)] pub(crate) async fn run( mut ctx: Context, request_timeout: Duration, diff --git a/node/network/gossip-support/src/lib.rs b/node/network/gossip-support/src/lib.rs index aa1a9ea07398..d5d5b28a21aa 100644 --- a/node/network/gossip-support/src/lib.rs +++ b/node/network/gossip-support/src/lib.rs @@ -54,7 +54,7 @@ impl GossipSupport { Self {} } - #[tracing::instrument(skip(self, ctx), fields(target = LOG_TARGET))] + #[tracing::instrument(skip(self, ctx), target = LOG_TARGET)] async fn run(self, mut ctx: Context) where Context: SubsystemContext, diff --git a/node/network/pov-distribution/src/lib.rs b/node/network/pov-distribution/src/lib.rs index f98a470f4bc7..7cc12bd02101 100644 --- a/node/network/pov-distribution/src/lib.rs +++ b/node/network/pov-distribution/src/lib.rs @@ -144,7 +144,7 @@ fn send_pov_message( /// Handles the signal. If successful, returns `true` if the subsystem should conclude, /// `false` otherwise. -#[tracing::instrument(level = "trace", skip(ctx, state), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx, state), target = LOG_TARGET)] async fn handle_signal( state: &mut State, ctx: &mut impl SubsystemContext, @@ -211,7 +211,7 @@ async fn handle_signal( /// Notify peers that we are awaiting a given PoV hash. /// /// This only notifies peers who have the relay parent in their view. -#[tracing::instrument(level = "trace", skip(peers, ctx), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(peers, ctx), target = LOG_TARGET)] async fn notify_all_we_are_awaiting( peers: &mut HashMap, ctx: &mut impl SubsystemContext, @@ -240,7 +240,7 @@ async fn notify_all_we_are_awaiting( } /// Notify one peer about everything we're awaiting at a given relay-parent. -#[tracing::instrument(level = "trace", skip(ctx, relay_parent_state), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx, relay_parent_state), target = LOG_TARGET)] async fn notify_one_we_are_awaiting_many( peer: &PeerId, ctx: &mut impl SubsystemContext, @@ -267,7 +267,7 @@ async fn notify_one_we_are_awaiting_many( } /// Distribute a PoV to peers who are awaiting it. -#[tracing::instrument(level = "trace", skip(peers, ctx, metrics, pov), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(peers, ctx, metrics, pov), target = LOG_TARGET)] async fn distribute_to_awaiting( peers: &mut HashMap, ctx: &mut impl SubsystemContext, @@ -408,7 +408,7 @@ async fn determine_relevant_validators( } /// Handles a `FetchPoV` message. -#[tracing::instrument(level = "trace", skip(ctx, state, response_sender), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx, state, response_sender), target = LOG_TARGET)] async fn handle_fetch( state: &mut State, ctx: &mut impl SubsystemContext, @@ -460,7 +460,7 @@ async fn handle_fetch( } /// Handles a `DistributePoV` message. -#[tracing::instrument(level = "trace", skip(ctx, state, pov), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx, state, pov), target = LOG_TARGET)] async fn handle_distribute( state: &mut State, ctx: &mut impl SubsystemContext, @@ -512,7 +512,7 @@ async fn handle_distribute( } /// Report a reputation change for a peer. 
-#[tracing::instrument(level = "trace", skip(ctx), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx), target = LOG_TARGET)] async fn report_peer( ctx: &mut impl SubsystemContext, peer: PeerId, @@ -522,7 +522,7 @@ async fn report_peer( } /// Handle a notification from a peer that they are awaiting some PoVs. -#[tracing::instrument(level = "trace", skip(ctx, state), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx, state), target = LOG_TARGET)] async fn handle_awaiting( state: &mut State, ctx: &mut impl SubsystemContext, @@ -576,7 +576,7 @@ async fn handle_awaiting( /// Handle an incoming PoV from our peer. Reports them if unexpected, rewards them if not. /// /// Completes any requests awaiting that PoV. -#[tracing::instrument(level = "trace", skip(ctx, state, encoded_pov), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx, state, encoded_pov), target = LOG_TARGET)] async fn handle_incoming_pov( state: &mut State, ctx: &mut impl SubsystemContext, @@ -663,7 +663,7 @@ fn handle_validator_connected(state: &mut State, peer_id: PeerId) { } /// Handles a network bridge update. -#[tracing::instrument(level = "trace", skip(ctx, state), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx, state), target = LOG_TARGET)] async fn handle_network_update( state: &mut State, ctx: &mut impl SubsystemContext, @@ -733,7 +733,7 @@ impl PoVDistribution { Self { metrics } } - #[tracing::instrument(skip(self, ctx), fields(target = LOG_TARGET))] + #[tracing::instrument(skip(self, ctx), target = LOG_TARGET)] async fn run( self, ctx: impl SubsystemContext, diff --git a/node/network/statement-distribution/src/lib.rs b/node/network/statement-distribution/src/lib.rs index 9c0f1bf7bf42..ae68b684b5cb 100644 --- a/node/network/statement-distribution/src/lib.rs +++ b/node/network/statement-distribution/src/lib.rs @@ -162,7 +162,7 @@ impl PeerRelayParentKnowledge { /// /// This returns `Some(true)` if this is the first time the peer has become aware of a /// candidate with the given hash. - #[tracing::instrument(level = "trace", skip(self), fields(target = LOG_TARGET))] + #[tracing::instrument(level = "trace", skip(self), target = LOG_TARGET)] fn send(&mut self, fingerprint: &(CompactStatement, ValidatorIndex)) -> Option { let already_known = self.sent_statements.contains(fingerprint) || self.received_statements.contains(fingerprint); @@ -211,7 +211,7 @@ impl PeerRelayParentKnowledge { /// /// This returns `Ok(true)` if this is the first time the peer has become aware of a /// candidate with given hash. - #[tracing::instrument(level = "trace", skip(self), fields(target = LOG_TARGET))] + #[tracing::instrument(level = "trace", skip(self), target = LOG_TARGET)] fn receive( &mut self, fingerprint: &(CompactStatement, ValidatorIndex), @@ -278,7 +278,7 @@ impl PeerData { /// /// This returns `Some(true)` if this is the first time the peer has become aware of a /// candidate with the given hash. - #[tracing::instrument(level = "trace", skip(self), fields(target = LOG_TARGET))] + #[tracing::instrument(level = "trace", skip(self), target = LOG_TARGET)] fn send( &mut self, relay_parent: &Hash, @@ -303,7 +303,7 @@ impl PeerData { /// /// This returns `Ok(true)` if this is the first time the peer has become aware of a /// candidate with given hash. 
- #[tracing::instrument(level = "trace", skip(self), fields(target = LOG_TARGET))] + #[tracing::instrument(level = "trace", skip(self), target = LOG_TARGET)] fn receive( &mut self, relay_parent: &Hash, @@ -422,7 +422,7 @@ impl ActiveHeadData { /// /// Any other statements or those that reference a candidate we are not aware of cannot be accepted /// and will return `NotedStatement::NotUseful`. - #[tracing::instrument(level = "trace", skip(self), fields(target = LOG_TARGET))] + #[tracing::instrument(level = "trace", skip(self), target = LOG_TARGET)] fn note_statement(&mut self, statement: SignedFullStatement) -> NotedStatement { let validator_index = statement.validator_index(); let comparator = StoredStatementComparator { @@ -503,7 +503,7 @@ fn check_statement_signature( /// circulates the statement to all peers who have not seen it yet, and /// sends all statements dependent on that statement to peers who could previously not receive /// them but now can. -#[tracing::instrument(level = "trace", skip(peers, ctx, active_heads, metrics), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(peers, ctx, active_heads, metrics), target = LOG_TARGET)] async fn circulate_statement_and_dependents( peers: &mut HashMap, active_heads: &mut HashMap, @@ -568,7 +568,7 @@ fn statement_message(relay_parent: Hash, statement: SignedFullStatement) /// Circulates a statement to all peers who have not seen it yet, and returns /// an iterator over peers who need to have dependent statements sent. -#[tracing::instrument(level = "trace", skip(peers, ctx), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(peers, ctx), target = LOG_TARGET)] async fn circulate_statement( peers: &mut HashMap, ctx: &mut impl SubsystemContext, @@ -602,7 +602,7 @@ async fn circulate_statement( } /// Send all statements about a given candidate hash to a peer. -#[tracing::instrument(level = "trace", skip(peer_data, ctx, active_head, metrics), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(peer_data, ctx, active_head, metrics), target = LOG_TARGET)] async fn send_statements_about( peer: PeerId, peer_data: &mut PeerData, @@ -629,7 +629,7 @@ async fn send_statements_about( } /// Send all statements at a given relay-parent to a peer. -#[tracing::instrument(level = "trace", skip(peer_data, ctx, active_head, metrics), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(peer_data, ctx, active_head, metrics), target = LOG_TARGET)] async fn send_statements( peer: PeerId, peer_data: &mut PeerData, @@ -669,7 +669,7 @@ async fn report_peer( // // This function checks the signature and ensures the statement is compatible with our // view. It also notifies candidate backing if the statement was previously unknown. -#[tracing::instrument(level = "trace", skip(peer_data, ctx, active_heads, metrics), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(peer_data, ctx, active_heads, metrics), target = LOG_TARGET)] async fn handle_incoming_message<'a>( peer: PeerId, peer_data: &mut PeerData, @@ -766,7 +766,7 @@ async fn handle_incoming_message<'a>( } /// Update a peer's view. 
Sends all newly unlocked statements based on the previous -#[tracing::instrument(level = "trace", skip(peer_data, ctx, active_heads, metrics), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(peer_data, ctx, active_heads, metrics), target = LOG_TARGET)] async fn update_peer_view_and_send_unlocked( peer: PeerId, peer_data: &mut PeerData, @@ -801,7 +801,7 @@ async fn update_peer_view_and_send_unlocked( } } -#[tracing::instrument(level = "trace", skip(peers, active_heads, ctx, metrics), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(peers, active_heads, ctx, metrics), target = LOG_TARGET)] async fn handle_network_update( peers: &mut HashMap, active_heads: &mut HashMap, @@ -889,7 +889,7 @@ async fn handle_network_update( } impl StatementDistribution { - #[tracing::instrument(skip(self, ctx), fields(target = LOG_TARGET))] + #[tracing::instrument(skip(self, ctx), target = LOG_TARGET)] async fn run( self, mut ctx: impl SubsystemContext, diff --git a/node/overseer/src/lib.rs b/node/overseer/src/lib.rs index 696d6db66c63..a4c9e968c8db 100644 --- a/node/overseer/src/lib.rs +++ b/node/overseer/src/lib.rs @@ -197,19 +197,19 @@ pub struct OverseerHandler { impl OverseerHandler { /// Inform the `Overseer` that that some block was imported. - #[tracing::instrument(level = "trace", skip(self), fields(target = LOG_TARGET))] + #[tracing::instrument(level = "trace", skip(self), target = LOG_TARGET)] pub async fn block_imported(&mut self, block: BlockInfo) { self.send_and_log_error(Event::BlockImported(block)).await } /// Send some message to one of the `Subsystem`s. - #[tracing::instrument(level = "trace", skip(self, msg), fields(target = LOG_TARGET))] + #[tracing::instrument(level = "trace", skip(self, msg), target = LOG_TARGET)] pub async fn send_msg(&mut self, msg: impl Into) { self.send_and_log_error(Event::MsgToSubsystem(msg.into())).await } /// Inform the `Overseer` that some block was finalized. - #[tracing::instrument(level = "trace", skip(self), fields(target = LOG_TARGET))] + #[tracing::instrument(level = "trace", skip(self), target = LOG_TARGET)] pub async fn block_finalized(&mut self, block: BlockInfo) { self.send_and_log_error(Event::BlockFinalized(block)).await } @@ -221,7 +221,7 @@ impl OverseerHandler { /// Note that due the fact the overseer doesn't store the whole active-leaves set, only deltas, /// the response channel may never return if the hash was deactivated before this call. /// In this case, it's the caller's responsibility to ensure a timeout is set. - #[tracing::instrument(level = "trace", skip(self, response_channel), fields(target = LOG_TARGET))] + #[tracing::instrument(level = "trace", skip(self, response_channel), target = LOG_TARGET)] pub async fn wait_for_activation(&mut self, hash: Hash, response_channel: oneshot::Sender>) { self.send_and_log_error(Event::ExternalRequest(ExternalRequest::WaitForActivation { hash, @@ -230,7 +230,7 @@ impl OverseerHandler { } /// Tell `Overseer` to shutdown. - #[tracing::instrument(level = "trace", skip(self), fields(target = LOG_TARGET))] + #[tracing::instrument(level = "trace", skip(self), target = LOG_TARGET)] pub async fn stop(&mut self) { self.send_and_log_error(Event::Stop).await } @@ -1776,7 +1776,7 @@ where } /// Run the `Overseer`. 
- #[tracing::instrument(skip(self), fields(target = LOG_TARGET))] + #[tracing::instrument(skip(self), target = LOG_TARGET)] pub async fn run(mut self) -> SubsystemResult<()> { let mut update = ActiveLeavesUpdate::default(); @@ -1856,7 +1856,7 @@ where } } - #[tracing::instrument(level = "trace", skip(self), fields(target = LOG_TARGET))] + #[tracing::instrument(level = "trace", skip(self), target = LOG_TARGET)] async fn block_imported(&mut self, block: BlockInfo) -> SubsystemResult<()> { match self.active_leaves.entry(block.hash) { hash_map::Entry::Vacant(entry) => entry.insert(block.number), @@ -1880,7 +1880,7 @@ where self.broadcast_signal(OverseerSignal::ActiveLeaves(update)).await } - #[tracing::instrument(level = "trace", skip(self), fields(target = LOG_TARGET))] + #[tracing::instrument(level = "trace", skip(self), target = LOG_TARGET)] async fn block_finalized(&mut self, block: BlockInfo) -> SubsystemResult<()> { let mut update = ActiveLeavesUpdate::default(); @@ -1909,7 +1909,7 @@ where Ok(()) } - #[tracing::instrument(level = "trace", skip(self), fields(target = LOG_TARGET))] + #[tracing::instrument(level = "trace", skip(self), target = LOG_TARGET)] async fn broadcast_signal(&mut self, signal: OverseerSignal) -> SubsystemResult<()> { self.candidate_validation_subsystem.send_signal(signal.clone()).await?; self.candidate_backing_subsystem.send_signal(signal.clone()).await?; @@ -1934,7 +1934,7 @@ where Ok(()) } - #[tracing::instrument(level = "trace", skip(self), fields(target = LOG_TARGET))] + #[tracing::instrument(level = "trace", skip(self), target = LOG_TARGET)] async fn route_message(&mut self, msg: MaybeTimed) -> SubsystemResult<()> { let msg = msg.into_inner(); self.metrics.on_message_relayed(); @@ -2001,7 +2001,7 @@ where Ok(()) } - #[tracing::instrument(level = "trace", skip(self), fields(target = LOG_TARGET))] + #[tracing::instrument(level = "trace", skip(self), target = LOG_TARGET)] fn on_head_activated(&mut self, hash: &Hash, parent_hash: Option) -> Arc { self.metrics.on_head_activated(); if let Some(listeners) = self.activation_external_listeners.remove(hash) { @@ -2022,14 +2022,14 @@ where span } - #[tracing::instrument(level = "trace", skip(self), fields(target = LOG_TARGET))] + #[tracing::instrument(level = "trace", skip(self), target = LOG_TARGET)] fn on_head_deactivated(&mut self, hash: &Hash) { self.metrics.on_head_deactivated(); self.activation_external_listeners.remove(hash); self.span_per_active_leaf.remove(hash); } - #[tracing::instrument(level = "trace", skip(self), fields(target = LOG_TARGET))] + #[tracing::instrument(level = "trace", skip(self), target = LOG_TARGET)] fn clean_up_external_listeners(&mut self) { self.activation_external_listeners.retain(|_, v| { // remove dead listeners @@ -2038,7 +2038,7 @@ where }) } - #[tracing::instrument(level = "trace", skip(self, request), fields(target = LOG_TARGET))] + #[tracing::instrument(level = "trace", skip(self, request), target = LOG_TARGET)] fn handle_external_request(&mut self, request: ExternalRequest) { match request { ExternalRequest::WaitForActivation { hash, response_channel } => { From 738fa8f69c40475f439e455e4f100ce4b425e619 Mon Sep 17 00:00:00 2001 From: Robert Klotzner Date: Thu, 4 Mar 2021 17:21:28 +0100 Subject: [PATCH 09/13] Add jaeger span to responding side as well. 
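In isolation, the pattern applied to the responder below is the same
one the requester side got earlier in this series: open a
candidate-scoped span, tag the pipeline stage, then derive a child span
per unit of work. A condensed sketch, with `candidate_hash` and
`chunk_index` standing in for the request fields used in the actual
diff:

    let mut span = jaeger::candidate_hash_span(&candidate_hash, "answer_request");
    span.add_stage(jaeger::Stage::AvailabilityDistribution);

    {
        let _child_span = span.child_builder("answer_chunk_request")
            .with_chunk_index(chunk_index)
            .build();
        // ... look up and send the chunk while the child span is open ...
    }
    // The child span is finished when dropped, so its duration covers
    // exactly the scope above.

Since `jaeger::Span` treats disabled tracing as a no-op (see the
`Self::Disabled => {}` arm in `add_int_tag` earlier in this series),
all of these calls cost nothing when no Jaeger agent is configured, and
the responder can create spans unconditionally.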
--- node/network/availability-distribution/src/responder.rs | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/node/network/availability-distribution/src/responder.rs b/node/network/availability-distribution/src/responder.rs index 83dd9ceef258..1c6dc3b7980c 100644 --- a/node/network/availability-distribution/src/responder.rs +++ b/node/network/availability-distribution/src/responder.rs @@ -22,7 +22,7 @@ use polkadot_node_network_protocol::request_response::{request::IncomingRequest, use polkadot_primitives::v1::{CandidateHash, ErasureChunk, ValidatorIndex}; use polkadot_subsystem::{ messages::{AllMessages, AvailabilityStoreMessage}, - SubsystemContext, + SubsystemContext, jaeger, }; use crate::error::{Error, Result}; @@ -65,6 +65,12 @@ pub async fn answer_request( where Context: SubsystemContext, { + let mut span = jaeger::candidate_hash_span(&req.payload.candidate_hash, "answer_request"); + span.add_stage(jaeger::Stage::AvailabilityDistribution); + let _child_span = span.child_builder("answer_chunk_request") + .with_chunk_index(req.payload.index.0) + .build(); + let chunk = query_chunk(ctx, req.payload.candidate_hash, req.payload.index).await?; let result = chunk.is_some(); From 2f2b267a8490159bf987a18f23f9689d474c7433 Mon Sep 17 00:00:00 2001 From: Robert Klotzner Date: Thu, 4 Mar 2021 17:22:05 +0100 Subject: [PATCH 10/13] Revert "Fix instrumentation log target for real." This reverts commit e1c2a2e6ff6f257e702f07d8a77c2668af92b0ef. --- node/core/av-store/src/lib.rs | 4 +-- node/core/backing/src/lib.rs | 8 ++--- node/network/approval-distribution/src/lib.rs | 6 ++-- .../src/session_cache.rs | 4 +-- node/network/availability-recovery/src/lib.rs | 12 ++++---- node/network/bridge/src/lib.rs | 14 ++++----- node/network/bridge/src/network.rs | 2 +- .../network/bridge/src/validator_discovery.rs | 6 ++-- .../collator-protocol/src/collator_side.rs | 28 ++++++++--------- node/network/collator-protocol/src/lib.rs | 4 +-- .../collator-protocol/src/validator_side.rs | 30 +++++++++---------- node/network/gossip-support/src/lib.rs | 2 +- node/network/pov-distribution/src/lib.rs | 22 +++++++------- .../network/statement-distribution/src/lib.rs | 26 ++++++++-------- node/overseer/src/lib.rs | 28 ++++++++--------- 15 files changed, 98 insertions(+), 98 deletions(-) diff --git a/node/core/av-store/src/lib.rs b/node/core/av-store/src/lib.rs index 2b5dcbd9f809..488b20f9a9d3 100644 --- a/node/core/av-store/src/lib.rs +++ b/node/core/av-store/src/lib.rs @@ -508,7 +508,7 @@ where } } -#[tracing::instrument(skip(subsystem, ctx), target = LOG_TARGET)] +#[tracing::instrument(skip(subsystem, ctx), fields(target = LOG_TARGET))] async fn run(mut subsystem: AvailabilityStoreSubsystem, mut ctx: Context) where Context: SubsystemContext, @@ -534,7 +534,7 @@ where } } -#[tracing::instrument(level = "trace", skip(subsystem, ctx), target = LOG_TARGET)] +#[tracing::instrument(level = "trace", skip(subsystem, ctx), fields(target = LOG_TARGET))] async fn run_iteration( ctx: &mut Context, subsystem: &mut AvailabilityStoreSubsystem, diff --git a/node/core/backing/src/lib.rs b/node/core/backing/src/lib.rs index a9422e71e3bf..88c6f9383c8f 100644 --- a/node/core/backing/src/lib.rs +++ b/node/core/backing/src/lib.rs @@ -865,7 +865,7 @@ impl CandidateBackingJob { } /// Import the statement and kick off validation work if it is a part of our assignment. 
- #[tracing::instrument(level = "trace", skip(self, parent_span), target = LOG_TARGET)] + #[tracing::instrument(level = "trace", skip(self, parent_span), fields(target = LOG_TARGET))] async fn maybe_validate_and_import( &mut self, parent_span: &jaeger::Span, @@ -884,7 +884,7 @@ impl CandidateBackingJob { Ok(()) } - #[tracing::instrument(level = "trace", skip(self), target = LOG_TARGET)] + #[tracing::instrument(level = "trace", skip(self), fields(target = LOG_TARGET))] async fn sign_statement(&self, statement: Statement) -> Option { let signed = self.table_context .validator @@ -897,7 +897,7 @@ impl CandidateBackingJob { Some(signed) } - #[tracing::instrument(level = "trace", skip(self), target = LOG_TARGET)] + #[tracing::instrument(level = "trace", skip(self), fields(target = LOG_TARGET))] fn check_statement_signature(&self, statement: &SignedFullStatement) -> Result<(), Error> { let idx = statement.validator_index().0 as usize; @@ -987,7 +987,7 @@ impl util::JobTrait for CandidateBackingJob { const NAME: &'static str = "CandidateBackingJob"; - #[tracing::instrument(skip(span, keystore, metrics, rx_to, tx_from), target = LOG_TARGET)] + #[tracing::instrument(skip(span, keystore, metrics, rx_to, tx_from), fields(target = LOG_TARGET))] fn run( parent: Hash, span: Arc, diff --git a/node/network/approval-distribution/src/lib.rs b/node/network/approval-distribution/src/lib.rs index d839fa66c507..9bdee6d5eff2 100644 --- a/node/network/approval-distribution/src/lib.rs +++ b/node/network/approval-distribution/src/lib.rs @@ -830,7 +830,7 @@ impl State { /// Modify the reputation of a peer based on its behavior. -#[tracing::instrument(level = "trace", skip(ctx), target = LOG_TARGET)] +#[tracing::instrument(level = "trace", skip(ctx), fields(target = LOG_TARGET))] async fn modify_reputation( ctx: &mut impl SubsystemContext, peer_id: PeerId, @@ -854,7 +854,7 @@ impl ApprovalDistribution { Self { metrics } } - #[tracing::instrument(skip(self, ctx), target = LOG_TARGET)] + #[tracing::instrument(skip(self, ctx), fields(target = LOG_TARGET))] async fn run(self, ctx: Context) where Context: SubsystemContext, @@ -864,7 +864,7 @@ impl ApprovalDistribution { } /// Used for testing. - #[tracing::instrument(skip(self, ctx, state), target = LOG_TARGET)] + #[tracing::instrument(skip(self, ctx, state), fields(target = LOG_TARGET))] async fn run_inner(self, mut ctx: Context, state: &mut State) where Context: SubsystemContext, diff --git a/node/network/availability-distribution/src/session_cache.rs b/node/network/availability-distribution/src/session_cache.rs index 705dc6b7236e..471cffab8892 100644 --- a/node/network/availability-distribution/src/session_cache.rs +++ b/node/network/availability-distribution/src/session_cache.rs @@ -116,7 +116,7 @@ impl SessionCache { /// /// Use this function over any `fetch_session_info` if all you need is a reference to /// `SessionInfo`, as it avoids an expensive clone. - #[tracing::instrument(level = "trace", skip(self, ctx, with_info), target = LOG_TARGET)] + #[tracing::instrument(level = "trace", skip(self, ctx, with_info), fields(target = LOG_TARGET))] pub async fn with_session_info( &mut self, ctx: &mut Context, @@ -183,7 +183,7 @@ impl SessionCache { /// /// We assume validators in a group are tried in reverse order, so the reported bad validators /// will be put at the beginning of the group. 
- #[tracing::instrument(level = "trace", skip(self, report), target = LOG_TARGET)] + #[tracing::instrument(level = "trace", skip(self, report), fields(target = LOG_TARGET))] pub fn report_bad(&mut self, report: BadValidators) -> Result<()> { let session = self .session_info_cache diff --git a/node/network/availability-recovery/src/lib.rs b/node/network/availability-recovery/src/lib.rs index 466922747c31..a3ffb5c3d1f1 100644 --- a/node/network/availability-recovery/src/lib.rs +++ b/node/network/availability-recovery/src/lib.rs @@ -589,7 +589,7 @@ async fn report_peer( } /// Machinery around launching interactions into the background. -#[tracing::instrument(level = "trace", skip(ctx, state), target = LOG_TARGET)] +#[tracing::instrument(level = "trace", skip(ctx, state), fields(target = LOG_TARGET))] async fn launch_interaction( state: &mut State, ctx: &mut impl SubsystemContext, @@ -654,7 +654,7 @@ async fn launch_interaction( } /// Handles an availability recovery request. -#[tracing::instrument(level = "trace", skip(ctx, state), target = LOG_TARGET)] +#[tracing::instrument(level = "trace", skip(ctx, state), fields(target = LOG_TARGET))] async fn handle_recover( state: &mut State, ctx: &mut impl SubsystemContext, @@ -718,7 +718,7 @@ async fn handle_recover( } /// Queries a chunk from av-store. -#[tracing::instrument(level = "trace", skip(ctx), target = LOG_TARGET)] +#[tracing::instrument(level = "trace", skip(ctx), fields(target = LOG_TARGET))] async fn query_chunk( ctx: &mut impl SubsystemContext, candidate_hash: CandidateHash, @@ -733,7 +733,7 @@ async fn query_chunk( } /// Queries a chunk from av-store. -#[tracing::instrument(level = "trace", skip(ctx), target = LOG_TARGET)] +#[tracing::instrument(level = "trace", skip(ctx), fields(target = LOG_TARGET))] async fn query_full_data( ctx: &mut impl SubsystemContext, candidate_hash: CandidateHash, @@ -747,7 +747,7 @@ async fn query_full_data( } /// Handles message from interaction. -#[tracing::instrument(level = "trace", skip(ctx, state), target = LOG_TARGET)] +#[tracing::instrument(level = "trace", skip(ctx, state), fields(target = LOG_TARGET))] async fn handle_from_interaction( state: &mut State, ctx: &mut impl SubsystemContext, @@ -827,7 +827,7 @@ async fn handle_from_interaction( } /// Handles a network bridge update. -#[tracing::instrument(level = "trace", skip(ctx, state), target = LOG_TARGET)] +#[tracing::instrument(level = "trace", skip(ctx, state), fields(target = LOG_TARGET))] async fn handle_network_update( state: &mut State, ctx: &mut impl SubsystemContext, diff --git a/node/network/bridge/src/lib.rs b/node/network/bridge/src/lib.rs index 7bbb61a1bba9..e2ce2eaf85b7 100644 --- a/node/network/bridge/src/lib.rs +++ b/node/network/bridge/src/lib.rs @@ -142,7 +142,7 @@ struct PeerData { } /// Main driver, processing network events and messages from other subsystems. 
-#[tracing::instrument(skip(bridge, ctx), target = LOG_TARGET)] +#[tracing::instrument(skip(bridge, ctx), fields(target = LOG_TARGET))] async fn run_network( mut bridge: NetworkBridge, mut ctx: impl SubsystemContext, @@ -417,7 +417,7 @@ fn construct_view(live_heads: impl DoubleEndedIterator, finalized_n ) } -#[tracing::instrument(level = "trace", skip(net, ctx, validation_peers, collation_peers), target = LOG_TARGET)] +#[tracing::instrument(level = "trace", skip(net, ctx, validation_peers, collation_peers), fields(target = LOG_TARGET))] async fn update_our_view( net: &mut impl Network, ctx: &mut impl SubsystemContext, @@ -460,7 +460,7 @@ async fn update_our_view( // Handle messages on a specific peer-set. The peer is expected to be connected on that // peer-set. -#[tracing::instrument(level = "trace", skip(peers, messages, net), target = LOG_TARGET)] +#[tracing::instrument(level = "trace", skip(peers, messages, net), fields(target = LOG_TARGET))] async fn handle_peer_messages( peer: PeerId, peers: &mut HashMap, @@ -516,7 +516,7 @@ async fn handle_peer_messages( Ok(outgoing_messages) } -#[tracing::instrument(level = "trace", skip(net, peers), target = LOG_TARGET)] +#[tracing::instrument(level = "trace", skip(net, peers), fields(target = LOG_TARGET))] async fn send_validation_message( net: &mut impl Network, peers: I, @@ -529,7 +529,7 @@ async fn send_validation_message( send_message(net, peers, PeerSet::Validation, message).await } -#[tracing::instrument(level = "trace", skip(net, peers), target = LOG_TARGET)] +#[tracing::instrument(level = "trace", skip(net, peers), fields(target = LOG_TARGET))] async fn send_collation_message( net: &mut impl Network, peers: I, @@ -557,7 +557,7 @@ async fn dispatch_collation_event_to_all( dispatch_collation_events_to_all(std::iter::once(event), ctx).await } -#[tracing::instrument(level = "trace", skip(events, ctx), target = LOG_TARGET)] +#[tracing::instrument(level = "trace", skip(events, ctx), fields(target = LOG_TARGET))] async fn dispatch_validation_events_to_all( events: I, ctx: &mut impl SubsystemContext, @@ -569,7 +569,7 @@ async fn dispatch_validation_events_to_all( ctx.send_messages(events.into_iter().flat_map(AllMessages::dispatch_iter)).await } -#[tracing::instrument(level = "trace", skip(events, ctx), target = LOG_TARGET)] +#[tracing::instrument(level = "trace", skip(events, ctx), fields(target = LOG_TARGET))] async fn dispatch_collation_events_to_all( events: I, ctx: &mut impl SubsystemContext, diff --git a/node/network/bridge/src/network.rs b/node/network/bridge/src/network.rs index 713b56c009c7..4700a0549f52 100644 --- a/node/network/bridge/src/network.rs +++ b/node/network/bridge/src/network.rs @@ -151,7 +151,7 @@ impl Network for Arc> { NetworkService::event_stream(self, "polkadot-network-bridge").boxed() } - #[tracing::instrument(level = "trace", skip(self), target = LOG_TARGET)] + #[tracing::instrument(level = "trace", skip(self), fields(target = LOG_TARGET))] fn action_sink<'a>( &'a mut self, ) -> Pin + Send + 'a>> { diff --git a/node/network/bridge/src/validator_discovery.rs b/node/network/bridge/src/validator_discovery.rs index 36a6093d2ad5..e4c3cbbb7d04 100644 --- a/node/network/bridge/src/validator_discovery.rs +++ b/node/network/bridge/src/validator_discovery.rs @@ -169,7 +169,7 @@ impl Service { /// Find connected validators using the given `validator_ids`. /// /// Returns a [`HashMap`] that contains the found [`AuthorityDiscoveryId`]'s and their associated [`PeerId`]'s. 
- #[tracing::instrument(level = "trace", skip(self, authority_discovery_service), target = LOG_TARGET)] + #[tracing::instrument(level = "trace", skip(self, authority_discovery_service), fields(target = LOG_TARGET))] async fn find_connected_validators( &mut self, validator_ids: &[AuthorityDiscoveryId], @@ -216,7 +216,7 @@ impl Service { /// This method will also clean up all previously revoked requests. /// it takes `network_service` and `authority_discovery_service` by value /// and returns them as a workaround for the Future: Send requirement imposed by async fn impl. - #[tracing::instrument(level = "trace", skip(self, connected, network_service, authority_discovery_service), target = LOG_TARGET)] + #[tracing::instrument(level = "trace", skip(self, connected, network_service, authority_discovery_service), fields(target = LOG_TARGET))] pub async fn on_request( &mut self, validator_ids: Vec, @@ -335,7 +335,7 @@ impl Service { } /// Should be called when a peer connected. - #[tracing::instrument(level = "trace", skip(self, authority_discovery_service), target = LOG_TARGET)] + #[tracing::instrument(level = "trace", skip(self, authority_discovery_service), fields(target = LOG_TARGET))] pub async fn on_peer_connected( &mut self, peer_id: PeerId, diff --git a/node/network/collator-protocol/src/collator_side.rs b/node/network/collator-protocol/src/collator_side.rs index 2c85c63404c2..7f28e58697f0 100644 --- a/node/network/collator-protocol/src/collator_side.rs +++ b/node/network/collator-protocol/src/collator_side.rs @@ -260,7 +260,7 @@ impl State { /// or the relay-parent isn't in the active-leaves set, we ignore the message /// as it must be invalid in that case - although this indicates a logic error /// elsewhere in the node. -#[tracing::instrument(level = "trace", skip(ctx, state, pov), target = LOG_TARGET)] +#[tracing::instrument(level = "trace", skip(ctx, state, pov), fields(target = LOG_TARGET))] async fn distribute_collation( ctx: &mut impl SubsystemContext, state: &mut State, @@ -338,7 +338,7 @@ async fn distribute_collation( /// Get the Id of the Core that is assigned to the para being collated on if any /// and the total number of cores. -#[tracing::instrument(level = "trace", skip(ctx), target = LOG_TARGET)] +#[tracing::instrument(level = "trace", skip(ctx), fields(target = LOG_TARGET))] async fn determine_core( ctx: &mut impl SubsystemContext, para_id: ParaId, @@ -360,7 +360,7 @@ async fn determine_core( /// Figure out current and next group of validators assigned to the para being collated on. /// /// Returns [`ValidatorId`]'s of current and next group as determined based on the `relay_parent`. -#[tracing::instrument(level = "trace", skip(ctx), target = LOG_TARGET)] +#[tracing::instrument(level = "trace", skip(ctx), fields(target = LOG_TARGET))] async fn determine_our_validators( ctx: &mut impl SubsystemContext, core_index: CoreIndex, @@ -386,7 +386,7 @@ async fn determine_our_validators( } /// Issue a `Declare` collation message to the given `peer`. -#[tracing::instrument(level = "trace", skip(ctx, state), target = LOG_TARGET)] +#[tracing::instrument(level = "trace", skip(ctx, state), fields(target = LOG_TARGET))] async fn declare( ctx: &mut impl SubsystemContext, state: &mut State, @@ -404,7 +404,7 @@ async fn declare( /// Issue a connection request to a set of validators and /// revoke the previous connection request. 
-#[tracing::instrument(level = "trace", skip(ctx, state), target = LOG_TARGET)] +#[tracing::instrument(level = "trace", skip(ctx, state), fields(target = LOG_TARGET))] async fn connect_to_validators( ctx: &mut impl SubsystemContext, relay_parent: Hash, @@ -428,7 +428,7 @@ async fn connect_to_validators( /// /// This will only advertise a collation if there exists one for the given `relay_parent` and the given `peer` is /// set as validator for our para at the given `relay_parent`. -#[tracing::instrument(level = "trace", skip(ctx, state), target = LOG_TARGET)] +#[tracing::instrument(level = "trace", skip(ctx, state), fields(target = LOG_TARGET))] async fn advertise_collation( ctx: &mut impl SubsystemContext, state: &mut State, @@ -484,7 +484,7 @@ async fn advertise_collation( } /// The main incoming message dispatching switch. -#[tracing::instrument(level = "trace", skip(ctx, state), target = LOG_TARGET)] +#[tracing::instrument(level = "trace", skip(ctx, state), fields(target = LOG_TARGET))] async fn process_msg( ctx: &mut impl SubsystemContext, state: &mut State, @@ -568,7 +568,7 @@ async fn process_msg( } /// Issue a response to a previously requested collation. -#[tracing::instrument(level = "trace", skip(ctx, state, pov), target = LOG_TARGET)] +#[tracing::instrument(level = "trace", skip(ctx, state, pov), fields(target = LOG_TARGET))] async fn send_collation( ctx: &mut impl SubsystemContext, state: &mut State, @@ -602,7 +602,7 @@ async fn send_collation( } /// A networking messages switch. -#[tracing::instrument(level = "trace", skip(ctx, state), target = LOG_TARGET)] +#[tracing::instrument(level = "trace", skip(ctx, state), fields(target = LOG_TARGET))] async fn handle_incoming_peer_message( ctx: &mut impl SubsystemContext, state: &mut State, @@ -685,7 +685,7 @@ async fn handle_incoming_peer_message( } /// Our view has changed. -#[tracing::instrument(level = "trace", skip(ctx, state), target = LOG_TARGET)] +#[tracing::instrument(level = "trace", skip(ctx, state), fields(target = LOG_TARGET))] async fn handle_peer_view_change( ctx: &mut impl SubsystemContext, state: &mut State, @@ -706,7 +706,7 @@ async fn handle_peer_view_change( /// A validator is connected. /// /// `Declare` that we are a collator with a given `CollatorId`. -#[tracing::instrument(level = "trace", skip(ctx, state), target = LOG_TARGET)] +#[tracing::instrument(level = "trace", skip(ctx, state), fields(target = LOG_TARGET))] async fn handle_validator_connected( ctx: &mut impl SubsystemContext, state: &mut State, @@ -735,7 +735,7 @@ async fn handle_validator_connected( } /// Bridge messages switch. -#[tracing::instrument(level = "trace", skip(ctx, state), target = LOG_TARGET)] +#[tracing::instrument(level = "trace", skip(ctx, state), fields(target = LOG_TARGET))] async fn handle_network_msg( ctx: &mut impl SubsystemContext, state: &mut State, @@ -767,7 +767,7 @@ async fn handle_network_msg( } /// Handles our view changes. -#[tracing::instrument(level = "trace", skip(state), target = LOG_TARGET)] +#[tracing::instrument(level = "trace", skip(state), fields(target = LOG_TARGET))] async fn handle_our_view_change( state: &mut State, view: OurView, @@ -810,7 +810,7 @@ async fn handle_our_view_change( } /// The collator protocol collator side main loop. 
-#[tracing::instrument(skip(ctx, metrics), target = LOG_TARGET)] +#[tracing::instrument(skip(ctx, metrics), fields(target = LOG_TARGET))] pub(crate) async fn run( mut ctx: impl SubsystemContext, our_id: CollatorId, diff --git a/node/network/collator-protocol/src/lib.rs b/node/network/collator-protocol/src/lib.rs index c9b49dd436ab..33037d736659 100644 --- a/node/network/collator-protocol/src/lib.rs +++ b/node/network/collator-protocol/src/lib.rs @@ -86,7 +86,7 @@ impl CollatorProtocolSubsystem { } } - #[tracing::instrument(skip(self, ctx), target = LOG_TARGET)] + #[tracing::instrument(skip(self, ctx), fields(target = LOG_TARGET))] async fn run(self, ctx: Context) -> Result<()> where Context: SubsystemContext, @@ -126,7 +126,7 @@ where } /// Modify the reputation of a peer based on its behavior. -#[tracing::instrument(level = "trace", skip(ctx), target = LOG_TARGET)] +#[tracing::instrument(level = "trace", skip(ctx), fields(target = LOG_TARGET))] async fn modify_reputation(ctx: &mut Context, peer: PeerId, rep: Rep) where Context: SubsystemContext, diff --git a/node/network/collator-protocol/src/validator_side.rs b/node/network/collator-protocol/src/validator_side.rs index 561201518565..a81bd9413e59 100644 --- a/node/network/collator-protocol/src/validator_side.rs +++ b/node/network/collator-protocol/src/validator_side.rs @@ -214,7 +214,7 @@ struct State { } /// Another subsystem has requested to fetch collations on a particular leaf for some para. -#[tracing::instrument(level = "trace", skip(ctx, state, tx), target = LOG_TARGET)] +#[tracing::instrument(level = "trace", skip(ctx, state, tx), fields(target = LOG_TARGET))] async fn fetch_collation( ctx: &mut Context, state: &mut State, @@ -242,7 +242,7 @@ where } /// Report a collator for some malicious actions. -#[tracing::instrument(level = "trace", skip(ctx, state), target = LOG_TARGET)] +#[tracing::instrument(level = "trace", skip(ctx, state), fields(target = LOG_TARGET))] async fn report_collator( ctx: &mut Context, state: &mut State, @@ -260,7 +260,7 @@ where } /// Some other subsystem has reported a collator as a good one, bump reputation. -#[tracing::instrument(level = "trace", skip(ctx, state), target = LOG_TARGET)] +#[tracing::instrument(level = "trace", skip(ctx, state), fields(target = LOG_TARGET))] async fn note_good_collation( ctx: &mut Context, state: &mut State, @@ -275,7 +275,7 @@ where } /// Notify a collator that its collation got seconded. -#[tracing::instrument(level = "trace", skip(ctx, state), target = LOG_TARGET)] +#[tracing::instrument(level = "trace", skip(ctx, state), fields(target = LOG_TARGET))] async fn notify_collation_seconded( ctx: &mut impl SubsystemContext, state: &mut State, @@ -310,7 +310,7 @@ async fn notify_collation_seconded( /// A peer's view has changed. A number of things should be done: /// - Ongoing collation requests have to be cancelled. /// - Advertisements by this peer that are no longer relevant have to be removed. -#[tracing::instrument(level = "trace", skip(state), target = LOG_TARGET)] +#[tracing::instrument(level = "trace", skip(state), fields(target = LOG_TARGET))] async fn handle_peer_view_change( state: &mut State, peer_id: PeerId, @@ -352,7 +352,7 @@ async fn handle_peer_view_change( /// - Cancel all ongoing requests /// - Reply to interested parties if any /// - Store collation. 
-#[tracing::instrument(level = "trace", skip(ctx, state, pov), target = LOG_TARGET)] +#[tracing::instrument(level = "trace", skip(ctx, state, pov), fields(target = LOG_TARGET))] async fn received_collation( ctx: &mut Context, state: &mut State, @@ -418,7 +418,7 @@ where /// - Check if the requested collation is in our view. /// - Update PerRequest records with the `result` field if necessary. /// And as such invocations of this function may rely on that. -#[tracing::instrument(level = "trace", skip(ctx, state, result), target = LOG_TARGET)] +#[tracing::instrument(level = "trace", skip(ctx, state, result), fields(target = LOG_TARGET))] async fn request_collation( ctx: &mut Context, state: &mut State, @@ -498,7 +498,7 @@ where } /// Notify `CandidateSelectionSubsystem` that a collation has been advertised. -#[tracing::instrument(level = "trace", skip(ctx), target = LOG_TARGET)] +#[tracing::instrument(level = "trace", skip(ctx), fields(target = LOG_TARGET))] async fn notify_candidate_selection( ctx: &mut Context, collator: CollatorId, @@ -518,7 +518,7 @@ where } /// Networking message has been received. -#[tracing::instrument(level = "trace", skip(ctx, state), target = LOG_TARGET)] +#[tracing::instrument(level = "trace", skip(ctx, state), fields(target = LOG_TARGET))] async fn process_incoming_peer_message( ctx: &mut Context, state: &mut State, @@ -567,7 +567,7 @@ where /// A leaf has become inactive so we want to /// - Cancel all ongoing collation requests that are on top of that leaf. /// - Remove all stored collations relevant to that leaf. -#[tracing::instrument(level = "trace", skip(state), target = LOG_TARGET)] +#[tracing::instrument(level = "trace", skip(state), fields(target = LOG_TARGET))] async fn remove_relay_parent( state: &mut State, relay_parent: Hash, @@ -591,7 +591,7 @@ async fn remove_relay_parent( } /// Our view has changed. -#[tracing::instrument(level = "trace", skip(state), target = LOG_TARGET)] +#[tracing::instrument(level = "trace", skip(state), fields(target = LOG_TARGET))] async fn handle_our_view_change( state: &mut State, view: OurView, @@ -626,7 +626,7 @@ async fn handle_our_view_change( } /// A request has timed out. -#[tracing::instrument(level = "trace", skip(ctx, state), target = LOG_TARGET)] +#[tracing::instrument(level = "trace", skip(ctx, state), fields(target = LOG_TARGET))] async fn request_timed_out( ctx: &mut Context, state: &mut State, @@ -650,7 +650,7 @@ where } /// Bridge event switch. -#[tracing::instrument(level = "trace", skip(ctx, state), target = LOG_TARGET)] +#[tracing::instrument(level = "trace", skip(ctx, state), fields(target = LOG_TARGET))] async fn handle_network_msg( ctx: &mut Context, state: &mut State, @@ -685,7 +685,7 @@ where } /// The main message receiver switch. -#[tracing::instrument(level = "trace", skip(ctx, state), target = LOG_TARGET)] +#[tracing::instrument(level = "trace", skip(ctx, state), fields(target = LOG_TARGET))] async fn process_msg( ctx: &mut Context, msg: CollatorProtocolMessage, @@ -742,7 +742,7 @@ where } /// The main run loop. 
-#[tracing::instrument(skip(ctx, metrics), target = LOG_TARGET)] +#[tracing::instrument(skip(ctx, metrics), fields(target = LOG_TARGET))] pub(crate) async fn run( mut ctx: Context, request_timeout: Duration, diff --git a/node/network/gossip-support/src/lib.rs b/node/network/gossip-support/src/lib.rs index d5d5b28a21aa..aa1a9ea07398 100644 --- a/node/network/gossip-support/src/lib.rs +++ b/node/network/gossip-support/src/lib.rs @@ -54,7 +54,7 @@ impl GossipSupport { Self {} } - #[tracing::instrument(skip(self, ctx), target = LOG_TARGET)] + #[tracing::instrument(skip(self, ctx), fields(target = LOG_TARGET))] async fn run(self, mut ctx: Context) where Context: SubsystemContext, diff --git a/node/network/pov-distribution/src/lib.rs b/node/network/pov-distribution/src/lib.rs index 7cc12bd02101..f98a470f4bc7 100644 --- a/node/network/pov-distribution/src/lib.rs +++ b/node/network/pov-distribution/src/lib.rs @@ -144,7 +144,7 @@ fn send_pov_message( /// Handles the signal. If successful, returns `true` if the subsystem should conclude, /// `false` otherwise. -#[tracing::instrument(level = "trace", skip(ctx, state), target = LOG_TARGET)] +#[tracing::instrument(level = "trace", skip(ctx, state), fields(target = LOG_TARGET))] async fn handle_signal( state: &mut State, ctx: &mut impl SubsystemContext, @@ -211,7 +211,7 @@ async fn handle_signal( /// Notify peers that we are awaiting a given PoV hash. /// /// This only notifies peers who have the relay parent in their view. -#[tracing::instrument(level = "trace", skip(peers, ctx), target = LOG_TARGET)] +#[tracing::instrument(level = "trace", skip(peers, ctx), fields(target = LOG_TARGET))] async fn notify_all_we_are_awaiting( peers: &mut HashMap, ctx: &mut impl SubsystemContext, @@ -240,7 +240,7 @@ async fn notify_all_we_are_awaiting( } /// Notify one peer about everything we're awaiting at a given relay-parent. -#[tracing::instrument(level = "trace", skip(ctx, relay_parent_state), target = LOG_TARGET)] +#[tracing::instrument(level = "trace", skip(ctx, relay_parent_state), fields(target = LOG_TARGET))] async fn notify_one_we_are_awaiting_many( peer: &PeerId, ctx: &mut impl SubsystemContext, @@ -267,7 +267,7 @@ async fn notify_one_we_are_awaiting_many( } /// Distribute a PoV to peers who are awaiting it. -#[tracing::instrument(level = "trace", skip(peers, ctx, metrics, pov), target = LOG_TARGET)] +#[tracing::instrument(level = "trace", skip(peers, ctx, metrics, pov), fields(target = LOG_TARGET))] async fn distribute_to_awaiting( peers: &mut HashMap, ctx: &mut impl SubsystemContext, @@ -408,7 +408,7 @@ async fn determine_relevant_validators( } /// Handles a `FetchPoV` message. -#[tracing::instrument(level = "trace", skip(ctx, state, response_sender), target = LOG_TARGET)] +#[tracing::instrument(level = "trace", skip(ctx, state, response_sender), fields(target = LOG_TARGET))] async fn handle_fetch( state: &mut State, ctx: &mut impl SubsystemContext, @@ -460,7 +460,7 @@ async fn handle_fetch( } /// Handles a `DistributePoV` message. -#[tracing::instrument(level = "trace", skip(ctx, state, pov), target = LOG_TARGET)] +#[tracing::instrument(level = "trace", skip(ctx, state, pov), fields(target = LOG_TARGET))] async fn handle_distribute( state: &mut State, ctx: &mut impl SubsystemContext, @@ -512,7 +512,7 @@ async fn handle_distribute( } /// Report a reputation change for a peer. 
-#[tracing::instrument(level = "trace", skip(ctx), target = LOG_TARGET)] +#[tracing::instrument(level = "trace", skip(ctx), fields(target = LOG_TARGET))] async fn report_peer( ctx: &mut impl SubsystemContext, peer: PeerId, @@ -522,7 +522,7 @@ async fn report_peer( } /// Handle a notification from a peer that they are awaiting some PoVs. -#[tracing::instrument(level = "trace", skip(ctx, state), target = LOG_TARGET)] +#[tracing::instrument(level = "trace", skip(ctx, state), fields(target = LOG_TARGET))] async fn handle_awaiting( state: &mut State, ctx: &mut impl SubsystemContext, @@ -576,7 +576,7 @@ async fn handle_awaiting( /// Handle an incoming PoV from our peer. Reports them if unexpected, rewards them if not. /// /// Completes any requests awaiting that PoV. -#[tracing::instrument(level = "trace", skip(ctx, state, encoded_pov), target = LOG_TARGET)] +#[tracing::instrument(level = "trace", skip(ctx, state, encoded_pov), fields(target = LOG_TARGET))] async fn handle_incoming_pov( state: &mut State, ctx: &mut impl SubsystemContext, @@ -663,7 +663,7 @@ fn handle_validator_connected(state: &mut State, peer_id: PeerId) { } /// Handles a network bridge update. -#[tracing::instrument(level = "trace", skip(ctx, state), target = LOG_TARGET)] +#[tracing::instrument(level = "trace", skip(ctx, state), fields(target = LOG_TARGET))] async fn handle_network_update( state: &mut State, ctx: &mut impl SubsystemContext, @@ -733,7 +733,7 @@ impl PoVDistribution { Self { metrics } } - #[tracing::instrument(skip(self, ctx), target = LOG_TARGET)] + #[tracing::instrument(skip(self, ctx), fields(target = LOG_TARGET))] async fn run( self, ctx: impl SubsystemContext, diff --git a/node/network/statement-distribution/src/lib.rs b/node/network/statement-distribution/src/lib.rs index ae68b684b5cb..9c0f1bf7bf42 100644 --- a/node/network/statement-distribution/src/lib.rs +++ b/node/network/statement-distribution/src/lib.rs @@ -162,7 +162,7 @@ impl PeerRelayParentKnowledge { /// /// This returns `Some(true)` if this is the first time the peer has become aware of a /// candidate with the given hash. - #[tracing::instrument(level = "trace", skip(self), target = LOG_TARGET)] + #[tracing::instrument(level = "trace", skip(self), fields(target = LOG_TARGET))] fn send(&mut self, fingerprint: &(CompactStatement, ValidatorIndex)) -> Option { let already_known = self.sent_statements.contains(fingerprint) || self.received_statements.contains(fingerprint); @@ -211,7 +211,7 @@ impl PeerRelayParentKnowledge { /// /// This returns `Ok(true)` if this is the first time the peer has become aware of a /// candidate with given hash. - #[tracing::instrument(level = "trace", skip(self), target = LOG_TARGET)] + #[tracing::instrument(level = "trace", skip(self), fields(target = LOG_TARGET))] fn receive( &mut self, fingerprint: &(CompactStatement, ValidatorIndex), @@ -278,7 +278,7 @@ impl PeerData { /// /// This returns `Some(true)` if this is the first time the peer has become aware of a /// candidate with the given hash. - #[tracing::instrument(level = "trace", skip(self), target = LOG_TARGET)] + #[tracing::instrument(level = "trace", skip(self), fields(target = LOG_TARGET))] fn send( &mut self, relay_parent: &Hash, @@ -303,7 +303,7 @@ impl PeerData { /// /// This returns `Ok(true)` if this is the first time the peer has become aware of a /// candidate with given hash. 
- #[tracing::instrument(level = "trace", skip(self), target = LOG_TARGET)] + #[tracing::instrument(level = "trace", skip(self), fields(target = LOG_TARGET))] fn receive( &mut self, relay_parent: &Hash, @@ -422,7 +422,7 @@ impl ActiveHeadData { /// /// Any other statements or those that reference a candidate we are not aware of cannot be accepted /// and will return `NotedStatement::NotUseful`. - #[tracing::instrument(level = "trace", skip(self), target = LOG_TARGET)] + #[tracing::instrument(level = "trace", skip(self), fields(target = LOG_TARGET))] fn note_statement(&mut self, statement: SignedFullStatement) -> NotedStatement { let validator_index = statement.validator_index(); let comparator = StoredStatementComparator { @@ -503,7 +503,7 @@ fn check_statement_signature( /// circulates the statement to all peers who have not seen it yet, and /// sends all statements dependent on that statement to peers who could previously not receive /// them but now can. -#[tracing::instrument(level = "trace", skip(peers, ctx, active_heads, metrics), target = LOG_TARGET)] +#[tracing::instrument(level = "trace", skip(peers, ctx, active_heads, metrics), fields(target = LOG_TARGET))] async fn circulate_statement_and_dependents( peers: &mut HashMap, active_heads: &mut HashMap, @@ -568,7 +568,7 @@ fn statement_message(relay_parent: Hash, statement: SignedFullStatement) /// Circulates a statement to all peers who have not seen it yet, and returns /// an iterator over peers who need to have dependent statements sent. -#[tracing::instrument(level = "trace", skip(peers, ctx), target = LOG_TARGET)] +#[tracing::instrument(level = "trace", skip(peers, ctx), fields(target = LOG_TARGET))] async fn circulate_statement( peers: &mut HashMap, ctx: &mut impl SubsystemContext, @@ -602,7 +602,7 @@ async fn circulate_statement( } /// Send all statements about a given candidate hash to a peer. -#[tracing::instrument(level = "trace", skip(peer_data, ctx, active_head, metrics), target = LOG_TARGET)] +#[tracing::instrument(level = "trace", skip(peer_data, ctx, active_head, metrics), fields(target = LOG_TARGET))] async fn send_statements_about( peer: PeerId, peer_data: &mut PeerData, @@ -629,7 +629,7 @@ async fn send_statements_about( } /// Send all statements at a given relay-parent to a peer. -#[tracing::instrument(level = "trace", skip(peer_data, ctx, active_head, metrics), target = LOG_TARGET)] +#[tracing::instrument(level = "trace", skip(peer_data, ctx, active_head, metrics), fields(target = LOG_TARGET))] async fn send_statements( peer: PeerId, peer_data: &mut PeerData, @@ -669,7 +669,7 @@ async fn report_peer( // // This function checks the signature and ensures the statement is compatible with our // view. It also notifies candidate backing if the statement was previously unknown. -#[tracing::instrument(level = "trace", skip(peer_data, ctx, active_heads, metrics), target = LOG_TARGET)] +#[tracing::instrument(level = "trace", skip(peer_data, ctx, active_heads, metrics), fields(target = LOG_TARGET))] async fn handle_incoming_message<'a>( peer: PeerId, peer_data: &mut PeerData, @@ -766,7 +766,7 @@ async fn handle_incoming_message<'a>( } /// Update a peer's view. 
Sends all newly unlocked statements based on the previous -#[tracing::instrument(level = "trace", skip(peer_data, ctx, active_heads, metrics), target = LOG_TARGET)] +#[tracing::instrument(level = "trace", skip(peer_data, ctx, active_heads, metrics), fields(target = LOG_TARGET))] async fn update_peer_view_and_send_unlocked( peer: PeerId, peer_data: &mut PeerData, @@ -801,7 +801,7 @@ async fn update_peer_view_and_send_unlocked( } } -#[tracing::instrument(level = "trace", skip(peers, active_heads, ctx, metrics), target = LOG_TARGET)] +#[tracing::instrument(level = "trace", skip(peers, active_heads, ctx, metrics), fields(target = LOG_TARGET))] async fn handle_network_update( peers: &mut HashMap, active_heads: &mut HashMap, @@ -889,7 +889,7 @@ async fn handle_network_update( } impl StatementDistribution { - #[tracing::instrument(skip(self, ctx), target = LOG_TARGET)] + #[tracing::instrument(skip(self, ctx), fields(target = LOG_TARGET))] async fn run( self, mut ctx: impl SubsystemContext, diff --git a/node/overseer/src/lib.rs b/node/overseer/src/lib.rs index a4c9e968c8db..696d6db66c63 100644 --- a/node/overseer/src/lib.rs +++ b/node/overseer/src/lib.rs @@ -197,19 +197,19 @@ pub struct OverseerHandler { impl OverseerHandler { /// Inform the `Overseer` that that some block was imported. - #[tracing::instrument(level = "trace", skip(self), target = LOG_TARGET)] + #[tracing::instrument(level = "trace", skip(self), fields(target = LOG_TARGET))] pub async fn block_imported(&mut self, block: BlockInfo) { self.send_and_log_error(Event::BlockImported(block)).await } /// Send some message to one of the `Subsystem`s. - #[tracing::instrument(level = "trace", skip(self, msg), target = LOG_TARGET)] + #[tracing::instrument(level = "trace", skip(self, msg), fields(target = LOG_TARGET))] pub async fn send_msg(&mut self, msg: impl Into) { self.send_and_log_error(Event::MsgToSubsystem(msg.into())).await } /// Inform the `Overseer` that some block was finalized. - #[tracing::instrument(level = "trace", skip(self), target = LOG_TARGET)] + #[tracing::instrument(level = "trace", skip(self), fields(target = LOG_TARGET))] pub async fn block_finalized(&mut self, block: BlockInfo) { self.send_and_log_error(Event::BlockFinalized(block)).await } @@ -221,7 +221,7 @@ impl OverseerHandler { /// Note that due the fact the overseer doesn't store the whole active-leaves set, only deltas, /// the response channel may never return if the hash was deactivated before this call. /// In this case, it's the caller's responsibility to ensure a timeout is set. - #[tracing::instrument(level = "trace", skip(self, response_channel), target = LOG_TARGET)] + #[tracing::instrument(level = "trace", skip(self, response_channel), fields(target = LOG_TARGET))] pub async fn wait_for_activation(&mut self, hash: Hash, response_channel: oneshot::Sender>) { self.send_and_log_error(Event::ExternalRequest(ExternalRequest::WaitForActivation { hash, @@ -230,7 +230,7 @@ impl OverseerHandler { } /// Tell `Overseer` to shutdown. - #[tracing::instrument(level = "trace", skip(self), target = LOG_TARGET)] + #[tracing::instrument(level = "trace", skip(self), fields(target = LOG_TARGET))] pub async fn stop(&mut self) { self.send_and_log_error(Event::Stop).await } @@ -1776,7 +1776,7 @@ where } /// Run the `Overseer`. 
-	#[tracing::instrument(skip(self), target = LOG_TARGET)]
+	#[tracing::instrument(skip(self), fields(target = LOG_TARGET))]
 	pub async fn run(mut self) -> SubsystemResult<()> {
 		let mut update = ActiveLeavesUpdate::default();
 
@@ -1856,7 +1856,7 @@ where
 		}
 	}
 
-	#[tracing::instrument(level = "trace", skip(self), target = LOG_TARGET)]
+	#[tracing::instrument(level = "trace", skip(self), fields(target = LOG_TARGET))]
 	async fn block_imported(&mut self, block: BlockInfo) -> SubsystemResult<()> {
 		match self.active_leaves.entry(block.hash) {
 			hash_map::Entry::Vacant(entry) => entry.insert(block.number),
@@ -1880,7 +1880,7 @@ where
 		self.broadcast_signal(OverseerSignal::ActiveLeaves(update)).await
 	}
 
-	#[tracing::instrument(level = "trace", skip(self), target = LOG_TARGET)]
+	#[tracing::instrument(level = "trace", skip(self), fields(target = LOG_TARGET))]
 	async fn block_finalized(&mut self, block: BlockInfo) -> SubsystemResult<()> {
 		let mut update = ActiveLeavesUpdate::default();
 
@@ -1909,7 +1909,7 @@ where
 		Ok(())
 	}
 
-	#[tracing::instrument(level = "trace", skip(self), target = LOG_TARGET)]
+	#[tracing::instrument(level = "trace", skip(self), fields(target = LOG_TARGET))]
 	async fn broadcast_signal(&mut self, signal: OverseerSignal) -> SubsystemResult<()> {
 		self.candidate_validation_subsystem.send_signal(signal.clone()).await?;
 		self.candidate_backing_subsystem.send_signal(signal.clone()).await?;
@@ -1934,7 +1934,7 @@ where
 		Ok(())
 	}
 
-	#[tracing::instrument(level = "trace", skip(self), target = LOG_TARGET)]
+	#[tracing::instrument(level = "trace", skip(self), fields(target = LOG_TARGET))]
 	async fn route_message(&mut self, msg: MaybeTimed<AllMessages>) -> SubsystemResult<()> {
 		let msg = msg.into_inner();
 		self.metrics.on_message_relayed();
@@ -2001,7 +2001,7 @@ where
 		Ok(())
 	}
 
-	#[tracing::instrument(level = "trace", skip(self), target = LOG_TARGET)]
+	#[tracing::instrument(level = "trace", skip(self), fields(target = LOG_TARGET))]
 	fn on_head_activated(&mut self, hash: &Hash, parent_hash: Option<Hash>) -> Arc<jaeger::Span> {
 		self.metrics.on_head_activated();
 		if let Some(listeners) = self.activation_external_listeners.remove(hash) {
@@ -2022,14 +2022,14 @@ where
 		span
 	}
 
-	#[tracing::instrument(level = "trace", skip(self), target = LOG_TARGET)]
+	#[tracing::instrument(level = "trace", skip(self), fields(target = LOG_TARGET))]
 	fn on_head_deactivated(&mut self, hash: &Hash) {
 		self.metrics.on_head_deactivated();
 		self.activation_external_listeners.remove(hash);
 		self.span_per_active_leaf.remove(hash);
 	}
 
-	#[tracing::instrument(level = "trace", skip(self), target = LOG_TARGET)]
+	#[tracing::instrument(level = "trace", skip(self), fields(target = LOG_TARGET))]
 	fn clean_up_external_listeners(&mut self) {
 		self.activation_external_listeners.retain(|_, v| {
 			// remove dead listeners
@@ -2038,7 +2038,7 @@ where
 		})
 	}
 
-	#[tracing::instrument(level = "trace", skip(self, request), target = LOG_TARGET)]
+	#[tracing::instrument(level = "trace", skip(self, request), fields(target = LOG_TARGET))]
	fn handle_external_request(&mut self, request: ExternalRequest) {
 		match request {
 			ExternalRequest::WaitForActivation { hash, response_channel } => {

From 54d480549a688c7fa39e9f776ac9cbfb5db1c282 Mon Sep 17 00:00:00 2001
From: Robert Klotzner
Date: Thu, 4 Mar 2021 17:26:16 +0100
Subject: [PATCH 11/13] Revert "Fix instrumentation to use log target properly."

This reverts commit 7caa0bd1acc6fe9727bb3a91851560d756c40ab8.
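The churn around `LOG_TARGET` comes down to how `tracing` consumes it. Judging by the commit titles, the original `target = LOG_TARGET` argument compiled but never set the event's real log target, and the intermediate `fields(target = LOG_TARGET)` merely records a span field that happens to share the name `target`, which is easy to mistake for the real thing. The hunks below therefore settle on recording the constant under a `subsystem` field. A minimal sketch of the distinction, assuming `tracing` 0.1 and a purely illustrative `parachain::example` target:

    use tracing::{instrument, trace};

    /// Per-subsystem log target; the concrete name here is illustrative only.
    const LOG_TARGET: &str = "parachain::example";

    fn log_event() {
        // Event macros accept a constant expression as the real log target,
        // so this can be filtered with e.g. `RUST_LOG=parachain::example=trace`.
        trace!(target: LOG_TARGET, "fetched chunk");
    }

    // `#[instrument]` only accepts a string literal for its `target`, so the
    // constant cannot be passed there; instead it is recorded as an ordinary
    // span field. Naming that field `subsystem` rather than `target` makes it
    // obvious that it does not influence target-based filtering.
    #[instrument(level = "trace", fields(subsystem = LOG_TARGET))]
    fn instrumented() {
        log_event();
    }

Target-based filtering keeps working through the event macros either way; the `subsystem` field is just extra context attached to the emitted spans.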
---
 node/collation-generation/src/lib.rs          |  8 ++---
 node/core/av-store/src/lib.rs                 |  4 +--
 node/core/backing/src/lib.rs                  |  8 ++---
 node/core/candidate-selection/src/lib.rs      |  8 ++---
 node/core/candidate-validation/src/lib.rs     | 14 ++++-----
 node/core/chain-api/src/lib.rs                |  2 +-
 node/core/provisioner/src/lib.rs              | 14 ++++-----
 node/core/runtime-api/src/lib.rs              |  4 +--
 node/network/approval-distribution/src/lib.rs |  6 ++--
 .../src/requester/mod.rs                      |  2 +-
 .../src/responder.rs                          |  3 +-
 .../src/session_cache.rs                      |  2 +-
 node/network/availability-recovery/src/lib.rs | 12 ++++----
 node/network/bridge/src/lib.rs                | 14 ++++-----
 node/network/bridge/src/network.rs            |  2 +-
 .../network/bridge/src/validator_discovery.rs |  6 ++--
 .../collator-protocol/src/collator_side.rs    | 28 ++++++++---------
 node/network/collator-protocol/src/lib.rs     |  4 +--
 .../collator-protocol/src/validator_side.rs   | 30 +++++++++----------
 node/network/gossip-support/src/lib.rs        |  2 +-
 node/network/pov-distribution/src/lib.rs      | 22 +++++++-------
 .../network/statement-distribution/src/lib.rs | 26 ++++++++--------
 node/overseer/src/lib.rs                      | 28 ++++++++---------
 23 files changed, 124 insertions(+), 125 deletions(-)

diff --git a/node/collation-generation/src/lib.rs b/node/collation-generation/src/lib.rs
index fd313f9f3b7f..4a93152f9787 100644
--- a/node/collation-generation/src/lib.rs
+++ b/node/collation-generation/src/lib.rs
@@ -74,7 +74,7 @@ impl CollationGenerationSubsystem {
 	///
 	/// If `err_tx` is not `None`, errors are forwarded onto that channel as they occur.
 	/// Otherwise, most are logged and then discarded.
-	#[tracing::instrument(skip(self, ctx), fields(target = LOG_TARGET))]
+	#[tracing::instrument(skip(self, ctx), fields(subsystem = LOG_TARGET))]
 	async fn run(mut self, mut ctx: Context)
 	where
 		Context: SubsystemContext,
@@ -107,7 +107,7 @@ impl CollationGenerationSubsystem {
 	// note: this doesn't strictly need to be a separate function; it's more an administrative function
 	// so that we don't clutter the run loop. It could in principle be inlined directly into there.
 	// it should hopefully therefore be ok that it's an async function mutably borrowing self.
- #[tracing::instrument(level = "trace", skip(self, ctx, sender), fields(target = LOG_TARGET))] + #[tracing::instrument(level = "trace", skip(self, ctx, sender), fields(subsystem = LOG_TARGET))] async fn handle_incoming( &mut self, incoming: SubsystemResult>, @@ -181,7 +181,7 @@ where } } -#[tracing::instrument(level = "trace", skip(ctx, metrics, sender, activated), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx, metrics, sender, activated), fields(subsystem = LOG_TARGET))] async fn handle_new_activations( config: Arc, activated: impl IntoIterator, @@ -364,7 +364,7 @@ async fn handle_new_activations( Ok(()) } -#[tracing::instrument(level = "trace", fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", fields(subsystem = LOG_TARGET))] fn erasure_root( n_validators: usize, persisted_validation: PersistedValidationData, diff --git a/node/core/av-store/src/lib.rs b/node/core/av-store/src/lib.rs index 488b20f9a9d3..4a624a1621ae 100644 --- a/node/core/av-store/src/lib.rs +++ b/node/core/av-store/src/lib.rs @@ -508,7 +508,7 @@ where } } -#[tracing::instrument(skip(subsystem, ctx), fields(target = LOG_TARGET))] +#[tracing::instrument(skip(subsystem, ctx), fields(subsystem = LOG_TARGET))] async fn run(mut subsystem: AvailabilityStoreSubsystem, mut ctx: Context) where Context: SubsystemContext, @@ -534,7 +534,7 @@ where } } -#[tracing::instrument(level = "trace", skip(subsystem, ctx), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(subsystem, ctx), fields(subsystem = LOG_TARGET))] async fn run_iteration( ctx: &mut Context, subsystem: &mut AvailabilityStoreSubsystem, diff --git a/node/core/backing/src/lib.rs b/node/core/backing/src/lib.rs index 88c6f9383c8f..b135458ab99f 100644 --- a/node/core/backing/src/lib.rs +++ b/node/core/backing/src/lib.rs @@ -865,7 +865,7 @@ impl CandidateBackingJob { } /// Import the statement and kick off validation work if it is a part of our assignment. 
- #[tracing::instrument(level = "trace", skip(self, parent_span), fields(target = LOG_TARGET))] + #[tracing::instrument(level = "trace", skip(self, parent_span), fields(subsystem = LOG_TARGET))] async fn maybe_validate_and_import( &mut self, parent_span: &jaeger::Span, @@ -884,7 +884,7 @@ impl CandidateBackingJob { Ok(()) } - #[tracing::instrument(level = "trace", skip(self), fields(target = LOG_TARGET))] + #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))] async fn sign_statement(&self, statement: Statement) -> Option { let signed = self.table_context .validator @@ -897,7 +897,7 @@ impl CandidateBackingJob { Some(signed) } - #[tracing::instrument(level = "trace", skip(self), fields(target = LOG_TARGET))] + #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))] fn check_statement_signature(&self, statement: &SignedFullStatement) -> Result<(), Error> { let idx = statement.validator_index().0 as usize; @@ -987,7 +987,7 @@ impl util::JobTrait for CandidateBackingJob { const NAME: &'static str = "CandidateBackingJob"; - #[tracing::instrument(skip(span, keystore, metrics, rx_to, tx_from), fields(target = LOG_TARGET))] + #[tracing::instrument(skip(span, keystore, metrics, rx_to, tx_from), fields(subsystem = LOG_TARGET))] fn run( parent: Hash, span: Arc, diff --git a/node/core/candidate-selection/src/lib.rs b/node/core/candidate-selection/src/lib.rs index cc62c5d9bea2..58ef93fb9ac9 100644 --- a/node/core/candidate-selection/src/lib.rs +++ b/node/core/candidate-selection/src/lib.rs @@ -93,7 +93,7 @@ impl JobTrait for CandidateSelectionJob { const NAME: &'static str = "CandidateSelectionJob"; - #[tracing::instrument(skip(keystore, metrics, receiver, sender), fields(target = LOG_TARGET))] + #[tracing::instrument(skip(keystore, metrics, receiver, sender), fields(subsystem = LOG_TARGET))] fn run( relay_parent: Hash, span: Arc, @@ -222,7 +222,7 @@ impl CandidateSelectionJob { Ok(()) } - #[tracing::instrument(level = "trace", skip(self), fields(target = LOG_TARGET))] + #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))] async fn handle_collation( &mut self, relay_parent: Hash, @@ -280,7 +280,7 @@ impl CandidateSelectionJob { } } - #[tracing::instrument(level = "trace", skip(self), fields(target = LOG_TARGET))] + #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))] async fn handle_invalid(&mut self, candidate_receipt: CandidateReceipt) { let _timer = self.metrics.time_handle_invalid(); @@ -358,7 +358,7 @@ impl CandidateSelectionJob { // get a collation from the Collator Protocol subsystem // // note that this gets an owned clone of the sender; that's becuase unlike `forward_invalidity_note`, it's expected to take a while longer -#[tracing::instrument(level = "trace", skip(sender), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(sender), fields(subsystem = LOG_TARGET))] async fn get_collation( relay_parent: Hash, para_id: ParaId, diff --git a/node/core/candidate-validation/src/lib.rs b/node/core/candidate-validation/src/lib.rs index f960226ae1e0..f6ca38437bfa 100644 --- a/node/core/candidate-validation/src/lib.rs +++ b/node/core/candidate-validation/src/lib.rs @@ -85,7 +85,7 @@ impl Subsystem for CandidateValidationSubsystem where } } -#[tracing::instrument(skip(ctx, spawn, metrics), fields(target = LOG_TARGET))] +#[tracing::instrument(skip(ctx, spawn, metrics), fields(subsystem = LOG_TARGET))] async fn run( mut ctx: impl SubsystemContext, spawn: impl 
SpawnNamed + Clone + 'static, @@ -183,7 +183,7 @@ enum AssumptionCheckOutcome { BadRequest, } -#[tracing::instrument(level = "trace", skip(ctx), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))] async fn check_assumption_validation_data( ctx: &mut impl SubsystemContext, descriptor: &CandidateDescriptor, @@ -234,7 +234,7 @@ async fn check_assumption_validation_data( }) } -#[tracing::instrument(level = "trace", skip(ctx), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))] async fn find_assumed_validation_data( ctx: &mut impl SubsystemContext, descriptor: &CandidateDescriptor, @@ -266,7 +266,7 @@ async fn find_assumed_validation_data( Ok(AssumptionCheckOutcome::DoesNotMatch) } -#[tracing::instrument(level = "trace", skip(ctx, pov, spawn, metrics), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx, pov, spawn, metrics), fields(subsystem = LOG_TARGET))] async fn spawn_validate_from_chain_state( ctx: &mut impl SubsystemContext, isolation_strategy: IsolationStrategy, @@ -328,7 +328,7 @@ async fn spawn_validate_from_chain_state( validation_result } -#[tracing::instrument(level = "trace", skip(ctx, validation_code, pov, spawn, metrics), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx, validation_code, pov, spawn, metrics), fields(subsystem = LOG_TARGET))] async fn spawn_validate_exhaustive( ctx: &mut impl SubsystemContext, isolation_strategy: IsolationStrategy, @@ -361,7 +361,7 @@ async fn spawn_validate_exhaustive( /// Does basic checks of a candidate. Provide the encoded PoV-block. Returns `Ok` if basic checks /// are passed, `Err` otherwise. -#[tracing::instrument(level = "trace", skip(pov), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(pov), fields(subsystem = LOG_TARGET))] fn perform_basic_checks( candidate: &CandidateDescriptor, max_pov_size: u32, @@ -419,7 +419,7 @@ impl ValidationBackend for RealValidationBackend { /// Validates the candidate from exhaustive parameters. /// /// Sends the result of validation on the channel once complete. 
-#[tracing::instrument(level = "trace", skip(backend_arg, validation_code, pov, spawn, metrics), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(backend_arg, validation_code, pov, spawn, metrics), fields(subsystem = LOG_TARGET))] fn validate_candidate_exhaustive( backend_arg: B::Arg, persisted_validation_data: PersistedValidationData, diff --git a/node/core/chain-api/src/lib.rs b/node/core/chain-api/src/lib.rs index 262239c81374..534c41e3ef85 100644 --- a/node/core/chain-api/src/lib.rs +++ b/node/core/chain-api/src/lib.rs @@ -77,7 +77,7 @@ impl Subsystem for ChainApiSubsystem where } } -#[tracing::instrument(skip(ctx, subsystem), fields(target = LOG_TARGET))] +#[tracing::instrument(skip(ctx, subsystem), fields(subsystem = LOG_TARGET))] async fn run( mut ctx: impl SubsystemContext, subsystem: ChainApiSubsystem, diff --git a/node/core/provisioner/src/lib.rs b/node/core/provisioner/src/lib.rs index dff23e254cef..f773ec27a8dc 100644 --- a/node/core/provisioner/src/lib.rs +++ b/node/core/provisioner/src/lib.rs @@ -138,7 +138,7 @@ impl JobTrait for ProvisioningJob { /// Run a job for the parent block indicated // // this function is in charge of creating and executing the job's main loop - #[tracing::instrument(skip(span, _run_args, metrics, receiver, sender), fields(target = LOG_TARGET))] + #[tracing::instrument(skip(span, _run_args, metrics, receiver, sender), fields(subsystem = LOG_TARGET))] fn run( relay_parent: Hash, span: Arc, @@ -238,7 +238,7 @@ impl ProvisioningJob { } } - #[tracing::instrument(level = "trace", skip(self), fields(target = LOG_TARGET))] + #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))] fn note_provisionable_data(&mut self, provisionable_data: ProvisionableData) { match provisionable_data { ProvisionableData::Bitfield(_, signed_bitfield) => { @@ -271,7 +271,7 @@ type CoreAvailability = BitVec; /// When we're choosing bitfields to include, the rule should be simple: /// maximize availability. So basically, include all bitfields. And then /// choose a coherent set of candidates along with that. -#[tracing::instrument(level = "trace", skip(return_senders, from_job), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(return_senders, from_job), fields(subsystem = LOG_TARGET))] async fn send_inherent_data( relay_parent: Hash, bitfields: &[SignedAvailabilityBitfield], @@ -311,7 +311,7 @@ async fn send_inherent_data( /// /// Note: This does not enforce any sorting precondition on the output; the ordering there will be unrelated /// to the sorting of the input. -#[tracing::instrument(level = "trace", fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", fields(subsystem = LOG_TARGET))] fn select_availability_bitfields( cores: &[CoreState], bitfields: &[SignedAvailabilityBitfield], @@ -343,7 +343,7 @@ fn select_availability_bitfields( } /// Determine which cores are free, and then to the degree possible, pick a candidate appropriate to each free core. 
-#[tracing::instrument(level = "trace", skip(sender), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(sender), fields(subsystem = LOG_TARGET))] async fn select_candidates( availability_cores: &[CoreState], bitfields: &[SignedAvailabilityBitfield], @@ -465,7 +465,7 @@ async fn select_candidates( /// Produces a block number 1 higher than that of the relay parent /// in the event of an invalid `relay_parent`, returns `Ok(0)` -#[tracing::instrument(level = "trace", skip(sender), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(sender), fields(subsystem = LOG_TARGET))] async fn get_block_number_under_construction( relay_parent: Hash, sender: &mut mpsc::Sender, @@ -491,7 +491,7 @@ async fn get_block_number_under_construction( /// - construct a transverse slice along `core_idx` /// - bitwise-or it with the availability slice /// - count the 1 bits, compare to the total length; true on 2/3+ -#[tracing::instrument(level = "trace", fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", fields(subsystem = LOG_TARGET))] fn bitfields_indicate_availability( core_idx: usize, bitfields: &[SignedAvailabilityBitfield], diff --git a/node/core/runtime-api/src/lib.rs b/node/core/runtime-api/src/lib.rs index e9f9adbbd243..cb32dd57728a 100644 --- a/node/core/runtime-api/src/lib.rs +++ b/node/core/runtime-api/src/lib.rs @@ -257,7 +257,7 @@ impl RuntimeApiSubsystem where } } -#[tracing::instrument(skip(ctx, subsystem), fields(target = LOG_TARGET))] +#[tracing::instrument(skip(ctx, subsystem), fields(subsystem = LOG_TARGET))] async fn run( mut ctx: impl SubsystemContext, mut subsystem: RuntimeApiSubsystem, @@ -282,7 +282,7 @@ async fn run( } } -#[tracing::instrument(level = "trace", skip(client, metrics), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(client, metrics), fields(subsystem = LOG_TARGET))] fn make_runtime_api_request( client: Arc, metrics: Metrics, diff --git a/node/network/approval-distribution/src/lib.rs b/node/network/approval-distribution/src/lib.rs index 9bdee6d5eff2..4d98d58ba79c 100644 --- a/node/network/approval-distribution/src/lib.rs +++ b/node/network/approval-distribution/src/lib.rs @@ -830,7 +830,7 @@ impl State { /// Modify the reputation of a peer based on its behavior. -#[tracing::instrument(level = "trace", skip(ctx), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))] async fn modify_reputation( ctx: &mut impl SubsystemContext, peer_id: PeerId, @@ -854,7 +854,7 @@ impl ApprovalDistribution { Self { metrics } } - #[tracing::instrument(skip(self, ctx), fields(target = LOG_TARGET))] + #[tracing::instrument(skip(self, ctx), fields(subsystem = LOG_TARGET))] async fn run(self, ctx: Context) where Context: SubsystemContext, @@ -864,7 +864,7 @@ impl ApprovalDistribution { } /// Used for testing. 
- #[tracing::instrument(skip(self, ctx, state), fields(target = LOG_TARGET))] + #[tracing::instrument(skip(self, ctx, state), fields(subsystem = LOG_TARGET))] async fn run_inner(self, mut ctx: Context, state: &mut State) where Context: SubsystemContext, diff --git a/node/network/availability-distribution/src/requester/mod.rs b/node/network/availability-distribution/src/requester/mod.rs index 31b823f7367f..c231f68c6aa9 100644 --- a/node/network/availability-distribution/src/requester/mod.rs +++ b/node/network/availability-distribution/src/requester/mod.rs @@ -215,7 +215,7 @@ impl Stream for Requester { } /// Query all hashes and descriptors of candidates pending availability at a particular block. -#[tracing::instrument(level = "trace", skip(ctx), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))] async fn query_occupied_cores( ctx: &mut Context, relay_parent: Hash, diff --git a/node/network/availability-distribution/src/responder.rs b/node/network/availability-distribution/src/responder.rs index 1c6dc3b7980c..3e233654c9b5 100644 --- a/node/network/availability-distribution/src/responder.rs +++ b/node/network/availability-distribution/src/responder.rs @@ -57,7 +57,6 @@ where /// Answer an incoming chunk request by querying the av store. /// /// Returns: Ok(true) if chunk was found and served. -#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))] pub async fn answer_request( ctx: &mut Context, req: IncomingRequest, @@ -85,7 +84,7 @@ where } /// Query chunk from the availability store. -#[tracing::instrument(level = "trace", skip(ctx), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))] async fn query_chunk( ctx: &mut Context, candidate_hash: CandidateHash, diff --git a/node/network/availability-distribution/src/session_cache.rs b/node/network/availability-distribution/src/session_cache.rs index 471cffab8892..9e9dd6e9d9db 100644 --- a/node/network/availability-distribution/src/session_cache.rs +++ b/node/network/availability-distribution/src/session_cache.rs @@ -183,7 +183,7 @@ impl SessionCache { /// /// We assume validators in a group are tried in reverse order, so the reported bad validators /// will be put at the beginning of the group. - #[tracing::instrument(level = "trace", skip(self, report), fields(target = LOG_TARGET))] + #[tracing::instrument(level = "trace", skip(self, report), fields(subsystem = LOG_TARGET))] pub fn report_bad(&mut self, report: BadValidators) -> Result<()> { let session = self .session_info_cache diff --git a/node/network/availability-recovery/src/lib.rs b/node/network/availability-recovery/src/lib.rs index a3ffb5c3d1f1..a18fe1eda96d 100644 --- a/node/network/availability-recovery/src/lib.rs +++ b/node/network/availability-recovery/src/lib.rs @@ -589,7 +589,7 @@ async fn report_peer( } /// Machinery around launching interactions into the background. -#[tracing::instrument(level = "trace", skip(ctx, state), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx, state), fields(subsystem = LOG_TARGET))] async fn launch_interaction( state: &mut State, ctx: &mut impl SubsystemContext, @@ -654,7 +654,7 @@ async fn launch_interaction( } /// Handles an availability recovery request. 
-#[tracing::instrument(level = "trace", skip(ctx, state), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx, state), fields(subsystem = LOG_TARGET))] async fn handle_recover( state: &mut State, ctx: &mut impl SubsystemContext, @@ -718,7 +718,7 @@ async fn handle_recover( } /// Queries a chunk from av-store. -#[tracing::instrument(level = "trace", skip(ctx), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))] async fn query_chunk( ctx: &mut impl SubsystemContext, candidate_hash: CandidateHash, @@ -733,7 +733,7 @@ async fn query_chunk( } /// Queries a chunk from av-store. -#[tracing::instrument(level = "trace", skip(ctx), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))] async fn query_full_data( ctx: &mut impl SubsystemContext, candidate_hash: CandidateHash, @@ -747,7 +747,7 @@ async fn query_full_data( } /// Handles message from interaction. -#[tracing::instrument(level = "trace", skip(ctx, state), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx, state), fields(subsystem = LOG_TARGET))] async fn handle_from_interaction( state: &mut State, ctx: &mut impl SubsystemContext, @@ -827,7 +827,7 @@ async fn handle_from_interaction( } /// Handles a network bridge update. -#[tracing::instrument(level = "trace", skip(ctx, state), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx, state), fields(subsystem = LOG_TARGET))] async fn handle_network_update( state: &mut State, ctx: &mut impl SubsystemContext, diff --git a/node/network/bridge/src/lib.rs b/node/network/bridge/src/lib.rs index e2ce2eaf85b7..a49363846cee 100644 --- a/node/network/bridge/src/lib.rs +++ b/node/network/bridge/src/lib.rs @@ -142,7 +142,7 @@ struct PeerData { } /// Main driver, processing network events and messages from other subsystems. -#[tracing::instrument(skip(bridge, ctx), fields(target = LOG_TARGET))] +#[tracing::instrument(skip(bridge, ctx), fields(subsystem = LOG_TARGET))] async fn run_network( mut bridge: NetworkBridge, mut ctx: impl SubsystemContext, @@ -417,7 +417,7 @@ fn construct_view(live_heads: impl DoubleEndedIterator, finalized_n ) } -#[tracing::instrument(level = "trace", skip(net, ctx, validation_peers, collation_peers), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(net, ctx, validation_peers, collation_peers), fields(subsystem = LOG_TARGET))] async fn update_our_view( net: &mut impl Network, ctx: &mut impl SubsystemContext, @@ -460,7 +460,7 @@ async fn update_our_view( // Handle messages on a specific peer-set. The peer is expected to be connected on that // peer-set. 
-#[tracing::instrument(level = "trace", skip(peers, messages, net), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(peers, messages, net), fields(subsystem = LOG_TARGET))] async fn handle_peer_messages( peer: PeerId, peers: &mut HashMap, @@ -516,7 +516,7 @@ async fn handle_peer_messages( Ok(outgoing_messages) } -#[tracing::instrument(level = "trace", skip(net, peers), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(net, peers), fields(subsystem = LOG_TARGET))] async fn send_validation_message( net: &mut impl Network, peers: I, @@ -529,7 +529,7 @@ async fn send_validation_message( send_message(net, peers, PeerSet::Validation, message).await } -#[tracing::instrument(level = "trace", skip(net, peers), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(net, peers), fields(subsystem = LOG_TARGET))] async fn send_collation_message( net: &mut impl Network, peers: I, @@ -557,7 +557,7 @@ async fn dispatch_collation_event_to_all( dispatch_collation_events_to_all(std::iter::once(event), ctx).await } -#[tracing::instrument(level = "trace", skip(events, ctx), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(events, ctx), fields(subsystem = LOG_TARGET))] async fn dispatch_validation_events_to_all( events: I, ctx: &mut impl SubsystemContext, @@ -569,7 +569,7 @@ async fn dispatch_validation_events_to_all( ctx.send_messages(events.into_iter().flat_map(AllMessages::dispatch_iter)).await } -#[tracing::instrument(level = "trace", skip(events, ctx), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(events, ctx), fields(subsystem = LOG_TARGET))] async fn dispatch_collation_events_to_all( events: I, ctx: &mut impl SubsystemContext, diff --git a/node/network/bridge/src/network.rs b/node/network/bridge/src/network.rs index 4700a0549f52..ed25f9f36827 100644 --- a/node/network/bridge/src/network.rs +++ b/node/network/bridge/src/network.rs @@ -151,7 +151,7 @@ impl Network for Arc> { NetworkService::event_stream(self, "polkadot-network-bridge").boxed() } - #[tracing::instrument(level = "trace", skip(self), fields(target = LOG_TARGET))] + #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))] fn action_sink<'a>( &'a mut self, ) -> Pin + Send + 'a>> { diff --git a/node/network/bridge/src/validator_discovery.rs b/node/network/bridge/src/validator_discovery.rs index e4c3cbbb7d04..87ff378fd622 100644 --- a/node/network/bridge/src/validator_discovery.rs +++ b/node/network/bridge/src/validator_discovery.rs @@ -169,7 +169,7 @@ impl Service { /// Find connected validators using the given `validator_ids`. /// /// Returns a [`HashMap`] that contains the found [`AuthorityDiscoveryId`]'s and their associated [`PeerId`]'s. - #[tracing::instrument(level = "trace", skip(self, authority_discovery_service), fields(target = LOG_TARGET))] + #[tracing::instrument(level = "trace", skip(self, authority_discovery_service), fields(subsystem = LOG_TARGET))] async fn find_connected_validators( &mut self, validator_ids: &[AuthorityDiscoveryId], @@ -216,7 +216,7 @@ impl Service { /// This method will also clean up all previously revoked requests. /// it takes `network_service` and `authority_discovery_service` by value /// and returns them as a workaround for the Future: Send requirement imposed by async fn impl. 
- #[tracing::instrument(level = "trace", skip(self, connected, network_service, authority_discovery_service), fields(target = LOG_TARGET))] + #[tracing::instrument(level = "trace", skip(self, connected, network_service, authority_discovery_service), fields(subsystem = LOG_TARGET))] pub async fn on_request( &mut self, validator_ids: Vec, @@ -335,7 +335,7 @@ impl Service { } /// Should be called when a peer connected. - #[tracing::instrument(level = "trace", skip(self, authority_discovery_service), fields(target = LOG_TARGET))] + #[tracing::instrument(level = "trace", skip(self, authority_discovery_service), fields(subsystem = LOG_TARGET))] pub async fn on_peer_connected( &mut self, peer_id: PeerId, diff --git a/node/network/collator-protocol/src/collator_side.rs b/node/network/collator-protocol/src/collator_side.rs index 7f28e58697f0..91d6768dc665 100644 --- a/node/network/collator-protocol/src/collator_side.rs +++ b/node/network/collator-protocol/src/collator_side.rs @@ -260,7 +260,7 @@ impl State { /// or the relay-parent isn't in the active-leaves set, we ignore the message /// as it must be invalid in that case - although this indicates a logic error /// elsewhere in the node. -#[tracing::instrument(level = "trace", skip(ctx, state, pov), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx, state, pov), fields(subsystem = LOG_TARGET))] async fn distribute_collation( ctx: &mut impl SubsystemContext, state: &mut State, @@ -338,7 +338,7 @@ async fn distribute_collation( /// Get the Id of the Core that is assigned to the para being collated on if any /// and the total number of cores. -#[tracing::instrument(level = "trace", skip(ctx), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))] async fn determine_core( ctx: &mut impl SubsystemContext, para_id: ParaId, @@ -360,7 +360,7 @@ async fn determine_core( /// Figure out current and next group of validators assigned to the para being collated on. /// /// Returns [`ValidatorId`]'s of current and next group as determined based on the `relay_parent`. -#[tracing::instrument(level = "trace", skip(ctx), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))] async fn determine_our_validators( ctx: &mut impl SubsystemContext, core_index: CoreIndex, @@ -386,7 +386,7 @@ async fn determine_our_validators( } /// Issue a `Declare` collation message to the given `peer`. -#[tracing::instrument(level = "trace", skip(ctx, state), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx, state), fields(subsystem = LOG_TARGET))] async fn declare( ctx: &mut impl SubsystemContext, state: &mut State, @@ -404,7 +404,7 @@ async fn declare( /// Issue a connection request to a set of validators and /// revoke the previous connection request. -#[tracing::instrument(level = "trace", skip(ctx, state), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx, state), fields(subsystem = LOG_TARGET))] async fn connect_to_validators( ctx: &mut impl SubsystemContext, relay_parent: Hash, @@ -428,7 +428,7 @@ async fn connect_to_validators( /// /// This will only advertise a collation if there exists one for the given `relay_parent` and the given `peer` is /// set as validator for our para at the given `relay_parent`. 
-#[tracing::instrument(level = "trace", skip(ctx, state), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx, state), fields(subsystem = LOG_TARGET))] async fn advertise_collation( ctx: &mut impl SubsystemContext, state: &mut State, @@ -484,7 +484,7 @@ async fn advertise_collation( } /// The main incoming message dispatching switch. -#[tracing::instrument(level = "trace", skip(ctx, state), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx, state), fields(subsystem = LOG_TARGET))] async fn process_msg( ctx: &mut impl SubsystemContext, state: &mut State, @@ -568,7 +568,7 @@ async fn process_msg( } /// Issue a response to a previously requested collation. -#[tracing::instrument(level = "trace", skip(ctx, state, pov), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx, state, pov), fields(subsystem = LOG_TARGET))] async fn send_collation( ctx: &mut impl SubsystemContext, state: &mut State, @@ -602,7 +602,7 @@ async fn send_collation( } /// A networking messages switch. -#[tracing::instrument(level = "trace", skip(ctx, state), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx, state), fields(subsystem = LOG_TARGET))] async fn handle_incoming_peer_message( ctx: &mut impl SubsystemContext, state: &mut State, @@ -685,7 +685,7 @@ async fn handle_incoming_peer_message( } /// Our view has changed. -#[tracing::instrument(level = "trace", skip(ctx, state), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx, state), fields(subsystem = LOG_TARGET))] async fn handle_peer_view_change( ctx: &mut impl SubsystemContext, state: &mut State, @@ -706,7 +706,7 @@ async fn handle_peer_view_change( /// A validator is connected. /// /// `Declare` that we are a collator with a given `CollatorId`. -#[tracing::instrument(level = "trace", skip(ctx, state), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx, state), fields(subsystem = LOG_TARGET))] async fn handle_validator_connected( ctx: &mut impl SubsystemContext, state: &mut State, @@ -735,7 +735,7 @@ async fn handle_validator_connected( } /// Bridge messages switch. -#[tracing::instrument(level = "trace", skip(ctx, state), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx, state), fields(subsystem = LOG_TARGET))] async fn handle_network_msg( ctx: &mut impl SubsystemContext, state: &mut State, @@ -767,7 +767,7 @@ async fn handle_network_msg( } /// Handles our view changes. -#[tracing::instrument(level = "trace", skip(state), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(state), fields(subsystem = LOG_TARGET))] async fn handle_our_view_change( state: &mut State, view: OurView, @@ -810,7 +810,7 @@ async fn handle_our_view_change( } /// The collator protocol collator side main loop. 
-#[tracing::instrument(skip(ctx, metrics), fields(target = LOG_TARGET))] +#[tracing::instrument(skip(ctx, metrics), fields(subsystem = LOG_TARGET))] pub(crate) async fn run( mut ctx: impl SubsystemContext, our_id: CollatorId, diff --git a/node/network/collator-protocol/src/lib.rs b/node/network/collator-protocol/src/lib.rs index 33037d736659..a1201c597e9c 100644 --- a/node/network/collator-protocol/src/lib.rs +++ b/node/network/collator-protocol/src/lib.rs @@ -86,7 +86,7 @@ impl CollatorProtocolSubsystem { } } - #[tracing::instrument(skip(self, ctx), fields(target = LOG_TARGET))] + #[tracing::instrument(skip(self, ctx), fields(subsystem = LOG_TARGET))] async fn run(self, ctx: Context) -> Result<()> where Context: SubsystemContext, @@ -126,7 +126,7 @@ where } /// Modify the reputation of a peer based on its behavior. -#[tracing::instrument(level = "trace", skip(ctx), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))] async fn modify_reputation(ctx: &mut Context, peer: PeerId, rep: Rep) where Context: SubsystemContext, diff --git a/node/network/collator-protocol/src/validator_side.rs b/node/network/collator-protocol/src/validator_side.rs index a81bd9413e59..a3a8216e5fc7 100644 --- a/node/network/collator-protocol/src/validator_side.rs +++ b/node/network/collator-protocol/src/validator_side.rs @@ -214,7 +214,7 @@ struct State { } /// Another subsystem has requested to fetch collations on a particular leaf for some para. -#[tracing::instrument(level = "trace", skip(ctx, state, tx), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx, state, tx), fields(subsystem = LOG_TARGET))] async fn fetch_collation( ctx: &mut Context, state: &mut State, @@ -242,7 +242,7 @@ where } /// Report a collator for some malicious actions. -#[tracing::instrument(level = "trace", skip(ctx, state), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx, state), fields(subsystem = LOG_TARGET))] async fn report_collator( ctx: &mut Context, state: &mut State, @@ -260,7 +260,7 @@ where } /// Some other subsystem has reported a collator as a good one, bump reputation. -#[tracing::instrument(level = "trace", skip(ctx, state), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx, state), fields(subsystem = LOG_TARGET))] async fn note_good_collation( ctx: &mut Context, state: &mut State, @@ -275,7 +275,7 @@ where } /// Notify a collator that its collation got seconded. -#[tracing::instrument(level = "trace", skip(ctx, state), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx, state), fields(subsystem = LOG_TARGET))] async fn notify_collation_seconded( ctx: &mut impl SubsystemContext, state: &mut State, @@ -310,7 +310,7 @@ async fn notify_collation_seconded( /// A peer's view has changed. A number of things should be done: /// - Ongoing collation requests have to be cancelled. /// - Advertisements by this peer that are no longer relevant have to be removed. -#[tracing::instrument(level = "trace", skip(state), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(state), fields(subsystem = LOG_TARGET))] async fn handle_peer_view_change( state: &mut State, peer_id: PeerId, @@ -352,7 +352,7 @@ async fn handle_peer_view_change( /// - Cancel all ongoing requests /// - Reply to interested parties if any /// - Store collation. 
-#[tracing::instrument(level = "trace", skip(ctx, state, pov), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx, state, pov), fields(subsystem = LOG_TARGET))] async fn received_collation( ctx: &mut Context, state: &mut State, @@ -418,7 +418,7 @@ where /// - Check if the requested collation is in our view. /// - Update PerRequest records with the `result` field if necessary. /// And as such invocations of this function may rely on that. -#[tracing::instrument(level = "trace", skip(ctx, state, result), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx, state, result), fields(subsystem = LOG_TARGET))] async fn request_collation( ctx: &mut Context, state: &mut State, @@ -498,7 +498,7 @@ where } /// Notify `CandidateSelectionSubsystem` that a collation has been advertised. -#[tracing::instrument(level = "trace", skip(ctx), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))] async fn notify_candidate_selection( ctx: &mut Context, collator: CollatorId, @@ -518,7 +518,7 @@ where } /// Networking message has been received. -#[tracing::instrument(level = "trace", skip(ctx, state), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx, state), fields(subsystem = LOG_TARGET))] async fn process_incoming_peer_message( ctx: &mut Context, state: &mut State, @@ -567,7 +567,7 @@ where /// A leaf has become inactive so we want to /// - Cancel all ongoing collation requests that are on top of that leaf. /// - Remove all stored collations relevant to that leaf. -#[tracing::instrument(level = "trace", skip(state), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(state), fields(subsystem = LOG_TARGET))] async fn remove_relay_parent( state: &mut State, relay_parent: Hash, @@ -591,7 +591,7 @@ async fn remove_relay_parent( } /// Our view has changed. -#[tracing::instrument(level = "trace", skip(state), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(state), fields(subsystem = LOG_TARGET))] async fn handle_our_view_change( state: &mut State, view: OurView, @@ -626,7 +626,7 @@ async fn handle_our_view_change( } /// A request has timed out. -#[tracing::instrument(level = "trace", skip(ctx, state), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx, state), fields(subsystem = LOG_TARGET))] async fn request_timed_out( ctx: &mut Context, state: &mut State, @@ -650,7 +650,7 @@ where } /// Bridge event switch. -#[tracing::instrument(level = "trace", skip(ctx, state), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx, state), fields(subsystem = LOG_TARGET))] async fn handle_network_msg( ctx: &mut Context, state: &mut State, @@ -685,7 +685,7 @@ where } /// The main message receiver switch. -#[tracing::instrument(level = "trace", skip(ctx, state), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx, state), fields(subsystem = LOG_TARGET))] async fn process_msg( ctx: &mut Context, msg: CollatorProtocolMessage, @@ -742,7 +742,7 @@ where } /// The main run loop. 
-#[tracing::instrument(skip(ctx, metrics), fields(target = LOG_TARGET))] +#[tracing::instrument(skip(ctx, metrics), fields(subsystem = LOG_TARGET))] pub(crate) async fn run( mut ctx: Context, request_timeout: Duration, diff --git a/node/network/gossip-support/src/lib.rs b/node/network/gossip-support/src/lib.rs index aa1a9ea07398..8ade01a0ced8 100644 --- a/node/network/gossip-support/src/lib.rs +++ b/node/network/gossip-support/src/lib.rs @@ -54,7 +54,7 @@ impl GossipSupport { Self {} } - #[tracing::instrument(skip(self, ctx), fields(target = LOG_TARGET))] + #[tracing::instrument(skip(self, ctx), fields(subsystem = LOG_TARGET))] async fn run(self, mut ctx: Context) where Context: SubsystemContext, diff --git a/node/network/pov-distribution/src/lib.rs b/node/network/pov-distribution/src/lib.rs index f98a470f4bc7..fc18fb8fb820 100644 --- a/node/network/pov-distribution/src/lib.rs +++ b/node/network/pov-distribution/src/lib.rs @@ -144,7 +144,7 @@ fn send_pov_message( /// Handles the signal. If successful, returns `true` if the subsystem should conclude, /// `false` otherwise. -#[tracing::instrument(level = "trace", skip(ctx, state), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx, state), fields(subsystem = LOG_TARGET))] async fn handle_signal( state: &mut State, ctx: &mut impl SubsystemContext, @@ -211,7 +211,7 @@ async fn handle_signal( /// Notify peers that we are awaiting a given PoV hash. /// /// This only notifies peers who have the relay parent in their view. -#[tracing::instrument(level = "trace", skip(peers, ctx), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(peers, ctx), fields(subsystem = LOG_TARGET))] async fn notify_all_we_are_awaiting( peers: &mut HashMap, ctx: &mut impl SubsystemContext, @@ -240,7 +240,7 @@ async fn notify_all_we_are_awaiting( } /// Notify one peer about everything we're awaiting at a given relay-parent. -#[tracing::instrument(level = "trace", skip(ctx, relay_parent_state), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx, relay_parent_state), fields(subsystem = LOG_TARGET))] async fn notify_one_we_are_awaiting_many( peer: &PeerId, ctx: &mut impl SubsystemContext, @@ -267,7 +267,7 @@ async fn notify_one_we_are_awaiting_many( } /// Distribute a PoV to peers who are awaiting it. -#[tracing::instrument(level = "trace", skip(peers, ctx, metrics, pov), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(peers, ctx, metrics, pov), fields(subsystem = LOG_TARGET))] async fn distribute_to_awaiting( peers: &mut HashMap, ctx: &mut impl SubsystemContext, @@ -408,7 +408,7 @@ async fn determine_relevant_validators( } /// Handles a `FetchPoV` message. -#[tracing::instrument(level = "trace", skip(ctx, state, response_sender), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx, state, response_sender), fields(subsystem = LOG_TARGET))] async fn handle_fetch( state: &mut State, ctx: &mut impl SubsystemContext, @@ -460,7 +460,7 @@ async fn handle_fetch( } /// Handles a `DistributePoV` message. -#[tracing::instrument(level = "trace", skip(ctx, state, pov), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx, state, pov), fields(subsystem = LOG_TARGET))] async fn handle_distribute( state: &mut State, ctx: &mut impl SubsystemContext, @@ -512,7 +512,7 @@ async fn handle_distribute( } /// Report a reputation change for a peer. 
-#[tracing::instrument(level = "trace", skip(ctx), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))] async fn report_peer( ctx: &mut impl SubsystemContext, peer: PeerId, @@ -522,7 +522,7 @@ async fn report_peer( } /// Handle a notification from a peer that they are awaiting some PoVs. -#[tracing::instrument(level = "trace", skip(ctx, state), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx, state), fields(subsystem = LOG_TARGET))] async fn handle_awaiting( state: &mut State, ctx: &mut impl SubsystemContext, @@ -576,7 +576,7 @@ async fn handle_awaiting( /// Handle an incoming PoV from our peer. Reports them if unexpected, rewards them if not. /// /// Completes any requests awaiting that PoV. -#[tracing::instrument(level = "trace", skip(ctx, state, encoded_pov), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx, state, encoded_pov), fields(subsystem = LOG_TARGET))] async fn handle_incoming_pov( state: &mut State, ctx: &mut impl SubsystemContext, @@ -663,7 +663,7 @@ fn handle_validator_connected(state: &mut State, peer_id: PeerId) { } /// Handles a network bridge update. -#[tracing::instrument(level = "trace", skip(ctx, state), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(ctx, state), fields(subsystem = LOG_TARGET))] async fn handle_network_update( state: &mut State, ctx: &mut impl SubsystemContext, @@ -733,7 +733,7 @@ impl PoVDistribution { Self { metrics } } - #[tracing::instrument(skip(self, ctx), fields(target = LOG_TARGET))] + #[tracing::instrument(skip(self, ctx), fields(subsystem = LOG_TARGET))] async fn run( self, ctx: impl SubsystemContext, diff --git a/node/network/statement-distribution/src/lib.rs b/node/network/statement-distribution/src/lib.rs index 9c0f1bf7bf42..5527c6344ccb 100644 --- a/node/network/statement-distribution/src/lib.rs +++ b/node/network/statement-distribution/src/lib.rs @@ -162,7 +162,7 @@ impl PeerRelayParentKnowledge { /// /// This returns `Some(true)` if this is the first time the peer has become aware of a /// candidate with the given hash. - #[tracing::instrument(level = "trace", skip(self), fields(target = LOG_TARGET))] + #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))] fn send(&mut self, fingerprint: &(CompactStatement, ValidatorIndex)) -> Option { let already_known = self.sent_statements.contains(fingerprint) || self.received_statements.contains(fingerprint); @@ -211,7 +211,7 @@ impl PeerRelayParentKnowledge { /// /// This returns `Ok(true)` if this is the first time the peer has become aware of a /// candidate with given hash. - #[tracing::instrument(level = "trace", skip(self), fields(target = LOG_TARGET))] + #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))] fn receive( &mut self, fingerprint: &(CompactStatement, ValidatorIndex), @@ -278,7 +278,7 @@ impl PeerData { /// /// This returns `Some(true)` if this is the first time the peer has become aware of a /// candidate with the given hash. - #[tracing::instrument(level = "trace", skip(self), fields(target = LOG_TARGET))] + #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))] fn send( &mut self, relay_parent: &Hash, @@ -303,7 +303,7 @@ impl PeerData { /// /// This returns `Ok(true)` if this is the first time the peer has become aware of a /// candidate with given hash. 
- #[tracing::instrument(level = "trace", skip(self), fields(target = LOG_TARGET))] + #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))] fn receive( &mut self, relay_parent: &Hash, @@ -422,7 +422,7 @@ impl ActiveHeadData { /// /// Any other statements or those that reference a candidate we are not aware of cannot be accepted /// and will return `NotedStatement::NotUseful`. - #[tracing::instrument(level = "trace", skip(self), fields(target = LOG_TARGET))] + #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))] fn note_statement(&mut self, statement: SignedFullStatement) -> NotedStatement { let validator_index = statement.validator_index(); let comparator = StoredStatementComparator { @@ -503,7 +503,7 @@ fn check_statement_signature( /// circulates the statement to all peers who have not seen it yet, and /// sends all statements dependent on that statement to peers who could previously not receive /// them but now can. -#[tracing::instrument(level = "trace", skip(peers, ctx, active_heads, metrics), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(peers, ctx, active_heads, metrics), fields(subsystem = LOG_TARGET))] async fn circulate_statement_and_dependents( peers: &mut HashMap, active_heads: &mut HashMap, @@ -568,7 +568,7 @@ fn statement_message(relay_parent: Hash, statement: SignedFullStatement) /// Circulates a statement to all peers who have not seen it yet, and returns /// an iterator over peers who need to have dependent statements sent. -#[tracing::instrument(level = "trace", skip(peers, ctx), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(peers, ctx), fields(subsystem = LOG_TARGET))] async fn circulate_statement( peers: &mut HashMap, ctx: &mut impl SubsystemContext, @@ -602,7 +602,7 @@ async fn circulate_statement( } /// Send all statements about a given candidate hash to a peer. -#[tracing::instrument(level = "trace", skip(peer_data, ctx, active_head, metrics), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(peer_data, ctx, active_head, metrics), fields(subsystem = LOG_TARGET))] async fn send_statements_about( peer: PeerId, peer_data: &mut PeerData, @@ -629,7 +629,7 @@ async fn send_statements_about( } /// Send all statements at a given relay-parent to a peer. -#[tracing::instrument(level = "trace", skip(peer_data, ctx, active_head, metrics), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(peer_data, ctx, active_head, metrics), fields(subsystem = LOG_TARGET))] async fn send_statements( peer: PeerId, peer_data: &mut PeerData, @@ -669,7 +669,7 @@ async fn report_peer( // // This function checks the signature and ensures the statement is compatible with our // view. It also notifies candidate backing if the statement was previously unknown. -#[tracing::instrument(level = "trace", skip(peer_data, ctx, active_heads, metrics), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(peer_data, ctx, active_heads, metrics), fields(subsystem = LOG_TARGET))] async fn handle_incoming_message<'a>( peer: PeerId, peer_data: &mut PeerData, @@ -766,7 +766,7 @@ async fn handle_incoming_message<'a>( } /// Update a peer's view. 
Sends all newly unlocked statements based on the previous -#[tracing::instrument(level = "trace", skip(peer_data, ctx, active_heads, metrics), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(peer_data, ctx, active_heads, metrics), fields(subsystem = LOG_TARGET))] async fn update_peer_view_and_send_unlocked( peer: PeerId, peer_data: &mut PeerData, @@ -801,7 +801,7 @@ async fn update_peer_view_and_send_unlocked( } } -#[tracing::instrument(level = "trace", skip(peers, active_heads, ctx, metrics), fields(target = LOG_TARGET))] +#[tracing::instrument(level = "trace", skip(peers, active_heads, ctx, metrics), fields(subsystem = LOG_TARGET))] async fn handle_network_update( peers: &mut HashMap, active_heads: &mut HashMap, @@ -889,7 +889,7 @@ async fn handle_network_update( } impl StatementDistribution { - #[tracing::instrument(skip(self, ctx), fields(target = LOG_TARGET))] + #[tracing::instrument(skip(self, ctx), fields(subsystem = LOG_TARGET))] async fn run( self, mut ctx: impl SubsystemContext, diff --git a/node/overseer/src/lib.rs b/node/overseer/src/lib.rs index 696d6db66c63..ae88e53805da 100644 --- a/node/overseer/src/lib.rs +++ b/node/overseer/src/lib.rs @@ -197,19 +197,19 @@ pub struct OverseerHandler { impl OverseerHandler { /// Inform the `Overseer` that that some block was imported. - #[tracing::instrument(level = "trace", skip(self), fields(target = LOG_TARGET))] + #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))] pub async fn block_imported(&mut self, block: BlockInfo) { self.send_and_log_error(Event::BlockImported(block)).await } /// Send some message to one of the `Subsystem`s. - #[tracing::instrument(level = "trace", skip(self, msg), fields(target = LOG_TARGET))] + #[tracing::instrument(level = "trace", skip(self, msg), fields(subsystem = LOG_TARGET))] pub async fn send_msg(&mut self, msg: impl Into) { self.send_and_log_error(Event::MsgToSubsystem(msg.into())).await } /// Inform the `Overseer` that some block was finalized. - #[tracing::instrument(level = "trace", skip(self), fields(target = LOG_TARGET))] + #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))] pub async fn block_finalized(&mut self, block: BlockInfo) { self.send_and_log_error(Event::BlockFinalized(block)).await } @@ -221,7 +221,7 @@ impl OverseerHandler { /// Note that due the fact the overseer doesn't store the whole active-leaves set, only deltas, /// the response channel may never return if the hash was deactivated before this call. /// In this case, it's the caller's responsibility to ensure a timeout is set. - #[tracing::instrument(level = "trace", skip(self, response_channel), fields(target = LOG_TARGET))] + #[tracing::instrument(level = "trace", skip(self, response_channel), fields(subsystem = LOG_TARGET))] pub async fn wait_for_activation(&mut self, hash: Hash, response_channel: oneshot::Sender>) { self.send_and_log_error(Event::ExternalRequest(ExternalRequest::WaitForActivation { hash, @@ -230,7 +230,7 @@ impl OverseerHandler { } /// Tell `Overseer` to shutdown. - #[tracing::instrument(level = "trace", skip(self), fields(target = LOG_TARGET))] + #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))] pub async fn stop(&mut self) { self.send_and_log_error(Event::Stop).await } @@ -1776,7 +1776,7 @@ where } /// Run the `Overseer`. 
-	#[tracing::instrument(skip(self), fields(target = LOG_TARGET))]
+	#[tracing::instrument(skip(self), fields(subsystem = LOG_TARGET))]
 	pub async fn run(mut self) -> SubsystemResult<()> {
 		let mut update = ActiveLeavesUpdate::default();
 
@@ -1856,7 +1856,7 @@ where
 		}
 	}
 
-	#[tracing::instrument(level = "trace", skip(self), fields(target = LOG_TARGET))]
+	#[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))]
 	async fn block_imported(&mut self, block: BlockInfo) -> SubsystemResult<()> {
 		match self.active_leaves.entry(block.hash) {
 			hash_map::Entry::Vacant(entry) => entry.insert(block.number),
@@ -1880,7 +1880,7 @@ where
 		self.broadcast_signal(OverseerSignal::ActiveLeaves(update)).await
 	}
 
-	#[tracing::instrument(level = "trace", skip(self), fields(target = LOG_TARGET))]
+	#[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))]
 	async fn block_finalized(&mut self, block: BlockInfo) -> SubsystemResult<()> {
 		let mut update = ActiveLeavesUpdate::default();
 
@@ -1909,7 +1909,7 @@ where
 		Ok(())
 	}
 
-	#[tracing::instrument(level = "trace", skip(self), fields(target = LOG_TARGET))]
+	#[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))]
 	async fn broadcast_signal(&mut self, signal: OverseerSignal) -> SubsystemResult<()> {
 		self.candidate_validation_subsystem.send_signal(signal.clone()).await?;
 		self.candidate_backing_subsystem.send_signal(signal.clone()).await?;
@@ -1934,7 +1934,7 @@ where
 		Ok(())
 	}
 
-	#[tracing::instrument(level = "trace", skip(self), fields(target = LOG_TARGET))]
+	#[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))]
 	async fn route_message(&mut self, msg: MaybeTimed) -> SubsystemResult<()> {
 		let msg = msg.into_inner();
 		self.metrics.on_message_relayed();
@@ -2001,7 +2001,7 @@ where
 		Ok(())
 	}
 
-	#[tracing::instrument(level = "trace", skip(self), fields(target = LOG_TARGET))]
+	#[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))]
 	fn on_head_activated(&mut self, hash: &Hash, parent_hash: Option) -> Arc {
 		self.metrics.on_head_activated();
 		if let Some(listeners) = self.activation_external_listeners.remove(hash) {
@@ -2022,14 +2022,14 @@ where
 		span
 	}
 
-	#[tracing::instrument(level = "trace", skip(self), fields(target = LOG_TARGET))]
+	#[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))]
 	fn on_head_deactivated(&mut self, hash: &Hash) {
 		self.metrics.on_head_deactivated();
 		self.activation_external_listeners.remove(hash);
 		self.span_per_active_leaf.remove(hash);
 	}
 
-	#[tracing::instrument(level = "trace", skip(self), fields(target = LOG_TARGET))]
+	#[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))]
 	fn clean_up_external_listeners(&mut self) {
 		self.activation_external_listeners.retain(|_, v| {
 			// remove dead listeners
@@ -2038,7 +2038,7 @@ where
 		})
 	}
 
-	#[tracing::instrument(level = "trace", skip(self, request), fields(target = LOG_TARGET))]
+	#[tracing::instrument(level = "trace", skip(self, request), fields(subsystem = LOG_TARGET))]
	fn handle_external_request(&mut self, request: ExternalRequest) {
 		match request {
 			ExternalRequest::WaitForActivation { hash, response_channel } => {

From 16beffc92d1856d5dd84c2b0b142e176f4fe0d97 Mon Sep 17 00:00:00 2001
From: Robert Klotzner
Date: Thu, 4 Mar 2021 17:29:41 +0100
Subject: [PATCH 12/13] target -> subsystem in instrumentation macro

`target` is not correct either, and the correct way of using a top-level
`target = LOG_TARGET` does not work, as the macro expects a string
literal and gets confused by the constant `LOG_TARGET`. --- .../availability-distribution/src/requester/fetch_task/mod.rs | 4 ++-- node/network/availability-distribution/src/requester/mod.rs | 4 ++-- node/network/availability-distribution/src/session_cache.rs | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/node/network/availability-distribution/src/requester/fetch_task/mod.rs b/node/network/availability-distribution/src/requester/fetch_task/mod.rs index bba72ae6358d..b2a282f2609a 100644 --- a/node/network/availability-distribution/src/requester/fetch_task/mod.rs +++ b/node/network/availability-distribution/src/requester/fetch_task/mod.rs @@ -175,7 +175,7 @@ impl FetchTask { /// Start fetching a chunk. /// /// A task handling the fetching of the configured chunk will be spawned. - #[tracing::instrument(level = "trace", skip(config, ctx), fields(target = LOG_TARGET))] + #[tracing::instrument(level = "trace", skip(config, ctx), fields(subsystem = LOG_TARGET))] pub async fn start(config: FetchTaskConfig, ctx: &mut Context) -> Result where Context: SubsystemContext, @@ -248,7 +248,7 @@ enum TaskError { } impl RunningTask { - #[tracing::instrument(level = "trace", skip(self, kill), fields(target = LOG_TARGET))] + #[tracing::instrument(level = "trace", skip(self, kill), fields(subsystem = LOG_TARGET))] async fn run(self, kill: oneshot::Receiver<()>) { // Wait for completion/or cancel. let run_it = self.run_inner(); diff --git a/node/network/availability-distribution/src/requester/mod.rs b/node/network/availability-distribution/src/requester/mod.rs index c231f68c6aa9..f613632ccbde 100644 --- a/node/network/availability-distribution/src/requester/mod.rs +++ b/node/network/availability-distribution/src/requester/mod.rs @@ -74,7 +74,7 @@ impl Requester { /// /// You must feed it with `ActiveLeavesUpdate` via `update_fetching_heads` and make it progress /// by advancing the stream. - #[tracing::instrument(level = "trace", skip(keystore, metrics), fields(target = LOG_TARGET))] + #[tracing::instrument(level = "trace", skip(keystore, metrics), fields(subsystem = LOG_TARGET))] pub fn new(keystore: SyncCryptoStorePtr, metrics: Metrics) -> Self { // All we do is forwarding messages, no need to make this big. // Each sender will get one slot, see @@ -91,7 +91,7 @@ impl Requester { /// Update heads that need availability distribution. /// /// For all active heads we will be fetching our chunks for availabilty distribution. - #[tracing::instrument(level = "trace", skip(self, ctx, update), fields(target = LOG_TARGET))] + #[tracing::instrument(level = "trace", skip(self, ctx, update), fields(subsystem = LOG_TARGET))] pub async fn update_fetching_heads( &mut self, ctx: &mut Context, diff --git a/node/network/availability-distribution/src/session_cache.rs b/node/network/availability-distribution/src/session_cache.rs index 9e9dd6e9d9db..0212717767ba 100644 --- a/node/network/availability-distribution/src/session_cache.rs +++ b/node/network/availability-distribution/src/session_cache.rs @@ -116,7 +116,7 @@ impl SessionCache { /// /// Use this function over any `fetch_session_info` if all you need is a reference to /// `SessionInfo`, as it avoids an expensive clone. 
-	#[tracing::instrument(level = "trace", skip(self, ctx, with_info), fields(target = LOG_TARGET))]
+	#[tracing::instrument(level = "trace", skip(self, ctx, with_info), fields(subsystem = LOG_TARGET))]
 	pub async fn with_session_info(
 		&mut self,
 		ctx: &mut Context,

From 2f5ceea57cc8e9f72fa217abef23cb731305eacb Mon Sep 17 00:00:00 2001
From: Robert Klotzner
Date: Thu, 4 Mar 2021 17:40:21 +0100
Subject: [PATCH 13/13] Use kebab-case for span names.

Co-authored-by: Andronik Ordian
---
 node/network/availability-distribution/src/responder.rs | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/node/network/availability-distribution/src/responder.rs b/node/network/availability-distribution/src/responder.rs
index 3e233654c9b5..6704de72a8b6 100644
--- a/node/network/availability-distribution/src/responder.rs
+++ b/node/network/availability-distribution/src/responder.rs
@@ -64,9 +64,9 @@ pub async fn answer_request(
 where
 	Context: SubsystemContext,
 {
-	let mut span = jaeger::candidate_hash_span(&req.payload.candidate_hash, "answer_request");
+	let mut span = jaeger::candidate_hash_span(&req.payload.candidate_hash, "answer-request");
 	span.add_stage(jaeger::Stage::AvailabilityDistribution);
-	let _child_span = span.child_builder("answer_chunk_request")
+	let _child_span = span.child_builder("answer-chunk-request")
 		.with_chunk_index(req.payload.index.0)
 		.build();
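
Note on the `fields(subsystem = LOG_TARGET)` pattern used throughout this
series: `tracing::instrument` accepts arbitrary expressions as field values,
but its top-level `target` argument must be a string literal, so the
`LOG_TARGET` constant can only be attached as a recorded field. A minimal
sketch of the two forms (the `LOG_TARGET` value and the `do_work` function
below are illustrative placeholders, not code from this tree):

    const LOG_TARGET: &str = "availability-distribution";

    // Rejected by the macro: the top-level `target` argument must be a
    // string literal, and the constant `LOG_TARGET` is not one.
    // #[tracing::instrument(level = "trace", target = LOG_TARGET)]

    // Accepted: constants are fine as field values, so the subsystem name
    // is recorded as a `subsystem` field on the generated span instead.
    #[tracing::instrument(level = "trace", fields(subsystem = LOG_TARGET))]
    fn do_work() {}

Subscribers can then still group or filter span events by the recorded
`subsystem` field, even though each event's `target` remains the default
module path.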