diff --git a/core/benches/receive_and_buffer_utils.rs b/core/benches/receive_and_buffer_utils.rs index d961bab2322526..58c9bc1634cbd8 100644 --- a/core/benches/receive_and_buffer_utils.rs +++ b/core/benches/receive_and_buffer_utils.rs @@ -83,9 +83,11 @@ fn generate_transactions( ) -> BankingPacketBatch { assert!(num_instructions_per_tx <= MAX_INSTRUCTIONS_PER_TRANSACTION); if set_rand_cu_price { - assert!(num_instructions_per_tx > 0, - "`num_instructions_per_tx` must be at least 1 when `set_rand_cu_price` flag is set to count\ - the set_compute_unit_price instruction."); + assert!( + num_instructions_per_tx > 0, + "`num_instructions_per_tx` must be at least 1 when `set_rand_cu_price` flag is set to \ + count the set_compute_unit_price instruction." + ); } let blockhash = FaultyBlockhash::new(bank.last_blockhash(), probability_invalid_blockhash); diff --git a/core/benches/scheduler.rs b/core/benches/scheduler.rs index 51b3bc2d295e73..d003f3b21dda55 100644 --- a/core/benches/scheduler.rs +++ b/core/benches/scheduler.rs @@ -133,8 +133,10 @@ fn bench_scheduler_impl( for (ix_count, ix_count_desc) in &ix_counts { for (tx_count, tx_count_desc) in &tx_counts { for (conflict_type, conflict_type_desc) in &conflict_types { - let bench_name = - format!("{bench_name}/{scheduler_desc}/{ix_count_desc}/{tx_count_desc}/{conflict_type_desc}"); + let bench_name = format!( + "{bench_name}/{scheduler_desc}/{ix_count_desc}/{tx_count_desc}/\ + {conflict_type_desc}" + ); group.throughput(Throughput::Elements(*tx_count as u64)); group.bench_function(&bench_name, |bencher| { bencher.iter_custom(|iters| { diff --git a/core/src/accounts_hash_verifier.rs b/core/src/accounts_hash_verifier.rs index e824fb74eb8605..6f6e217a620a21 100644 --- a/core/src/accounts_hash_verifier.rs +++ b/core/src/accounts_hash_verifier.rs @@ -246,14 +246,14 @@ impl AccountsHashVerifier { match accounts_package.accounts_hash_algorithm { AccountsHashAlgorithm::Merkle => { debug!( - "calculate_and_verify_accounts_hash(): snapshots lt hash is disabled, \ - DO merkle-based accounts hash calculation", + "calculate_and_verify_accounts_hash(): snapshots lt hash is disabled, DO \ + merkle-based accounts hash calculation", ); } AccountsHashAlgorithm::Lattice => { debug!( - "calculate_and_verify_accounts_hash(): snapshots lt hash is enabled, \ - SKIP merkle-based accounts hash calculation", + "calculate_and_verify_accounts_hash(): snapshots lt hash is enabled, SKIP \ + merkle-based accounts hash calculation", ); return Ok((MerkleOrLatticeAccountsHash::Lattice, None)); } @@ -284,6 +284,7 @@ impl AccountsHashVerifier { let Some((base_accounts_hash, base_capitalization)) = accounts_db.get_accounts_hash(base_slot) else { + #[rustfmt::skip] panic!( "incremental snapshot requires accounts hash and capitalization from \ the full snapshot it is based on\n\ @@ -447,7 +448,10 @@ impl AccountsHashVerifier { let MerkleOrLatticeAccountsHash::Merkle(AccountsHashKind::Full(accounts_hash)) = merkle_or_lattice_accounts_hash else { - panic!("EAH requires a full accounts hash, but was given {merkle_or_lattice_accounts_hash:?}"); + panic!( + "EAH requires a full accounts hash, but was given \ + {merkle_or_lattice_accounts_hash:?}" + ); }; info!( "saving epoch accounts hash, slot: {}, hash: {}", diff --git a/core/src/banking_simulation.rs b/core/src/banking_simulation.rs index 74519135cc8a67..dc1b7f62c64fc0 100644 --- a/core/src/banking_simulation.rs +++ b/core/src/banking_simulation.rs @@ -193,8 +193,8 @@ impl BankingTraceEvents { ) { // Silence errors here as this can 
happen under normal operation... warn!( - "Reading {:?} failed {:?} due to file corruption or unclean validator shutdown", - event_file_path, read_result, + "Reading {event_file_path:?} failed {read_result:?} due to file corruption or \ + unclean validator shutdown", ); } else { read_result? @@ -342,10 +342,14 @@ struct SenderLoop { impl SenderLoop { fn log_starting(&self) { info!( - "simulating events: {} (out of {}), starting at slot {} (based on {} from traced event slot: {}) (warmup: -{:?})", - self.timed_batches_to_send.len(), self.total_batch_count, self.first_simulated_slot, + "simulating events: {} (out of {}), starting at slot {} (based on {} from traced \ + event slot: {}) (warmup: -{:?})", + self.timed_batches_to_send.len(), + self.total_batch_count, + self.first_simulated_slot, SenderLoopLogger::format_as_timestamp(self.raw_base_event_time), - self.parent_slot, WARMUP_DURATION, + self.parent_slot, + WARMUP_DURATION, ); } @@ -594,10 +598,7 @@ impl<'a> SenderLoopLogger<'a> { batch_count: usize, tx_count: usize, ) { - debug!( - "sent {:?} {} batches ({} txes)", - label, batch_count, tx_count - ); + debug!("sent {label:?} {batch_count} batches ({tx_count} txes)"); use ChannelLabel::*; let (total_batch_count, total_tx_count) = match label { @@ -625,9 +626,16 @@ impl<'a> SenderLoopLogger<'a> { let gossip_vote_tps = (self.gossip_vote_tx_count - self.last_gossip_vote_tx_count) as f64 / duration; info!( - "senders(non-,tpu-,gossip-vote): tps: {:.0} (={:.0}+{:.0}+{:.0}) over {:?} not-recved: ({}+{}+{})", - tps, non_vote_tps, tpu_vote_tps, gossip_vote_tps, log_interval, - self.non_vote_sender.len(), self.tpu_vote_sender.len(), self.gossip_vote_sender.len(), + "senders(non-,tpu-,gossip-vote): tps: {:.0} (={:.0}+{:.0}+{:.0}) over {:?} \ + not-recved: ({}+{}+{})", + tps, + non_vote_tps, + tpu_vote_tps, + gossip_vote_tps, + log_interval, + self.non_vote_sender.len(), + self.tpu_vote_sender.len(), + self.gossip_vote_sender.len(), ); self.last_log_duration = simulation_duration; self.last_tx_count = current_tx_count; @@ -763,10 +771,7 @@ impl BankingSimulator { ))) .unwrap(); assert!(retracer.is_enabled()); - info!( - "Enabled banking retracer (dir_byte_limit: {})", - BANKING_TRACE_DIR_DEFAULT_BYTE_LIMIT, - ); + info!("Enabled banking retracer (dir_byte_limit: {BANKING_TRACE_DIR_DEFAULT_BYTE_LIMIT})",); // Create a partially-dummy ClusterInfo for the banking stage. 
let cluster_info_for_banking = Arc::new(DummyClusterInfo { diff --git a/core/src/banking_trace.rs b/core/src/banking_trace.rs index 9fd41225eb67ef..7c6d11d92da077 100644 --- a/core/src/banking_trace.rs +++ b/core/src/banking_trace.rs @@ -450,10 +450,7 @@ impl TracedSender { TracedEvent::PacketBatch(self.label, BankingPacketBatch::clone(&batch)), )) .map_err(|err| { - error!( - "unexpected error when tracing a banking event...: {:?}", - err - ); + error!("unexpected error when tracing a banking event...: {err:?}"); SendError(BankingPacketBatch::clone(&batch)) })?; } diff --git a/core/src/cluster_info_vote_listener.rs b/core/src/cluster_info_vote_listener.rs index ce2b0a29782e7f..f8dbe38565c225 100644 --- a/core/src/cluster_info_vote_listener.rs +++ b/core/src/cluster_info_vote_listener.rs @@ -545,7 +545,7 @@ impl ClusterInfoVoteListener { sender .send(BankNotification::OptimisticallyConfirmed(slot)) .unwrap_or_else(|err| { - warn!("bank_notification_sender failed: {:?}", err) + warn!("bank_notification_sender failed: {err:?}") }); } } diff --git a/core/src/cluster_slots_service/cluster_slots.rs b/core/src/cluster_slots_service/cluster_slots.rs index 2868b53e66856f..8f9624d1974d31 100644 --- a/core/src/cluster_slots_service/cluster_slots.rs +++ b/core/src/cluster_slots_service/cluster_slots.rs @@ -247,7 +247,7 @@ impl ClusterSlots { let epoch_metadata = self.epoch_metadata.read().unwrap(); //startup init, this is very slow but only ever happens once if cluster_slots.is_empty() { - info!("Init cluster_slots at range {:?}", slot_range); + info!("Init cluster_slots at range {slot_range:?}"); for slot in slot_range.clone() { // Epoch should be defined for all slots in the window let epoch = self @@ -293,7 +293,10 @@ impl ClusterSlots { .get_epoch_for_slot(slot) .expect("Epoch should be defined for all slots in the window"); let Some(stake_info) = epoch_metadata.get(&epoch) else { - warn!("Epoch slots can not reuse slot entry for slot {slot} since stakes for epoch {epoch} are not available"); + warn!( + "Epoch slots can not reuse slot entry for slot {slot} since stakes for epoch \ + {epoch} are not available" + ); cluster_slots.push_back(RowContent { slot, supporters: Arc::new(SlotSupporters::new_blank()), @@ -515,8 +518,7 @@ mod tests { assert_eq!( rg.len(), CLUSTER_SLOTS_TRIM_SIZE, - "ring should have exactly {} elements", - CLUSTER_SLOTS_TRIM_SIZE + "ring should have exactly {CLUSTER_SLOTS_TRIM_SIZE} elements" ); assert_eq!(rg.front().unwrap().slot, 1, "first slot should be root + 1"); assert_eq!( diff --git a/core/src/completed_data_sets_service.rs b/core/src/completed_data_sets_service.rs index ecb4d674b57ec7..840e46b4c8c82e 100644 --- a/core/src/completed_data_sets_service.rs +++ b/core/src/completed_data_sets_service.rs @@ -74,7 +74,7 @@ impl CompletedDataSetsService { rpc_subscriptions.notify_signatures_received((slot, transactions)); } } - Err(e) => warn!("completed-data-set-service deserialize error: {:?}", e), + Err(e) => warn!("completed-data-set-service deserialize error: {e:?}"), } slot }; diff --git a/core/src/consensus.rs b/core/src/consensus.rs index 4d6c1e24c7fcdf..d4b4788aac3f5a 100644 --- a/core/src/consensus.rs +++ b/core/src/consensus.rs @@ -407,7 +407,7 @@ impl Tower { if voted_stake == 0 { continue; } - trace!("{} {} with stake {}", vote_account_pubkey, key, voted_stake); + trace!("{vote_account_pubkey} {key} with stake {voted_stake}"); let mut vote_state = TowerVoteState::from(account.vote_state_view()); for vote in &vote_state.votes { lockout_intervals @@ -418,7 +418,7 
@@ impl Tower { if key == *vote_account_pubkey { my_latest_landed_vote = vote_state.nth_recent_lockout(0).map(|l| l.slot()); - debug!("vote state {:?}", vote_state); + debug!("vote state {vote_state:?}"); debug!( "observed slot {}", vote_state @@ -578,8 +578,8 @@ impl Tower { if let Some(last_voted_slot) = self.last_vote.last_voted_slot() { if heaviest_slot_on_same_fork <= last_voted_slot { warn!( - "Trying to refresh timestamp for vote on {last_voted_slot} \ - using smaller heaviest bank {heaviest_slot_on_same_fork}" + "Trying to refresh timestamp for vote on {last_voted_slot} using smaller \ + heaviest bank {heaviest_slot_on_same_fork}" ); return; } @@ -961,7 +961,7 @@ impl Tower { vote({last_voted_slot}), meaning some inconsistency between saved tower and \ ledger." ); - warn!("{}", message); + warn!("{message}"); datapoint_warn!("tower_warn", ("warn", message, String)); } &empty_ancestors @@ -1116,8 +1116,8 @@ impl Tower { last_vote_ancestors, ) .expect( - "candidate_slot and switch_slot exist in descendants map, \ - so they must exist in ancestors map", + "candidate_slot and switch_slot exist in descendants map, so they \ + must exist in ancestors map", ) } { @@ -1255,11 +1255,7 @@ impl Tower { ); let new_check = Some((switch_slot, decision.clone())); if new_check != self.last_switch_threshold_check { - trace!( - "new switch threshold check: slot {}: {:?}", - switch_slot, - decision, - ); + trace!("new switch threshold check: slot {switch_slot}: {decision:?}",); self.last_switch_threshold_check = new_check; } decision @@ -1472,7 +1468,7 @@ impl Tower { "For some reason, we're REPROCESSING slots which has already been voted and \ ROOTED by us; VOTING will be SUSPENDED UNTIL {last_voted_slot}!", ); - error!("{}", message); + error!("{message}"); datapoint_error!("tower_error", ("error", message, String)); // Let's pass-through adjust_lockouts_with_slot_history just for sanitization, @@ -1571,7 +1567,7 @@ impl Tower { } // Check for errors if not anchored - info!("adjusted tower's anchored slot: {:?}", anchored_slot); + info!("adjusted tower's anchored slot: {anchored_slot:?}"); if anchored_slot.is_none() { // this error really shouldn't happen unless ledger/tower is corrupted return Err(TowerError::FatallyInconsistent( @@ -1737,9 +1733,8 @@ pub fn reconcile_blockstore_roots_with_external_source( .collect(); if !new_roots.is_empty() { info!( - "Reconciling slots as root based on external root: {:?} (external: {:?}, \ - blockstore: {})", - new_roots, external_source, last_blockstore_root + "Reconciling slots as root based on external root: {new_roots:?} (external: \ + {external_source:?}, blockstore: {last_blockstore_root})" ); // Unfortunately, we can't supply duplicate-confirmed hashes, @@ -1761,10 +1756,9 @@ pub fn reconcile_blockstore_roots_with_external_source( // That's because we might have a chance of recovering properly with // newer snapshot. 
warn!( - "Couldn't find any ancestor slots from external source ({:?}) towards blockstore \ - root ({}); blockstore pruned or only tower moved into new ledger or just hard \ - fork?", - external_source, last_blockstore_root, + "Couldn't find any ancestor slots from external source ({external_source:?}) \ + towards blockstore root ({last_blockstore_root}); blockstore pruned or only \ + tower moved into new ledger or just hard fork?", ); } } diff --git a/core/src/consensus/fork_choice.rs b/core/src/consensus/fork_choice.rs index 6b9bcac4b54e04..5cbf88caa73209 100644 --- a/core/src/consensus/fork_choice.rs +++ b/core/src/consensus/fork_choice.rs @@ -144,11 +144,8 @@ fn recheck_fork_decision_failed_switch_threshold( // then there will be no blocks to include the votes for slot 4, and the network halts // because 90% of validators can't vote info!( - "Waiting to switch vote to {heaviest_bank_slot}, \ - resetting to slot {:?} for now, \ - switch proof stake: {switch_proof_stake}, \ - threshold stake: {}, \ - total stake: {total_stake}", + "Waiting to switch vote to {heaviest_bank_slot}, resetting to slot {:?} for now, switch \ + proof stake: {switch_proof_stake}, threshold stake: {}, total stake: {total_stake}", reset_bank.as_ref().map(|b| b.slot()), total_stake as f64 * SWITCH_FORK_THRESHOLD, ); diff --git a/core/src/consensus/heaviest_subtree_fork_choice.rs b/core/src/consensus/heaviest_subtree_fork_choice.rs index 43935c55908eb2..8440f2f05ae0f8 100644 --- a/core/src/consensus/heaviest_subtree_fork_choice.rs +++ b/core/src/consensus/heaviest_subtree_fork_choice.rs @@ -143,9 +143,9 @@ impl ForkInfo { if let Some(latest_invalid_ancestor) = self.latest_invalid_ancestor { if latest_invalid_ancestor <= newly_valid_ancestor { info!( - "Fork choice for {:?} clearing latest invalid ancestor {:?} because {:?} was \ - duplicate confirmed", - my_key, latest_invalid_ancestor, newly_valid_ancestor + "Fork choice for {my_key:?} clearing latest invalid ancestor \ + {latest_invalid_ancestor:?} because {newly_valid_ancestor:?} was duplicate \ + confirmed" ); self.latest_invalid_ancestor = None; } @@ -936,10 +936,7 @@ impl HeaviestSubtreeForkChoice { let fork_info = self.fork_infos.get_mut(&slot_hash_key).unwrap(); if is_duplicate_confirmed { if !fork_info.is_duplicate_confirmed { - info!( - "Fork choice setting {:?} to duplicate confirmed", - slot_hash_key - ); + info!("Fork choice setting {slot_hash_key:?} to duplicate confirmed"); } fork_info.set_duplicate_confirmed(); } @@ -1038,8 +1035,8 @@ impl HeaviestSubtreeForkChoice { { assert!(if new_vote_slot == old_latest_vote_slot { warn!( - "Got a duplicate vote for validator: {pubkey}, \ - slot_hash: {new_vote_slot_hash:?}", + "Got a duplicate vote for validator: {pubkey}, slot_hash: \ + {new_vote_slot_hash:?}", ); // If the slots are equal, then the new // vote must be for a smaller hash @@ -1331,10 +1328,7 @@ impl ForkChoice for HeaviestSubtreeForkChoice { } fn mark_fork_invalid_candidate(&mut self, invalid_slot_hash_key: &SlotHashKey) { - info!( - "marking fork starting at: {:?} invalid candidate", - invalid_slot_hash_key - ); + info!("marking fork starting at: {invalid_slot_hash_key:?} invalid candidate"); let fork_info = self.fork_infos.get_mut(invalid_slot_hash_key); if let Some(fork_info) = fork_info { // Should not be marking duplicate confirmed blocks as invalid candidates @@ -1359,10 +1353,7 @@ impl ForkChoice for HeaviestSubtreeForkChoice { } fn mark_fork_valid_candidate(&mut self, valid_slot_hash_key: &SlotHashKey) -> Vec { - info!( - "marking fork 
starting at: {:?} valid candidate", - valid_slot_hash_key - ); + info!("marking fork starting at: {valid_slot_hash_key:?} valid candidate"); let mut newly_duplicate_confirmed_ancestors = vec![]; for ancestor_key in std::iter::once(*valid_slot_hash_key) diff --git a/core/src/consensus/progress_map.rs b/core/src/consensus/progress_map.rs index 801a3302d56f80..6d12552124c81f 100644 --- a/core/src/consensus/progress_map.rs +++ b/core/src/consensus/progress_map.rs @@ -402,13 +402,8 @@ impl ProgressMap { pub fn log_propagated_stats(&self, slot: Slot, bank_forks: &RwLock) { if let Some(stats) = self.get_propagated_stats(slot) { info!( - "Propagated stats: \ - total staked: {}, \ - observed staked: {}, \ - vote pubkeys: {:?}, \ - node_pubkeys: {:?}, \ - slot: {slot}, \ - epoch: {:?}", + "Propagated stats: total staked: {}, observed staked: {}, vote pubkeys: {:?}, \ + node_pubkeys: {:?}, slot: {slot}, epoch: {:?}", stats.total_epoch_stake, stats.propagated_validators_stake, stats.propagated_validators, diff --git a/core/src/consensus/tower_storage.rs b/core/src/consensus/tower_storage.rs index bbb39c1621f2b7..534ce829241f8b 100644 --- a/core/src/consensus/tower_storage.rs +++ b/core/src/consensus/tower_storage.rs @@ -292,7 +292,7 @@ impl TowerStorage for EtcdTowerStorage { self.runtime .block_on(async { self.client.lock().await.txn(txn).await }) .map_err(|err| { - error!("Failed to acquire etcd instance lock: {}", err); + error!("Failed to acquire etcd instance lock: {err}"); Self::etdc_to_tower_error(err) })?; @@ -308,7 +308,7 @@ impl TowerStorage for EtcdTowerStorage { .runtime .block_on(async { self.client.lock().await.txn(txn).await }) .map_err(|err| { - error!("Failed to read etcd saved tower: {}", err); + error!("Failed to read etcd saved tower: {err}"); Self::etdc_to_tower_error(err) })?; @@ -353,7 +353,7 @@ impl TowerStorage for EtcdTowerStorage { .runtime .block_on(async { self.client.lock().await.txn(txn).await }) .map_err(|err| { - error!("Failed to write etcd saved tower: {}", err); + error!("Failed to write etcd saved tower: {err}"); err }) .map_err(Self::etdc_to_tower_error)?; diff --git a/core/src/consensus/tower_vote_state.rs b/core/src/consensus/tower_vote_state.rs index 9b402cf61e7dc0..f1a4b4693f8639 100644 --- a/core/src/consensus/tower_vote_state.rs +++ b/core/src/consensus/tower_vote_state.rs @@ -70,9 +70,11 @@ impl TowerVoteState { for (i, v) in self.votes.iter_mut().enumerate() { // Don't increase the lockout for this vote until we get more confirmations // than the max number of confirmations this vote has seen - if stack_depth > - i.checked_add(v.confirmation_count() as usize) - .expect("`confirmation_count` and tower_size should be bounded by `MAX_LOCKOUT_HISTORY`") + if stack_depth + > i.checked_add(v.confirmation_count() as usize).expect( + "`confirmation_count` and tower_size should be bounded by \ + `MAX_LOCKOUT_HISTORY`", + ) { v.increase_confirmation_count(1); } diff --git a/core/src/fetch_stage.rs b/core/src/fetch_stage.rs index 8a5102c3824c6c..6a2f1faf3161c0 100644 --- a/core/src/fetch_stage.rs +++ b/core/src/fetch_stage.rs @@ -238,7 +238,7 @@ impl FetchStage { Error::RecvTimeout(RecvTimeoutError::Timeout) => (), Error::Recv(_) => break, Error::Send => break, - _ => error!("{:?}", e), + _ => error!("{e:?}"), } } }) diff --git a/core/src/forwarding_stage.rs b/core/src/forwarding_stage.rs index 837ac9ad766a55..aa5dfdd129e60d 100644 --- a/core/src/forwarding_stage.rs +++ b/core/src/forwarding_stage.rs @@ -279,8 +279,8 @@ impl { let Some(packet_data) = packet.data(..) 
else { unreachable!( - "packet.meta().discard() was already checked. \ - If not discarded, packet MUST have data" + "packet.meta().discard() was already checked. If not discarded, packet \ + MUST have data" ); }; diff --git a/core/src/optimistic_confirmation_verifier.rs b/core/src/optimistic_confirmation_verifier.rs index e8177ec14f4335..ced702e906a53a 100644 --- a/core/src/optimistic_confirmation_verifier.rs +++ b/core/src/optimistic_confirmation_verifier.rs @@ -109,15 +109,9 @@ impl OptimisticConfirmationVerifier { .unwrap_or(0); error!( - "{}, \ - hash: {hash}, \ - epoch: {epoch}, \ - voted keys: {:?}, \ - root: {root}, \ - root bank hash: {}, \ - voted stake: {voted_stake}, \ - total epoch stake: {total_epoch_stake}, \ - pct: {}", + "{}, hash: {hash}, epoch: {epoch}, voted keys: {:?}, root: {root}, root bank \ + hash: {}, voted stake: {voted_stake}, total epoch stake: \ + {total_epoch_stake}, pct: {}", Self::format_optimistic_confirmed_slot_violation_log(*optimistic_slot), r_slot_tracker .as_ref() diff --git a/core/src/repair/ancestor_hashes_service.rs b/core/src/repair/ancestor_hashes_service.rs index 8ceaac890cc5d6..74ccf219f71251 100644 --- a/core/src/repair/ancestor_hashes_service.rs +++ b/core/src/repair/ancestor_hashes_service.rs @@ -128,7 +128,7 @@ impl AncestorRepairRequestsStats { let repair_total = self.ancestor_requests.count; if self.last_report.elapsed().as_secs() > 2 && repair_total > 0 { - info!("ancestor_repair_requests_stats: {:?}", slot_to_count); + info!("ancestor_repair_requests_stats: {slot_to_count:?}"); datapoint_info!( "ancestor-repair", ("ancestor-repair-count", self.ancestor_requests.count, i64) @@ -741,8 +741,8 @@ impl AncestorHashesService { for (slot, request_type) in potential_slot_requests.take(number_of_allowed_requests) { warn!( - "Cluster froze slot: {slot}, but we marked it as {}. \ - Initiating protocol to sample cluster for dead slot ancestors.", + "Cluster froze slot: {slot}, but we marked it as {}. 
Initiating protocol to \ + sample cluster for dead slot ancestors.", if request_type.is_pruned() { "pruned" } else { diff --git a/core/src/repair/cluster_slot_state_verifier.rs b/core/src/repair/cluster_slot_state_verifier.rs index 5c42c3bf267321..0cae312aeb110a 100644 --- a/core/src/repair/cluster_slot_state_verifier.rs +++ b/core/src/repair/cluster_slot_state_verifier.rs @@ -377,8 +377,8 @@ fn check_duplicate_confirmed_hash_against_bank_status( // If the cluster duplicate confirmed some version of this slot, then // there's another version of our dead slot warn!( - "Cluster duplicate confirmed slot {} with hash {}, but we marked slot dead", - slot, duplicate_confirmed_hash + "Cluster duplicate confirmed slot {slot} with hash {duplicate_confirmed_hash}, \ + but we marked slot dead" ); state_changes.push(ResultingStateChange::RepairDuplicateConfirmedVersion( duplicate_confirmed_hash, @@ -397,8 +397,8 @@ fn check_duplicate_confirmed_hash_against_bank_status( // Modify fork choice rule to exclude our version from being voted // on and also repair the correct version warn!( - "Cluster duplicate confirmed slot {} with hash {}, but our version has hash {}", - slot, duplicate_confirmed_hash, bank_frozen_hash + "Cluster duplicate confirmed slot {slot} with hash {duplicate_confirmed_hash}, \ + but our version has hash {bank_frozen_hash}" ); state_changes.push(ResultingStateChange::MarkSlotDuplicate(bank_frozen_hash)); state_changes.push(ResultingStateChange::RepairDuplicateConfirmedVersion( @@ -435,8 +435,8 @@ fn check_epoch_slots_hash_against_bank_status( BankStatus::Frozen(bank_frozen_hash) => { // The epoch slots hash does not match our frozen hash. warn!( - "EpochSlots sample returned slot {slot} with hash {epoch_slots_frozen_hash}, \ - but our version has hash {bank_frozen_hash:?}", + "EpochSlots sample returned slot {slot} with hash {epoch_slots_frozen_hash}, but \ + our version has hash {bank_frozen_hash:?}", ); if !is_popular_pruned { // If the slot is not already pruned notify fork choice to mark as invalid @@ -446,8 +446,8 @@ fn check_epoch_slots_hash_against_bank_status( BankStatus::Dead => { // Cluster sample found a hash for our dead slot, we must have the wrong version warn!( - "EpochSlots sample returned slot {slot} with hash {epoch_slots_frozen_hash}, \ - but we marked slot dead", + "EpochSlots sample returned slot {slot} with hash {epoch_slots_frozen_hash}, but \ + we marked slot dead", ); } BankStatus::Unprocessed => { @@ -456,8 +456,8 @@ fn check_epoch_slots_hash_against_bank_status( assert!(is_popular_pruned); // The cluster sample found the troublesome slot which caused this fork to be pruned warn!( - "EpochSlots sample returned slot {slot} with hash {epoch_slots_frozen_hash}, \ - but we have pruned it due to incorrect ancestry" + "EpochSlots sample returned slot {slot} with hash {epoch_slots_frozen_hash}, but \ + we have pruned it due to incorrect ancestry" ); } } @@ -645,7 +645,8 @@ fn on_epoch_slots_frozen( if epoch_slots_frozen_hash != duplicate_confirmed_hash { warn!( "EpochSlots sample returned slot {slot} with hash {epoch_slots_frozen_hash}, \ - but we already saw duplicate confirmation on hash: {duplicate_confirmed_hash:?}", + but we already saw duplicate confirmation on hash: \ + {duplicate_confirmed_hash:?}", ); } return vec![]; @@ -857,8 +858,8 @@ pub(crate) fn check_slot_agrees_with_cluster( slot_state_update: SlotStateUpdate, ) { info!( - "check_slot_agrees_with_cluster() slot: {}, root: {}, slot_state_update: {:?}", - slot, root, slot_state_update + 
"check_slot_agrees_with_cluster() slot: {slot}, root: {root}, slot_state_update: \ + {slot_state_update:?}" ); if slot <= root { diff --git a/core/src/repair/duplicate_repair_status.rs b/core/src/repair/duplicate_repair_status.rs index 84c257da8f4ade..30a508f74bf03d 100644 --- a/core/src/repair/duplicate_repair_status.rs +++ b/core/src/repair/duplicate_repair_status.rs @@ -322,9 +322,9 @@ impl AncestorRequestStatus { agreed_response[*mismatch_i]; let mismatch_our_frozen_hash = blockstore.get_bank_hash(mismatch_slot); info!( - "When processing the ancestor sample for {}, there was a mismatch \ - for {mismatch_slot}: we had frozen hash {:?} and the cluster agreed \ - upon {mismatch_agreed_upon_hash}. However for a later ancestor \ + "When processing the ancestor sample for {}, there was a mismatch for \ + {mismatch_slot}: we had frozen hash {:?} and the cluster agreed upon \ + {mismatch_agreed_upon_hash}. However for a later ancestor \ {ancestor_slot} we have agreement on {our_frozen_hash} as the bank \ hash. This should never be possible, something is wrong or the \ cluster sample is invalid. Rejecting and queuing the ancestor hashes \ @@ -360,10 +360,9 @@ impl AncestorRequestStatus { self.requested_mismatched_slot ); } - (Some(decision), true) => panic!( - "Programmer error, {:?} should not be set in decision loop", - decision - ), + (Some(decision), true) => { + panic!("Programmer error, {decision:?} should not be set in decision loop") + } (Some(_), false) => { /* Already found a mismatch, descendants continue to mismatch as well */ } (None, true) => { /* Mismatch hasn't been found yet */ } @@ -476,9 +475,9 @@ impl AncestorRequestStatus { // replay dump then repair to fix. warn!( - "Blockstore is missing frozen hash for slot {ancestor_slot}, \ - which the cluster claims is an ancestor of dead slot {}. Potentially \ - our version of the dead slot chains to the wrong fork!", + "Blockstore is missing frozen hash for slot {ancestor_slot}, which the \ + cluster claims is an ancestor of dead slot {}. 
Potentially our version of \ + the dead slot chains to the wrong fork!", self.requested_mismatched_slot ); } diff --git a/core/src/repair/repair_service.rs b/core/src/repair/repair_service.rs index 1da90833ea476f..dc374fb057994e 100644 --- a/core/src/repair/repair_service.rs +++ b/core/src/repair/repair_service.rs @@ -171,7 +171,7 @@ impl RepairStats { .chain(self.orphan.slot_pubkeys.iter()) .map(|(slot, slot_repairs)| (slot, slot_repairs.pubkey_repairs.values().sum::())) .collect(); - info!("repair_stats: {:?}", slot_to_count); + info!("repair_stats: {slot_to_count:?}"); if repair_total > 0 { let nonzero_num = |x| if x == 0 { None } else { Some(x) }; datapoint_info!( @@ -608,10 +608,7 @@ impl RepairService { } }); if !popular_pruned_forks.is_empty() { - warn!( - "Notifying repair of popular pruned forks {:?}", - popular_pruned_forks - ); + warn!("Notifying repair of popular pruned forks {popular_pruned_forks:?}"); popular_pruned_forks_sender .send(popular_pruned_forks) .unwrap_or_else(|err| error!("failed to send popular pruned forks {err}")); @@ -664,7 +661,9 @@ impl RepairService { Ok(()) => (), Err(SendPktsError::IoError(err, num_failed)) => { error!( - "{} batch_send failed to send {num_failed}/{num_pkts} packets first error {err:?}", repair_info.cluster_info.id() + "{} batch_send failed to send {num_failed}/{num_pkts} packets first error \ + {err:?}", + repair_info.cluster_info.id() ); } } @@ -1063,7 +1062,7 @@ impl RepairService { debug!("successfully sent repair request to {pubkey} / {address}!"); } Err(SendPktsError::IoError(err, _num_failed)) => { - error!("batch_send failed to send packet - error = {:?}", err); + error!("batch_send failed to send packet - error = {err:?}"); } } } @@ -1179,8 +1178,8 @@ impl RepairService { Ok(req) => { if let Err(e) = repair_socket.send_to(&req, repair_addr) { info!( - "repair req send_to {} ({}) error {:?}", - repair_pubkey, repair_addr, e + "repair req send_to {repair_pubkey} ({repair_addr}) error \ + {e:?}" ); } } diff --git a/core/src/repair/repair_weight.rs b/core/src/repair/repair_weight.rs index 5902038255928d..aefbadc7f58b0d 100644 --- a/core/src/repair/repair_weight.rs +++ b/core/src/repair/repair_weight.rs @@ -322,13 +322,13 @@ impl RepairWeight { pub fn split_off(&mut self, slot: Slot) -> HashSet { assert!(slot >= self.root); if slot == self.root { - error!("Trying to orphan root of repair tree {}", slot); + error!("Trying to orphan root of repair tree {slot}"); return HashSet::new(); } match self.slot_to_tree.get(&slot).copied() { Some(TreeRoot::Root(subtree_root)) => { if subtree_root == slot { - info!("{} is already orphan, skipping", slot); + info!("{slot} is already orphan, skipping"); return HashSet::new(); } let subtree = self @@ -350,10 +350,7 @@ impl RepairWeight { // If not they will once again be attached to the pruned set in // `update_orphan_ancestors`. - info!( - "Dumping pruned slot {} of tree {} in repair", - slot, subtree_root - ); + info!("Dumping pruned slot {slot} of tree {subtree_root} in repair"); let mut subtree = self .pruned_trees .remove(&subtree_root) @@ -378,10 +375,7 @@ impl RepairWeight { } } None => { - warn!( - "Trying to split off slot {} which doesn't currently exist in repair", - slot - ); + warn!("Trying to split off slot {slot} which doesn't currently exist in repair"); HashSet::new() } } @@ -440,7 +434,7 @@ impl RepairWeight { // Find all descendants of `self.root` that are not reachable from `new_root`. 
// Prune these out and add to `self.pruned_trees` - trace!("pruning tree {} with {}", new_root_tree_root, new_root); + trace!("pruning tree {new_root_tree_root} with {new_root}"); let (removed, pruned) = new_root_tree.purge_prune((new_root, Hash::default())); for pruned_tree in pruned { let pruned_tree_root = pruned_tree.tree_root().0; @@ -471,7 +465,7 @@ impl RepairWeight { .drain() .flat_map(|(tree_root, mut pruned_tree)| { if tree_root < new_root { - trace!("pruning tree {} with {}", tree_root, new_root); + trace!("pruning tree {tree_root} with {new_root}"); let (removed, pruned) = pruned_tree.purge_prune((new_root, Hash::default())); for (slot, _) in removed { self.slot_to_tree.remove(&slot); diff --git a/core/src/repair/serve_repair.rs b/core/src/repair/serve_repair.rs index d070cbd86d9f2f..3eb3713ddd23a4 100644 --- a/core/src/repair/serve_repair.rs +++ b/core/src/repair/serve_repair.rs @@ -516,7 +516,7 @@ impl ServeRepair { fn report_time_spent(label: &str, time: &Duration, extra: &str) { let count = time.as_millis(); if count > 5 { - info!("{} took: {} ms {}", label, count, extra); + info!("{label} took: {count} ms {extra}"); } } @@ -1248,7 +1248,8 @@ impl ServeRepair { Ok(()) => (), Err(SendPktsError::IoError(err, num_failed)) => { warn!( - "batch_send failed to send {num_failed}/{num_pkts} packets. First error: {err:?}" + "batch_send failed to send {num_failed}/{num_pkts} packets. First error: \ + {err:?}" ); } } diff --git a/core/src/replay_stage.rs b/core/src/replay_stage.rs index fb8b4575fc6ed5..ac622232cd6a8b 100644 --- a/core/src/replay_stage.rs +++ b/core/src/replay_stage.rs @@ -212,9 +212,8 @@ impl PartitionInfo { ) { if self.partition_start_time.is_none() && partition_detected { warn!( - "PARTITION DETECTED waiting to join heaviest fork: {} last vote: {:?}, reset \ - slot: {}", - heaviest_slot, last_voted_slot, reset_bank_slot, + "PARTITION DETECTED waiting to join heaviest fork: {heaviest_slot} last vote: \ + {last_voted_slot:?}, reset slot: {reset_bank_slot}", ); datapoint_info!( "replay_stage-partition-start", @@ -235,8 +234,8 @@ impl PartitionInfo { self.partition_start_time = Some(Instant::now()); } else if self.partition_start_time.is_some() && !partition_detected { warn!( - "PARTITION resolved heaviest fork: {} last vote: {:?}, reset slot: {}", - heaviest_slot, last_voted_slot, reset_bank_slot + "PARTITION resolved heaviest fork: {heaviest_slot} last vote: \ + {last_voted_slot:?}, reset slot: {reset_bank_slot}" ); datapoint_info!( "replay_stage-partition-resolved", @@ -631,18 +630,14 @@ impl ReplayStage { Ok(tower) => tower, Err(err) => { error!( - "Unable to load new tower when attempting to change identity from {} \ - to {} on ReplayStage startup, Exiting: {}", - my_old_pubkey, my_pubkey, err + "Unable to load new tower when attempting to change identity from \ + {my_old_pubkey} to {my_pubkey} on ReplayStage startup, Exiting: {err}" ); // drop(_exit) will set the exit flag, eventually tearing down the entire process return; } }; - warn!( - "Identity changed during startup from {} to {}", - my_old_pubkey, my_pubkey - ); + warn!("Identity changed during startup from {my_old_pubkey} to {my_pubkey}"); } let (mut progress, mut heaviest_subtree_fork_choice) = Self::initialize_progress_and_fork_choice_with_locked_bank_forks( @@ -1078,8 +1073,8 @@ impl ReplayStage { Err(err) => { error!( "Unable to load new tower when attempting to change \ - identity from {} to {} on set-identity, Exiting: {}", - my_old_pubkey, my_pubkey, err + identity from {my_old_pubkey} to 
{my_pubkey} on \ + set-identity, Exiting: {err}" ); // drop(_exit) will set the exit flag, eventually tearing down the entire process return; @@ -1088,7 +1083,7 @@ impl ReplayStage { // Ensure the validator can land votes with the new identity before // becoming leader has_new_vote_been_rooted = !wait_for_vote_to_start_leader; - warn!("Identity changed from {} to {}", my_old_pubkey, my_pubkey); + warn!("Identity changed from {my_old_pubkey} to {my_pubkey}"); } Self::reset_poh_recorder( @@ -1373,8 +1368,8 @@ impl ReplayStage { progress.get_leader_propagation_slot_must_exist(start_slot) { debug!( - "Slot not propagated: start_slot={} latest_leader_slot={}", - start_slot, latest_leader_slot + "Slot not propagated: start_slot={start_slot} \ + latest_leader_slot={latest_leader_slot}" ); Self::maybe_retransmit_unpropagated_slots( "replay_stage-retransmit-timing-based", @@ -1704,7 +1699,7 @@ impl ReplayStage { bank_forks: &RwLock, blockstore: &Blockstore, ) { - warn!("purging slot {}", duplicate_slot); + warn!("purging slot {duplicate_slot}"); // Doesn't need to be root bank, just needs a common bank to // access the status cache and accounts @@ -1763,8 +1758,8 @@ impl ReplayStage { // also be a duplicate. In this case we *need* to repair it, so we clear from // blockstore. warn!( - "purging duplicate descendant: {} with slot_id {} and bank hash {}, of slot {}", - slot, slot_id, bank_hash, duplicate_slot + "purging duplicate descendant: {slot} with slot_id {slot_id} and bank hash \ + {bank_hash}, of slot {duplicate_slot}" ); // Clear the slot-related data in blockstore. This will: // 1) Clear old shreds allowing new ones to be inserted @@ -1772,7 +1767,7 @@ impl ReplayStage { // this slot blockstore.clear_unconfirmed_slot(slot); } else if slot == duplicate_slot { - warn!("purging duplicate slot: {} with slot_id {}", slot, slot_id); + warn!("purging duplicate slot: {slot} with slot_id {slot_id}"); blockstore.clear_unconfirmed_slot(slot); } else { // If a descendant was unable to replay and chained from a duplicate, it is not @@ -2009,10 +2004,7 @@ impl ReplayStage { } else { "" }; - info!( - "LEADER CHANGE at slot: {} leader: {}{}", - bank_slot, new_leader, msg - ); + info!("LEADER CHANGE at slot: {bank_slot} leader: {new_leader}{msg}"); } } current_leader.replace(new_leader.to_owned()); @@ -2107,12 +2099,12 @@ impl ReplayStage { parent_slot, } => (poh_slot, parent_slot), PohLeaderStatus::NotReached => { - trace!("{} poh_recorder hasn't reached_leader_slot", my_pubkey); + trace!("{my_pubkey} poh_recorder hasn't reached_leader_slot"); return false; } }; - trace!("{} reached_leader_slot", my_pubkey); + trace!("{my_pubkey} reached_leader_slot"); let Some(parent) = bank_forks.read().unwrap().get(parent_slot) else { warn!( @@ -2130,15 +2122,10 @@ impl ReplayStage { } if bank_forks.read().unwrap().get(poh_slot).is_some() { - warn!("{} already have bank in forks at {}?", my_pubkey, poh_slot); + warn!("{my_pubkey} already have bank in forks at {poh_slot}?"); return false; } - trace!( - "{} poh_slot {} parent_slot {}", - my_pubkey, - poh_slot, - parent_slot - ); + trace!("{my_pubkey} poh_slot {poh_slot} parent_slot {parent_slot}"); if let Some(next_leader) = leader_schedule_cache.slot_leader_at(poh_slot, Some(&parent)) { if !has_new_vote_been_rooted { @@ -2146,12 +2133,7 @@ impl ReplayStage { return false; } - trace!( - "{} leader {} at poh slot: {}", - my_pubkey, - next_leader, - poh_slot - ); + trace!("{my_pubkey} leader {next_leader} at poh slot: {poh_slot}"); // I guess I missed my slot if next_leader 
!= *my_pubkey { @@ -2198,10 +2180,7 @@ impl ReplayStage { let root_slot = bank_forks.read().unwrap().root(); datapoint_info!("replay_stage-my_leader_slot", ("slot", poh_slot, i64),); - info!( - "new fork:{} parent:{} (leader) root:{}", - poh_slot, parent_slot, root_slot - ); + info!("new fork:{poh_slot} parent:{parent_slot} (leader) root:{root_slot}"); let root_distance = poh_slot - root_slot; let vote_only_bank = if root_distance > MAX_ROOT_DISTANCE_FOR_VOTE_ONLY { @@ -2227,7 +2206,7 @@ impl ReplayStage { update_bank_forks_and_poh_recorder_for_new_tpu_bank(bank_forks, poh_recorder, tpu_bank); true } else { - error!("{} No next leader found", my_pubkey); + error!("{my_pubkey} No next leader found"); false } } @@ -2509,10 +2488,7 @@ impl ReplayStage { } let vote_account = match bank.get_vote_account(vote_account_pubkey) { None => { - warn!( - "Vote account {} does not exist. Unable to vote", - vote_account_pubkey, - ); + warn!("Vote account {vote_account_pubkey} does not exist. Unable to vote",); return GenerateVoteTxResult::Failed; } Some(vote_account) => vote_account, @@ -2642,7 +2618,8 @@ impl ReplayStage { last_vote_refresh_time.last_print_time = Instant::now(); warn!( "Last landed vote for slot {} in bank {} is greater than the current last vote \ - for slot: {} tracked by tower. This indicates a bug in the on chain adoption logic", + for slot: {} tracked by tower. This indicates a bug in the on chain adoption \ + logic", latest_landed_vote_slot, heaviest_bank_on_same_fork.slot(), last_voted_slot @@ -2759,7 +2736,7 @@ impl ReplayStage { tx: vote_tx, last_voted_slot, }) - .unwrap_or_else(|err| warn!("Error: {:?}", err)); + .unwrap_or_else(|err| warn!("Error: {err:?}")); last_vote_refresh_time.last_refresh_time = Instant::now(); true } else if vote_tx_result.is_non_voting() { @@ -2805,7 +2782,7 @@ impl ReplayStage { tower.refresh_last_vote_tx_blockhash(vote_tx.message.recent_blockhash); let saved_tower = SavedTower::new(tower, identity_keypair).unwrap_or_else(|err| { - error!("Unable to create saved tower: {:?}", err); + error!("Unable to create saved tower: {err:?}"); std::process::exit(1); }); @@ -2816,7 +2793,7 @@ impl ReplayStage { tower_slots, saved_tower: SavedTowerVersions::from(saved_tower), }) - .unwrap_or_else(|err| warn!("Error: {:?}", err)); + .unwrap_or_else(|err| warn!("Error: {err:?}")); } else if vote_tx_result.is_non_voting() { tower.mark_last_vote_tx_blockhash_non_voting(); } @@ -2835,7 +2812,7 @@ impl ReplayStage { total_stake, node_vote_state, )) { - trace!("lockouts_sender failed: {:?}", e); + trace!("lockouts_sender failed: {e:?}"); } } @@ -2915,7 +2892,7 @@ impl ReplayStage { .unwrap_or(false) { // If the fork was marked as dead, don't replay it - debug!("bank_slot {:?} is marked dead", bank_slot); + debug!("bank_slot {bank_slot:?} is marked dead"); replay_result.is_slot_dead = true; return replay_result; } @@ -3008,10 +2985,10 @@ impl ReplayStage { replay_result: None, }; let my_pubkey = &my_pubkey.clone(); - trace!("Replay active bank: slot {}", bank_slot); + trace!("Replay active bank: slot {bank_slot}"); if progress.get(&bank_slot).map(|p| p.is_dead).unwrap_or(false) { // If the fork was marked as dead, don't replay it - debug!("bank_slot {:?} is marked dead", bank_slot); + debug!("bank_slot {bank_slot:?} is marked dead"); replay_result.is_slot_dead = true; } else { let bank = bank_forks @@ -3246,7 +3223,7 @@ impl ReplayStage { is_leader_block, }) .unwrap_or_else(|err| { - warn!("cost_update_sender failed sending bank stats: {:?}", err) + 
warn!("cost_update_sender failed sending bank stats: {err:?}") }); assert_ne!(bank.hash(), Hash::default()); @@ -3307,7 +3284,7 @@ impl ReplayStage { sender .sender .send(BankNotification::Frozen(bank.clone_without_scheduler())) - .unwrap_or_else(|err| warn!("bank_notification_sender failed: {:?}", err)); + .unwrap_or_else(|err| warn!("bank_notification_sender failed: {err:?}")); } let bank_hash = bank.hash(); @@ -3399,11 +3376,7 @@ impl ReplayStage { ) -> bool /* completed a bank */ { let active_bank_slots = bank_forks.read().unwrap().active_bank_slots(); let num_active_banks = active_bank_slots.len(); - trace!( - "{} active bank(s) to replay: {:?}", - num_active_banks, - active_bank_slots - ); + trace!("{num_active_banks} active bank(s) to replay: {active_bank_slots:?}"); if active_bank_slots.is_empty() { return false; } @@ -3594,11 +3567,9 @@ impl ReplayStage { return; } info!( - "Frozen bank vote state slot {:?} \ - is newer than our local vote state slot {:?}, \ - adopting the bank vote state as our own. \ - Bank votes: {:?}, root: {:?}, \ - Local votes: {:?}, root: {:?}", + "Frozen bank vote state slot {:?} is newer than our local vote state slot {:?}, \ + adopting the bank vote state as our own. Bank votes: {:?}, root: {:?}, Local votes: \ + {:?}, root: {:?}", bank_vote_state.last_voted_slot(), tower.vote_state.last_voted_slot(), bank_vote_state.votes, @@ -3621,8 +3592,8 @@ impl ReplayStage { .votes .retain(|lockout| lockout.slot() > local_root); info!( - "Local root is larger than on chain root, \ - overwrote bank root {:?} and updated votes {:?}", + "Local root is larger than on chain root, overwrote bank root {:?} and \ + updated votes {:?}", bank_vote_state.root_slot, bank_vote_state.votes ); @@ -4076,16 +4047,16 @@ impl ReplayStage { sender .sender .send(BankNotification::NewRootBank(root_bank)) - .unwrap_or_else(|err| warn!("bank_notification_sender failed: {:?}", err)); + .unwrap_or_else(|err| warn!("bank_notification_sender failed: {err:?}")); if let Some(new_chain) = rooted_slots_with_parents { sender .sender .send(BankNotification::NewRootedChain(new_chain)) - .unwrap_or_else(|err| warn!("bank_notification_sender failed: {:?}", err)); + .unwrap_or_else(|err| warn!("bank_notification_sender failed: {err:?}")); } } - info!("new root {}", new_root); + info!("new root {new_root}"); Ok(()) } @@ -4114,7 +4085,7 @@ impl ReplayStage { drop_bank_sender .send(removed_banks) - .unwrap_or_else(|err| warn!("bank drop failed: {:?}", err)); + .unwrap_or_else(|err| warn!("bank drop failed: {err:?}")); // Dropping the bank_forks write lock and reacquiring as a read lock is // safe because updates to bank_forks are only made by a single thread. 
@@ -4195,7 +4166,7 @@ impl ReplayStage { .expect("missing parent in bank forks"); for child_slot in children { if forks.get(child_slot).is_some() || new_banks.contains_key(&child_slot) { - trace!("child already active or frozen {}", child_slot); + trace!("child already active or frozen {child_slot}"); continue; } let leader = leader_schedule_cache diff --git a/core/src/sample_performance_service.rs b/core/src/sample_performance_service.rs index 7970f0c7c2d566..5653182aae1ccb 100644 --- a/core/src/sample_performance_service.rs +++ b/core/src/sample_performance_service.rs @@ -67,7 +67,7 @@ impl SamplePerformanceService { let highest_slot = snapshot.highest_slot; if let Err(e) = blockstore.write_perf_sample(highest_slot, &perf_sample) { - error!("write_perf_sample failed: slot {:?} {:?}", highest_slot, e); + error!("write_perf_sample failed: slot {highest_slot:?} {e:?}"); } } sleep(SLEEP_INTERVAL); diff --git a/core/src/sigverify_stage.rs b/core/src/sigverify_stage.rs index ab913ceabc0ff6..99dff9799837c6 100644 --- a/core/src/sigverify_stage.rs +++ b/core/src/sigverify_stage.rs @@ -415,7 +415,7 @@ impl SigVerifyStage { SigVerifyServiceError::Send(_) => { break; } - _ => error!("{:?}", e), + _ => error!("{e:?}"), } } if last_print.elapsed().as_secs() > 2 { @@ -526,7 +526,7 @@ mod tests { } let mut packet_s = Some(packet_s); let mut valid_received = 0; - trace!("sent: {}", sent_len); + trace!("sent: {sent_len}"); loop { if let Ok(verifieds) = verified_r.recv() { valid_received += verifieds @@ -544,7 +544,7 @@ mod tests { packet_s.take(); } } - trace!("received: {}", valid_received); + trace!("received: {valid_received}"); if use_same_tx { assert_eq!(valid_received, 1); diff --git a/core/src/snapshot_packager_service/pending_snapshot_packages.rs b/core/src/snapshot_packager_service/pending_snapshot_packages.rs index d29a5b0906173e..bea75ef292088c 100644 --- a/core/src/snapshot_packager_service/pending_snapshot_packages.rs +++ b/core/src/snapshot_packager_service/pending_snapshot_packages.rs @@ -33,8 +33,8 @@ impl PendingSnapshotPackages { pending_full_snapshot_package, ), Greater, - "full snapshot package must be newer than pending package, \ - old: {pending_full_snapshot_package:?}, new: {snapshot_package:?}", + "full snapshot package must be newer than pending package, old: \ + {pending_full_snapshot_package:?}, new: {snapshot_package:?}", ); info!( "overwrote pending full snapshot package, old slot: {}, new slot: {}", @@ -55,8 +55,8 @@ impl PendingSnapshotPackages { pending_incremental_snapshot_package, ), Greater, - "incremental snapshot package must be newer than pending package, \ - old: {pending_incremental_snapshot_package:?}, new: {snapshot_package:?}", + "incremental snapshot package must be newer than pending package, old: \ + {pending_incremental_snapshot_package:?}, new: {snapshot_package:?}", ); info!( "overwrote pending incremental snapshot package, old slot: {}, new slot: \ diff --git a/core/src/tpu.rs b/core/src/tpu.rs index 4dcc7b2ec7e6cb..212c9256640f9b 100644 --- a/core/src/tpu.rs +++ b/core/src/tpu.rs @@ -426,8 +426,8 @@ impl Tpu { if let Some(tracer_thread_hdl) = self.tracer_thread_hdl { if let Err(tracer_result) = tracer_thread_hdl.join()? 
{ error!( - "banking tracer thread returned error after successful thread join: {:?}", - tracer_result + "banking tracer thread returned error after successful thread join: \ + {tracer_result:?}" ); } } diff --git a/core/src/validator.rs b/core/src/validator.rs index 4055803a265c93..7e8cb3e60f30b5 100644 --- a/core/src/validator.rs +++ b/core/src/validator.rs @@ -468,7 +468,7 @@ impl BlockstoreRootScan { fn join(self) { if let Some(blockstore_root_scan) = self.thread { if let Err(err) = blockstore_root_scan.join() { - warn!("blockstore_root_scan failed to join {:?}", err); + warn!("blockstore_root_scan failed to join {err:?}"); } } } @@ -667,7 +667,7 @@ impl Validator { } for cluster_entrypoint in &cluster_entrypoints { - info!("entrypoint: {:?}", cluster_entrypoint); + info!("entrypoint: {cluster_entrypoint:?}"); } if solana_perf::perf_libs::api().is_some() { @@ -817,10 +817,7 @@ impl Validator { (root_bank.slot(), root_bank.hard_forks()) }; let shred_version = compute_shred_version(&genesis_config.hash(), Some(&hard_forks)); - info!( - "shred version: {shred_version}, hard forks: {:?}", - hard_forks - ); + info!("shred version: {shred_version}, hard forks: {hard_forks:?}"); if let Some(expected_shred_version) = config.expected_shred_version { if expected_shred_version != shred_version { @@ -922,8 +919,11 @@ impl Validator { config.accounts_db_test_hash_calculation, ); info!( - "Using: block-verification-method: {}, block-production-method: {}, transaction-structure: {}", - config.block_verification_method, config.block_production_method, config.transaction_struct + "Using: block-verification-method: {}, block-production-method: {}, \ + transaction-structure: {}", + config.block_verification_method, + config.block_production_method, + config.transaction_struct ); let (replay_vote_sender, replay_vote_receiver) = unbounded(); @@ -1470,14 +1470,11 @@ impl Validator { }; let tower = match process_blockstore.process_to_create_tower() { Ok(tower) => { - info!("Tower state: {:?}", tower); + info!("Tower state: {tower:?}"); tower } Err(e) => { - warn!( - "Unable to retrieve tower: {:?} creating default tower....", - e - ); + warn!("Unable to retrieve tower: {e:?} creating default tower...."); Tower::default() } }; @@ -1943,7 +1940,7 @@ fn post_process_restored_tower( let message = format!("Hard fork is detected; discarding tower restoration result: {tower:?}"); datapoint_error!("tower_error", ("error", message, String),); - error!("{}", message); + error!("{message}"); // unconditionally relax tower requirement so that we can always restore tower // from root bank. 
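Where a message no longer fits the line limit, the patch re-wraps the literal using Rust's string-continuation escape: a trailing `\` inside a string literal elides the newline and any leading whitespace on the following line. A small sketch, with an illustrative message and slot numbers:

fn main() {
    let wrapped = "ledger does not have enough data to wait for supermajority: \
                   current slot=1, needed slot=2";
    let flat =
        "ledger does not have enough data to wait for supermajority: current slot=1, needed slot=2";
    // The two literals are equal: re-wrapping never changes the logged text.
    assert_eq!(wrapped, flat);
}
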
@@ -1990,8 +1987,7 @@ fn post_process_restored_tower( } else { error!( "Rebuilding a new tower from the latest vote account due to failed tower \ - restore: {}", - err + restore: {err}" ); } @@ -2017,7 +2013,7 @@ fn load_genesis( assert!(leader_epoch_offset <= MAX_LEADER_SCHEDULE_EPOCH_OFFSET); let genesis_hash = genesis_config.hash(); - info!("genesis hash: {}", genesis_hash); + info!("genesis hash: {genesis_hash}"); if let Some(expected_genesis_hash) = config.expected_genesis_hash { if genesis_hash != expected_genesis_hash { @@ -2057,7 +2053,7 @@ fn load_blockstore( ), String, > { - info!("loading ledger from {:?}...", ledger_path); + info!("loading ledger from {ledger_path:?}..."); *start_progress.write().unwrap() = ValidatorStartProgress::LoadingLedger; let blockstore = Blockstore::open_with_options(ledger_path, config.blockstore_options.clone()) @@ -2312,7 +2308,7 @@ fn maybe_warp_slot( working_bank.slot() )); } - info!("warping to slot {}", warp_slot); + info!("warping to slot {warp_slot}"); let root_bank = bank_forks.root_bank(); @@ -2383,7 +2379,7 @@ fn should_cleanup_blockstore_incorrect_shred_versions( let blockstore_min_slot = blockstore.lowest_slot(); info!( "Blockstore contains data from slot {blockstore_min_slot} to {blockstore_max_slot}, the \ - latest hard fork is {latest_hard_fork}" + latest hard fork is {latest_hard_fork}" ); if latest_hard_fork < blockstore_min_slot { @@ -2471,8 +2467,7 @@ fn cleanup_blockstore_incorrect_shred_versions( // Backing up the shreds that will be deleted from primary blockstore is // not critical, so swallow errors from backup blockstore operations. let backup_folder = format!( - "{}_backup_{}_{}_{}", - BLOCKSTORE_DIRECTORY_ROCKS_LEVEL, incorrect_shred_version, start_slot, end_slot + "{BLOCKSTORE_DIRECTORY_ROCKS_LEVEL}_backup_{incorrect_shred_version}_{start_slot}_{end_slot}" ); match Blockstore::open_with_options( &blockstore.ledger_path().join(backup_folder), @@ -2557,8 +2552,8 @@ pub enum ValidatorError { GenesisHashMismatch(Hash, Hash), #[error( - "ledger does not have enough data to wait for supermajority: \ - current slot={0}, needed slot={1}" + "ledger does not have enough data to wait for supermajority: current slot={0}, needed \ + slot={1}" )] NotEnoughLedgerData(Slot, Slot), @@ -2648,8 +2643,8 @@ fn wait_for_supermajority( if gossip_stake_percent >= WAIT_FOR_SUPERMAJORITY_THRESHOLD_PERCENT { info!( - "Supermajority reached, {}% active stake detected, starting up now.", - gossip_stake_percent, + "Supermajority reached, {gossip_stake_percent}% active stake detected, \ + starting up now.", ); break; } @@ -2703,9 +2698,8 @@ fn get_stake_percent_in_gossip(bank: &Bank, cluster_info: &ClusterInfo, log: boo if let Some(peer) = peers.get(&vote_state_node_pubkey) { if peer.shred_version() == my_shred_version { trace!( - "observed {} in gossip, (activated_stake={})", - vote_state_node_pubkey, - activated_stake + "observed {vote_state_node_pubkey} in gossip, \ + (activated_stake={activated_stake})" ); online_stake += activated_stake; } else { @@ -2722,10 +2716,7 @@ fn get_stake_percent_in_gossip(bank: &Bank, cluster_info: &ClusterInfo, log: boo let online_stake_percentage = (online_stake as f64 / total_activated_stake as f64) * 100.; if log { - info!( - "{:.3}% of active stake visible in gossip", - online_stake_percentage - ); + info!("{online_stake_percentage:.3}% of active stake visible in gossip"); if !wrong_shred_nodes.is_empty() { info!( diff --git a/core/src/voting_service.rs b/core/src/voting_service.rs index 
88ba822a5b51c9..45a733df9296a4 100644 --- a/core/src/voting_service.rs +++ b/core/src/voting_service.rs @@ -113,7 +113,7 @@ impl VotingService { if let VoteOp::PushVote { saved_tower, .. } = &vote_op { let mut measure = Measure::start("tower storage save"); if let Err(err) = tower_storage.store(saved_tower) { - error!("Unable to save tower to storage: {:?}", err); + error!("Unable to save tower to storage: {err:?}"); std::process::exit(1); } measure.stop(); diff --git a/core/src/warm_quic_cache_service.rs b/core/src/warm_quic_cache_service.rs index 5197077e460a8a..349e9a7fe21137 100644 --- a/core/src/warm_quic_cache_service.rs +++ b/core/src/warm_quic_cache_service.rs @@ -42,8 +42,8 @@ impl WarmQuicCacheService { let conn = connection_cache.get_connection(&addr); if let Err(err) = conn.send_data(&[]) { warn!( - "Failed to warmup QUIC connection to the leader {leader_pubkey:?} at {addr:?}, \ - Context: {log_context}, Error: {err:?}" + "Failed to warmup QUIC connection to the leader {leader_pubkey:?} at \ + {addr:?}, Context: {log_context}, Error: {err:?}" ); } } diff --git a/core/src/window_service.rs b/core/src/window_service.rs index 75b573c1125619..8b9f4db964ecb7 100644 --- a/core/src/window_service.rs +++ b/core/src/window_service.rs @@ -96,7 +96,7 @@ impl WindowServiceMetrics { Error::RecvTimeout(_) => self.num_errors_cross_beam_recv_timeout += 1, Error::Blockstore(err) => { self.num_errors_blockstore += 1; - error!("blockstore error: {}", err); + error!("blockstore error: {err}"); } _ => self.num_errors_other += 1, } diff --git a/core/tests/scheduler_cost_adjustment.rs b/core/tests/scheduler_cost_adjustment.rs index 4de0c335a8c6a1..d5d5d53661a94e 100644 --- a/core/tests/scheduler_cost_adjustment.rs +++ b/core/tests/scheduler_cost_adjustment.rs @@ -124,7 +124,8 @@ impl TestSetup { }, Err(err) => { unreachable!( - "All test Transactions should be well-formatted for execution and commit, err: '{}'", err + "All test Transactions should be well-formatted for execution and commit, \ + err: '{err}'", ); } } diff --git a/core/tests/snapshots.rs b/core/tests/snapshots.rs index e8bda483c416e8..07fca75f8f1aa9 100644 --- a/core/tests/snapshots.rs +++ b/core/tests/snapshots.rs @@ -396,12 +396,10 @@ fn test_bank_forks_incremental_snapshot() { const LAST_SLOT: Slot = FULL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS * 2 - 1; info!( - "Running bank forks incremental snapshot test, full snapshot interval: {} slots, \ - incremental snapshot interval: {} slots, last slot: {}, set root interval: {} slots", - FULL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS, - INCREMENTAL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS, - LAST_SLOT, - SET_ROOT_INTERVAL + "Running bank forks incremental snapshot test, full snapshot interval: \ + {FULL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS} slots, incremental snapshot interval: \ + {INCREMENTAL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS} slots, last slot: {LAST_SLOT}, set root \ + interval: {SET_ROOT_INTERVAL} slots" ); let snapshot_test_config = SnapshotTestConfig::new( @@ -623,18 +621,14 @@ fn test_snapshots_with_background_services( const MAX_WAIT_DURATION: Duration = Duration::from_secs(10); info!("Running snapshots with background services test..."); + #[rustfmt::skip] trace!( "Test configuration parameters:\ - \n\tfull snapshot archive interval: {} slots\ - \n\tincremental snapshot archive interval: {} slots\ - \n\tbank snapshot interval: {} slots\ - \n\tset root interval: {} slots\ - \n\tlast slot: {}", - FULL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS, - INCREMENTAL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS, - BANK_SNAPSHOT_INTERVAL_SLOTS, - 
SET_ROOT_INTERVAL_SLOTS, - LAST_SLOT + \n\tfull snapshot archive interval: {FULL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS} slots\ + \n\tincremental snapshot archive interval: {INCREMENTAL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS} slots\ + \n\tbank snapshot interval: {BANK_SNAPSHOT_INTERVAL_SLOTS} slots\ + \n\tset root interval: {SET_ROOT_INTERVAL_SLOTS} slots\ + \n\tlast slot: {LAST_SLOT}" ); let snapshot_test_config = SnapshotTestConfig::new(
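
A few messages, like the test-configuration trace above, embed deliberate `\n\t` layout, so the patch pins them with `#[rustfmt::skip]` rather than letting rustfmt re-wrap the over-long lines. A sketch of the same idiom with illustrative values:

#[rustfmt::skip]
fn main() {
    // The attribute keeps rustfmt away from the layout-sensitive literal:
    // each trailing `\` joins the source lines, while `\n\t` produces the
    // real newline-plus-tab seen in the log output.
    println!(
        "Test configuration parameters:\
        \n\tfull snapshot archive interval: {} slots\
        \n\tlast slot: {}",
        100, 199,
    );
}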