diff --git a/perf/src/deduper.rs b/perf/src/deduper.rs
index a4853c3efdfc37..da4d5dfb9c5d66 100644
--- a/perf/src/deduper.rs
+++ b/perf/src/deduper.rs
@@ -171,7 +171,7 @@ mod tests {
             let mut batches =
                 to_packet_batches(&(0..1000).map(|_| test_tx()).collect::<Vec<_>>(), 128);
             discard += dedup_packets_and_count_discards(&filter, &mut batches) as usize;
-            trace!("{} {}", i, discard);
+            trace!("{i} {discard}");
             if filter.popcount.load(Ordering::Relaxed) > capacity {
                 break;
             }
diff --git a/perf/src/lib.rs b/perf/src/lib.rs
index f9b0b12772b680..864d541c8746f1 100644
--- a/perf/src/lib.rs
+++ b/perf/src/lib.rs
@@ -68,8 +68,9 @@ pub fn report_target_features() {
             info!("AVX detected");
         } else {
             error!(
-                "Incompatible CPU detected: missing AVX support. Please build from source on the target"
-            );
+                "Incompatible CPU detected: missing AVX support. Please build from source on \
+                 the target"
+            );
             std::process::abort();
         }
     }
@@ -83,7 +84,8 @@ pub fn report_target_features() {
            info!("AVX2 detected");
        } else {
            error!(
-                "Incompatible CPU detected: missing AVX2 support. Please build from source on the target"
+                "Incompatible CPU detected: missing AVX2 support. Please build from source on \
+                 the target"
            );
            std::process::abort();
        }
diff --git a/perf/src/packet.rs b/perf/src/packet.rs
index 68277d46ec5a63..b7d9d31be13317 100644
--- a/perf/src/packet.rs
+++ b/perf/src/packet.rs
@@ -685,7 +685,7 @@ impl PinnedPacketBatch {
                     // TODO: This should never happen. Instead the caller should
                     // break the payload into smaller messages, and here any errors
                     // should be propagated.
-                    error!("Couldn't write to packet {:?}. Data skipped.", e);
+                    error!("Couldn't write to packet {e:?}. Data skipped.");
                     packet.meta_mut().set_discard(true);
                 }
             } else {
diff --git a/perf/src/perf_libs.rs b/perf/src/perf_libs.rs
index feedc6bc03b875..a9d336bfa255dd 100644
--- a/perf/src/perf_libs.rs
+++ b/perf/src/perf_libs.rs
@@ -84,10 +84,10 @@ pub struct Api<'a> {
 static API: OnceLock<Container<Api<'static>>> = OnceLock::new();
 
 fn init(name: &OsStr) {
-    info!("Loading {:?}", name);
+    info!("Loading {name:?}");
     API.get_or_init(|| {
         unsafe { Container::load(name) }.unwrap_or_else(|err| {
-            error!("Unable to load {:?}: {}", name, err);
+            error!("Unable to load {name:?}: {err}");
             std::process::exit(1);
         })
     });
@@ -97,10 +97,10 @@ pub fn locate_perf_libs() -> Option<PathBuf> {
     let exe = env::current_exe().expect("Unable to get executable path");
     let perf_libs = exe.parent().unwrap().join("perf-libs");
     if perf_libs.is_dir() {
-        info!("perf-libs found at {:?}", perf_libs);
+        info!("perf-libs found at {perf_libs:?}");
         return Some(perf_libs);
     }
-    warn!("{:?} does not exist", perf_libs);
+    warn!("{perf_libs:?} does not exist");
     None
 }
 
@@ -108,10 +108,10 @@ fn find_cuda_home(perf_libs_path: &Path) -> Option<PathBuf> {
     if let Ok(cuda_home) = env::var("CUDA_HOME") {
         let path = PathBuf::from(cuda_home);
         if path.is_dir() {
-            info!("Using CUDA_HOME: {:?}", path);
+            info!("Using CUDA_HOME: {path:?}");
             return Some(path);
         }
-        warn!("Ignoring CUDA_HOME, not a path: {:?}", path);
+        warn!("Ignoring CUDA_HOME, not a path: {path:?}");
     }
 
     // Search /usr/local for a `cuda-` directory that matches a perf-libs subdirectory
@@ -130,7 +130,7 @@ fn find_cuda_home(perf_libs_path: &Path) -> Option<PathBuf> {
             continue;
         }
 
-        info!("CUDA installation found at {:?}", cuda_home);
+        info!("CUDA installation found at {cuda_home:?}");
         return Some(cuda_home);
     }
     None
@@ -141,7 +141,7 @@ pub fn append_to_ld_library_path(mut ld_library_path: String) {
         ld_library_path.push(':');
         ld_library_path.push_str(&env_value);
     }
-    info!("setting ld_library_path to: {:?}", ld_library_path);
+    info!("setting ld_library_path to: {ld_library_path:?}");
     env::set_var("LD_LIBRARY_PATH", ld_library_path);
 }
 
@@ -154,7 +154,7 @@ pub fn init_cuda() {
         // to ensure the correct CUDA version is used
         append_to_ld_library_path(cuda_lib64_dir.to_str().unwrap_or("").to_string())
     } else {
-        warn!("CUDA lib64 directory does not exist: {:?}", cuda_lib64_dir);
+        warn!("CUDA lib64 directory does not exist: {cuda_lib64_dir:?}");
     }
 
     let libcuda_crypt = perf_libs_path
diff --git a/perf/src/recycler.rs b/perf/src/recycler.rs
index 0a31df16bf2a49..0b1c2209860d15 100644
--- a/perf/src/recycler.rs
+++ b/perf/src/recycler.rs
@@ -47,7 +47,7 @@ pub struct RecyclerX<T> {
 impl<T: Default> Default for RecyclerX<T> {
     fn default() -> RecyclerX<T> {
         let id = thread_rng().gen_range(0..1000);
-        trace!("new recycler..{}", id);
+        trace!("new recycler..{id}");
         RecyclerX {
             gc: Mutex::default(),
             stats: RecyclerStats::default(),
diff --git a/perf/src/sigverify.rs b/perf/src/sigverify.rs
index 261f8ea7dd9933..80a7f7e80dc862 100644
--- a/perf/src/sigverify.rs
+++ b/perf/src/sigverify.rs
@@ -510,7 +510,7 @@ pub fn shrink_batches(batches: Vec<PacketBatch>) -> Vec<PacketBatch> {
 }
 
 pub fn ed25519_verify_cpu(batches: &mut [PacketBatch], reject_non_vote: bool, packet_count: usize) {
-    debug!("CPU ECDSA for {}", packet_count);
+    debug!("CPU ECDSA for {packet_count}");
     PAR_THREAD_POOL.install(|| {
         batches.par_iter_mut().flatten().for_each(|mut packet| {
             if !packet.meta().discard() && !verify_packet(&mut packet, reject_non_vote) {
@@ -522,7 +522,7 @@ pub fn ed25519_verify_cpu(batches: &mut [PacketBatch], reject_non_vote: bool, pa
 
 pub fn ed25519_verify_disabled(batches: &mut [PacketBatch]) {
     let packet_count = count_packets_in_batches(batches);
-    debug!("disabled ECDSA for {}", packet_count);
+    debug!("disabled ECDSA for {packet_count}");
     PAR_THREAD_POOL.install(|| {
         batches.par_iter_mut().flatten().for_each(|mut packet| {
             packet.meta_mut().set_discard(false);
@@ -613,7 +613,7 @@ pub fn ed25519_verify(
     let (signature_offsets, pubkey_offsets, msg_start_offsets, msg_sizes, sig_lens) =
         generate_offsets(batches, recycler, reject_non_vote);
 
-    debug!("CUDA ECDSA for {}", valid_packet_count);
+    debug!("CUDA ECDSA for {valid_packet_count}");
     debug!("allocating out..");
     let mut out = recycler_out.allocate("out_buffer");
     out.set_pinnable();
@@ -642,7 +642,7 @@ pub fn ed25519_verify(
         num_packets = num_packets.saturating_add(batch.len());
     }
     out.resize(signature_offsets.len(), 0);
-    trace!("Starting verify num packets: {}", num_packets);
+    trace!("Starting verify num packets: {num_packets}");
     trace!("elem len: {}", elems.len() as u32);
     trace!("packet sizeof: {}", size_of::<Packet>() as u32);
     trace!("len offset: {}", PACKET_DATA_SIZE as u32);
@@ -662,7 +662,7 @@ pub fn ed25519_verify(
             USE_NON_DEFAULT_STREAM,
         );
         if res != 0 {
-            trace!("RETURN!!!: {}", res);
+            trace!("RETURN!!!: {res}");
         }
     }
     trace!("done verify");
@@ -879,7 +879,7 @@ mod tests {
         let mut tx = Transaction::new_unsigned(message);
 
         info!("message: {:?}", tx.message_data());
-        info!("tx: {:?}", tx);
+        info!("tx: {tx:?}");
 
         let sig = keypair1.try_sign_message(&tx.message_data()).unwrap();
         tx.signatures = vec![sig; NUM_SIG];
@@ -1734,7 +1734,7 @@ mod tests {
         let test_cases = set_discards.iter().zip(&expect_valids).enumerate();
 
         for (i, (set_discard, (expect_batch_count, expect_valid_packets))) in test_cases {
-            debug!("test_shrink case: {}", i);
+            debug!("test_shrink case: {i}");
             let mut batches = to_packet_batches(
                 &(0..PACKET_COUNT).map(|_| test_tx()).collect::<Vec<_>>(),
                 PACKETS_PER_BATCH,
@@ -1747,18 +1747,18 @@ mod tests {
                     .for_each(|(j, mut p)| p.meta_mut().set_discard(set_discard(i, j)))
             });
             assert_eq!(count_valid_packets(&batches), *expect_valid_packets);
-            debug!("show valid packets for case {}", i);
+            debug!("show valid packets for case {i}");
             batches.iter_mut().enumerate().for_each(|(i, b)| {
                 b.iter_mut().enumerate().for_each(|(j, p)| {
                     if !p.meta().discard() {
-                        trace!("{} {}", i, j)
+                        trace!("{i} {j}")
                     }
                 })
             });
-            debug!("done show valid packets for case {}", i);
+            debug!("done show valid packets for case {i}");
             let batches = shrink_batches(batches);
             let shrunken_batch_count = batches.len();
-            debug!("shrunk batch test {} count: {}", i, shrunken_batch_count);
+            debug!("shrunk batch test {i} count: {shrunken_batch_count}");
             assert_eq!(shrunken_batch_count, *expect_batch_count);
             assert_eq!(count_valid_packets(&batches), *expect_valid_packets);
         }
diff --git a/perf/src/thread.rs b/perf/src/thread.rs
index 36cfde10826b14..7a101390ee5024 100644
--- a/perf/src/thread.rs
+++ b/perf/src/thread.rs
@@ -81,9 +81,8 @@ where
         Ok(())
     } else {
         Err(String::from(
-            "niceness adjustment supported only on Linux; negative adjustment \
-             (priority increase) requires root or CAP_SYS_NICE (see `man 7 capabilities` \
-             for details)",
+            "niceness adjustment supported only on Linux; negative adjustment (priority increase) \
+             requires root or CAP_SYS_NICE (see `man 7 capabilities` for details)",
        ))
    }
}