Merged
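Every hunk in this diff makes the same mechanical change: logging and formatting macros are migrated from positional placeholders ("{}", value) to Rust's inlined format arguments ("{value}"), the pattern flagged by the clippy::uninlined_format_args lint and available since Rust 1.58. Inline capture only works for plain identifiers; expressions such as method calls must stay as positional arguments. A minimal sketch of the pattern, using hypothetical variable names rather than code from this diff:

fn main() {
    let slot = 42_u64;
    let label = "clean";

    // Old style: positional placeholders with the values passed as trailing arguments.
    println!("{}: processing slot {}", label, slot);

    // New style: plain identifiers are captured directly inside the format string.
    println!("{label}: processing slot {slot}");

    // Expressions (method calls, field access, indexing) cannot be captured inline,
    // so they remain positional and may be mixed with inline captures.
    let queue = vec![slot];
    println!("{label}: {} slots queued", queue.len());
}

The multi-line hunks below in accounts_db.rs, append_vec.rs, hardened_unpack.rs, and bucket_storage.rs are the same migration applied to format strings that span several lines. Removed lines are prefixed with "-" and added lines with "+".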
4 changes: 2 additions & 2 deletions accounts-db/src/account_storage_reader.rs
@@ -242,7 +242,7 @@ mod tests {

// Generate a seed from entropy and log the original seed
let seed: u64 = rand::random();
info!("Generated seed: {}", seed);
info!("Generated seed: {seed}");

// Use a seedable RNG with the generated seed for reproducibility
let mut rng = StdRng::seed_from_u64(seed);
@@ -343,7 +343,7 @@ mod tests {

// Generate a seed from entropy and log the original seed
let seed: u64 = rand::random();
info!("Generated seed: {}", seed);
info!("Generated seed: {seed}");

// Use a seedable RNG with the generated seed for reproducibility
let mut rng = StdRng::seed_from_u64(seed);
2 changes: 1 addition & 1 deletion accounts-db/src/accounts.rs
@@ -1417,7 +1417,7 @@ mod tests {
accounts.add_root_and_flush_write_cache(i);

if i % 1_000 == 0 {
info!(" store {}", i);
info!(" store {i}");
}
}
info!("done..cleaning..");
19 changes: 9 additions & 10 deletions accounts-db/src/accounts_db.rs
@@ -2049,7 +2049,7 @@ impl AccountsDb {
MarkAccountsObsolete::No,
);
measure.stop();
debug!("{}", measure);
debug!("{measure}");
self.clean_accounts_stats
.clean_old_root_reclaim_us
.fetch_add(measure.as_us(), Ordering::Relaxed);
@@ -3758,7 +3758,7 @@ impl AccountsDb {
}
let mut unique_accounts =
self.get_unique_accounts_from_storage_for_shrink(&store, &self.shrink_stats);
debug!("do_shrink_slot_store: slot: {}", slot);
debug!("do_shrink_slot_store: slot: {slot}");
let shrink_collect = self.shrink_collect::<AliveAccounts<'_>>(
&store,
&mut unique_accounts,
@@ -3940,7 +3940,7 @@ impl AccountsDb {
// Reads all accounts in given slot's AppendVecs and filter only to alive,
// then create a minimum AppendVec filled with the alive.
fn shrink_slot_forced(&self, slot: Slot) {
debug!("shrink_slot_forced: slot: {}", slot);
debug!("shrink_slot_forced: slot: {slot}");

if let Some(store) = self
.storage
@@ -6772,7 +6772,7 @@ impl AccountsDb {
.filter_map(|r| r.as_ref().err())
.next()
{
panic!("failed generating accounts hash files: {:?}", err);
panic!("failed generating accounts hash files: {err:?}");
}

// convert mmapped cache files into slices of data
@@ -6879,8 +6879,7 @@ impl AccountsDb {
self.calculate_accounts_hash(&calc_config, &sorted_storages, HashStats::default());
if calculated_lamports != total_lamports {
warn!(
"Mismatched total lamports: {} calculated: {}",
total_lamports, calculated_lamports
"Mismatched total lamports: {total_lamports} calculated: {calculated_lamports}"
);
return Err(AccountsHashVerificationError::MismatchedTotalLamports(
calculated_lamports,
@@ -7421,7 +7420,7 @@ impl AccountsDb {
"remove_dead_slots_metadata: {} dead slots",
dead_slots.len()
);
trace!("remove_dead_slots_metadata: dead_slots: {:?}", dead_slots);
trace!("remove_dead_slots_metadata: dead_slots: {dead_slots:?}");
}
self.accounts_index
.update_roots_stats(&mut accounts_index_root_stats);
@@ -8515,7 +8514,7 @@ impl AccountsDb {
.alive_bytes
.store(entry.stored_size, Ordering::Release);
} else {
trace!("id: {} clearing count", id);
trace!("id: {id} clearing count");
store.count_and_status.lock_write().0 = 0;
}
}
@@ -8532,14 +8531,14 @@ impl AccountsDb {
let mut alive_roots: Vec<_> = self.accounts_index.all_alive_roots();
#[allow(clippy::stable_sort_primitive)]
alive_roots.sort();
info!("{}: accounts_index alive_roots: {:?}", label, alive_roots,);
info!("{label}: accounts_index alive_roots: {alive_roots:?}");
self.accounts_index.account_maps.iter().for_each(|map| {
for pubkey in map.keys() {
self.accounts_index.get_and_then(&pubkey, |account_entry| {
if let Some(account_entry) = account_entry {
let list_r = &account_entry.slot_list.read().unwrap();
info!(" key: {} ref_count: {}", pubkey, account_entry.ref_count(),);
info!(" slots: {:?}", list_r);
info!(" slots: {list_r:?}");
}
let add_to_in_mem_cache = false;
(add_to_in_mem_cache, ())
8 changes: 4 additions & 4 deletions accounts-db/src/accounts_db/tests.rs
@@ -1812,7 +1812,7 @@ fn test_accounts_db_purge1() {
accounts.print_accounts_stats("pre_purge");

let ancestors = linear_ancestors(current_slot);
info!("ancestors: {:?}", ancestors);
info!("ancestors: {ancestors:?}");
let hash = accounts.update_accounts_hash_for_tests(current_slot, &ancestors, true, true);

accounts.clean_accounts_for_tests();
@@ -2945,9 +2945,9 @@ fn test_delete_dependencies() {
},
) in candidates_bin.iter()
{
info!(" purge {} ref_count {} =>", key, ref_count);
info!(" purge {key} ref_count {ref_count} =>");
for x in list {
info!(" {:?}", x);
info!(" {x:?}");
}
}
}
@@ -3017,7 +3017,7 @@ fn test_store_overhead() {
accounts.add_root_and_flush_write_cache(0);
let store = accounts.storage.get_slot_storage_entry(0).unwrap();
let total_len = store.accounts.len();
info!("total: {}", total_len);
info!("total: {total_len}");
assert_eq!(total_len, STORE_META_OVERHEAD);
}

8 changes: 4 additions & 4 deletions accounts-db/src/accounts_hash.rs
@@ -551,7 +551,7 @@ impl AccountsHasher<'_> {
})
.collect();
time.stop();
debug!("hashing {} {}", total_hashes, time);
debug!("hashing {total_hashes} {time}");

if result.len() == 1 {
result[0]
@@ -725,7 +725,7 @@ impl AccountsHasher<'_> {
})
.collect();
time.stop();
debug!("hashing {} {}", total_hashes, time);
debug!("hashing {total_hashes} {time}");

if let Some(mut specific_level_count_value) = specific_level_count {
specific_level_count_value -= levels_hashed;
@@ -2428,7 +2428,7 @@ mod tests {
|start| &reduced[start..],
None,
);
-assert_eq!(result, result2.0, "len: {}", len);
+assert_eq!(result, result2.0, "len: {len}");

let result2 = AccountsHasher::compute_merkle_root_from_slices(
len,
@@ -2437,7 +2437,7 @@
|start| &reduced[start..],
None,
);
-assert_eq!(result, result2.0, "len: {}", len);
+assert_eq!(result, result2.0, "len: {len}");

let max = std::cmp::min(reduced.len(), fanout * 2);
for left in 0..max {
2 changes: 1 addition & 1 deletion accounts-db/src/accounts_index/secondary.rs
@@ -280,6 +280,6 @@ impl<SecondaryIndexEntryType: SecondaryIndexEntry + Default + Sync + Send>
.iter()
.rev()
.take(20)
-.for_each(|(v, k)| info!("owner: {}, accounts: {}", k, v));
+.for_each(|(v, k)| info!("owner: {k}, accounts: {v}"));
}
}
5 changes: 2 additions & 3 deletions accounts-db/src/append_vec.rs
@@ -507,9 +507,8 @@ impl AppendVec {
if result.is_err() {
// for vm.max_map_count, error is: {code: 12, kind: Other, message: "Cannot allocate memory"}
info!(
"memory map error: {:?}. This may be because vm.max_map_count is not set \
correctly.",
result
"memory map error: {result:?}. This may be because vm.max_map_count is not \
set correctly."
);
}
result?
13 changes: 4 additions & 9 deletions accounts-db/src/hardened_unpack.rs
@@ -267,12 +267,7 @@ impl<R: Read> ArchiveChunker<R> {
}

fn checked_total_size_sum(total_size: u64, entry_size: u64, limit_size: u64) -> Result<u64> {
-trace!(
-"checked_total_size_sum: {} + {} < {}",
-total_size,
-entry_size,
-limit_size,
-);
+trace!("checked_total_size_sum: {total_size} + {entry_size} < {limit_size}");
let total_size = total_size.saturating_add(entry_size);
if total_size > limit_size {
return Err(UnpackError::Archive(format!(
@@ -413,7 +408,7 @@

total_entries += 1;
}
info!("unpacked {} entries total", total_entries);
info!("unpacked {total_entries} entries total");

return Ok(());

@@ -697,7 +692,7 @@ pub fn unpack_genesis_archive(
destination_dir: &Path,
max_genesis_archive_unpacked_size: u64,
) -> std::result::Result<(), UnpackError> {
info!("Extracting {:?}...", archive_filename);
info!("Extracting {archive_filename:?}...");
let extract_start = Instant::now();

fs::create_dir_all(destination_dir)?;
@@ -733,7 +728,7 @@ fn is_valid_genesis_archive_entry<'a>(
parts: &[&str],
kind: tar::EntryType,
) -> UnpackPath<'a> {
trace!("validating: {:?} {:?}", parts, kind);
trace!("validating: {parts:?} {kind:?}");
#[allow(clippy::match_like_matches_macro)]
match (parts, kind) {
([DEFAULT_GENESIS_FILE], GNUSparse) => UnpackPath::Valid(unpack_dir),
12 changes: 6 additions & 6 deletions accounts-db/src/rolling_bit_field.rs
@@ -45,7 +45,7 @@ impl std::fmt::Debug for RollingBitField {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut bits = String::from("[");
let mut prev = self.bits[0];
bits.push_str(&format!("{}", prev));
bits.push_str(&format!("{prev}"));
let mut index = 1;
while index < self.bits.len() {
if self.bits[index] != prev {
@@ -55,26 +55,26 @@ impl std::fmt::Debug for RollingBitField {
index += 1;
}
if index > 1 {
bits.push_str(&format!(";{}", index));
bits.push_str(&format!(";{index}"));
}
if index < self.bits.len() {
bits.push_str(&format!(", {}", prev));
bits.push_str(&format!(", {prev}"));
}
let mut count = 0;
while index < self.bits.len() {
if self.bits[index] != prev {
if count > 1 {
bits.push_str(&format!(";{}", count));
bits.push_str(&format!(";{count}"));
}
count = 0;
prev = self.bits[index];
bits.push_str(&format!(", {}", prev));
bits.push_str(&format!(", {prev}"));
}
count += 1;
index += 1;
}
if count > 1 {
bits.push_str(&format!(";{}", count));
bits.push_str(&format!(";{count}"));
}
bits.push(']');
// The order of the `count` and `bits` fields is changed on
6 changes: 3 additions & 3 deletions accounts-db/store-histogram/src/main.rs
@@ -49,7 +49,7 @@ fn calc(info: &[(usize, usize)], bin_widths: Vec<usize>, offset: i64) {
eprintln!("lowest slot: {min}");
eprintln!("highest slot: {max_inclusive}");
eprintln!("slot range: {}", max_inclusive - min + 1);
eprintln!("ancient boundary: {}", outside_slot);
eprintln!("ancient boundary: {outside_slot}");
eprintln!(
"number of slots beyond ancient boundary: {}",
info.iter()
@@ -305,10 +305,10 @@ fn main() {
calc(&info, normal_ancient(offset), offset);
eprintln!("========");
} else {
panic!("couldn't read folder: {path:?}, {:?}", dir);
panic!("couldn't read folder: {path:?}, {dir:?}");
}
} else {
panic!("not a folder: {:?}", path);
panic!("not a folder: {path:?}");
}
}

9 changes: 7 additions & 2 deletions bucket_map/src/bucket_storage.rs
@@ -171,7 +171,12 @@ impl<O: BucketOccupied> BucketStorage<O> {
let full_page_bytes = bytes / PAGE_SIZE * PAGE_SIZE / cell_size * cell_size;
if full_page_bytes < bytes {
let bytes_new = ((bytes / PAGE_SIZE) + 1) * PAGE_SIZE / cell_size * cell_size;
-assert!(bytes_new >= bytes, "allocating less than requested, capacity: {}, bytes: {}, bytes_new: {}, full_page_bytes: {}", capacity.capacity(), bytes, bytes_new, full_page_bytes);
+assert!(
+bytes_new >= bytes,
+"allocating less than requested, capacity: {}, bytes: {bytes}, bytes_new: \
+{bytes_new}, full_page_bytes: {full_page_bytes}",
+capacity.capacity()
+);
assert_eq!(bytes_new % cell_size, 0);
bytes = bytes_new;
*capacity = Capacity::Actual(bytes / cell_size);
@@ -452,7 +457,7 @@ impl<O: BucketOccupied> BucketStorage<O> {
let r = thread_rng().gen_range(0..drives.len());
let drive = &drives[r];
let file_random = thread_rng().gen_range(0..u128::MAX);
-let pos = format!("{}", file_random,);
+let pos = format!("{file_random}");
let file = drive.join(pos);
let res = Self::map_open_file(file.clone(), true, bytes, stats).unwrap();

2 changes: 1 addition & 1 deletion bucket_map/src/restart.rs
@@ -104,7 +104,7 @@ impl Debug for RestartableBucket {
impl Debug for Restart {
fn fmt(&self, f: &mut Formatter) -> std::fmt::Result {
let header = self.get_header();
-writeln!(f, "{:?}", header)?;
+writeln!(f, "{header:?}")?;
write!(
f,
"{:?}",