diff --git a/accounts-db/benches/accounts.rs b/accounts-db/benches/accounts.rs index 332460f9c9c3de..70776106b6c581 100644 --- a/accounts-db/benches/accounts.rs +++ b/accounts-db/benches/accounts.rs @@ -42,22 +42,6 @@ fn new_accounts_db(account_paths: Vec) -> AccountsDb { ) } -#[bench] -fn bench_update_accounts_hash(bencher: &mut Bencher) { - solana_logger::setup(); - let accounts_db = new_accounts_db(vec![PathBuf::from("update_accounts_hash")]); - let accounts = Accounts::new(Arc::new(accounts_db)); - let mut pubkeys: Vec = vec![]; - create_test_accounts(&accounts, &mut pubkeys, 50_000, 0); - accounts.accounts_db.add_root_and_flush_write_cache(0); - let ancestors = Ancestors::from(vec![0]); - bencher.iter(|| { - accounts - .accounts_db - .update_accounts_hash_for_tests(0, &ancestors, false, false); - }); -} - #[bench] fn bench_accounts_delta_hash(bencher: &mut Bencher) { solana_logger::setup(); diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 1a0431a2cbc10d..38c7c3741c43e7 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -5930,100 +5930,6 @@ impl AccountsDb { AccountsHasher::checked_cast_for_capitalization(balances.map(|b| b as u128).sum::()) } - pub fn calculate_accounts_hash_from_index( - &self, - max_slot: Slot, - config: &CalcAccountsHashConfig<'_>, - ) -> (AccountsHash, u64) { - let mut collect = Measure::start("collect"); - let keys: Vec<_> = self - .accounts_index - .account_maps - .iter() - .flat_map(|map| { - let mut keys = map.keys(); - keys.sort_unstable(); // hashmap is not ordered, but bins are relative to each other - keys - }) - .collect(); - collect.stop(); - - // Pick a chunk size big enough to allow us to produce output vectors that are smaller than the overall size. - // We'll also accumulate the lamports within each chunk and fewer chunks results in less contention to accumulate the sum. 
- let chunks = crate::accounts_hash::MERKLE_FANOUT.pow(4); - let total_lamports = Mutex::::new(0); - - let get_account_hashes = || { - keys.par_chunks(chunks) - .map(|pubkeys| { - let mut sum = 0u128; - let account_hashes: Vec = pubkeys - .iter() - .filter_map(|pubkey| { - let index_entry = self.accounts_index.get_cloned(pubkey)?; - self.accounts_index - .get_account_info_with_and_then( - &index_entry, - config.ancestors, - Some(max_slot), - |(slot, account_info)| { - if account_info.is_zero_lamport() { - return None; - } - self.get_account_accessor( - slot, - pubkey, - &account_info.storage_location(), - ) - .get_loaded_account(|loaded_account| { - let mut loaded_hash = loaded_account.loaded_hash(); - let balance = loaded_account.lamports(); - let hash_is_missing = - loaded_hash == AccountHash(Hash::default()); - if hash_is_missing { - let computed_hash = Self::hash_account( - &loaded_account, - loaded_account.pubkey(), - ); - loaded_hash = computed_hash; - } - sum += balance as u128; - loaded_hash.0 - }) - }, - ) - .flatten() - }) - .collect(); - let mut total = total_lamports.lock().unwrap(); - *total = AccountsHasher::checked_cast_for_capitalization(*total as u128 + sum); - account_hashes - }) - .collect() - }; - - let mut scan = Measure::start("scan"); - let account_hashes: Vec> = self.thread_pool_clean.install(get_account_hashes); - scan.stop(); - - let total_lamports = *total_lamports.lock().unwrap(); - - let mut hash_time = Measure::start("hash"); - let (accumulated_hash, hash_total) = AccountsHasher::calculate_hash(account_hashes); - hash_time.stop(); - - datapoint_info!( - "calculate_accounts_hash_from_index", - ("accounts_scan", scan.as_us(), i64), - ("hash", hash_time.as_us(), i64), - ("hash_total", hash_total, i64), - ("collect", collect.as_us(), i64), - ); - - let accounts_hash = AccountsHash(accumulated_hash); - (accounts_hash, total_lamports) - } - /// Calculates the accounts lt hash /// /// Only intended to be called at startup (or by tests). 
@@ -6172,26 +6078,6 @@ impl AccountsDb { .expect("capitalization cannot overflow") } - /// This is only valid to call from tests. - /// run the accounts hash calculation and store the results - pub fn update_accounts_hash_for_tests( - &self, - slot: Slot, - ancestors: &Ancestors, - debug_verify: bool, - is_startup: bool, - ) -> (AccountsHash, u64) { - self.update_accounts_hash_with_verify_from( - CalcAccountsHashDataSource::IndexForTests, - debug_verify, - slot, - ancestors, - None, - &EpochSchedule::default(), - is_startup, - ) - } - fn update_old_slot_stats(&self, stats: &HashStats, storage: Option<&Arc>) { if let Some(storage) = storage { stats.roots_older_than_epoch.fetch_add(1, Ordering::Relaxed); @@ -6259,155 +6145,6 @@ impl AccountsDb { true } - pub fn calculate_accounts_hash_from( - &self, - data_source: CalcAccountsHashDataSource, - slot: Slot, - config: &CalcAccountsHashConfig<'_>, - ) -> (AccountsHash, u64) { - match data_source { - CalcAccountsHashDataSource::Storages => { - if self.accounts_cache.contains_any_slots(slot) { - // this indicates a race condition - inc_new_counter_info!("accounts_hash_items_in_write_cache", 1); - } - - let mut collect_time = Measure::start("collect"); - let (combined_maps, slots) = self.get_storages(..=slot); - collect_time.stop(); - - let mut sort_time = Measure::start("sort_storages"); - let min_root = self.accounts_index.min_alive_root(); - let storages = SortedStorages::new_with_slots( - combined_maps.iter().zip(slots), - min_root, - Some(slot), - ); - sort_time.stop(); - - let mut timings = HashStats { - collect_snapshots_us: collect_time.as_us(), - storage_sort_us: sort_time.as_us(), - ..HashStats::default() - }; - timings.calc_storage_size_quartiles(&combined_maps); - - self.calculate_accounts_hash(config, &storages, timings) - } - CalcAccountsHashDataSource::IndexForTests => { - self.calculate_accounts_hash_from_index(slot, config) - } - } - } - - fn calculate_accounts_hash_with_verify_from( - &self, - data_source: 
CalcAccountsHashDataSource, - debug_verify: bool, - slot: Slot, - config: CalcAccountsHashConfig<'_>, - expected_capitalization: Option, - ) -> (AccountsHash, u64) { - let (accounts_hash, total_lamports) = - self.calculate_accounts_hash_from(data_source, slot, &config); - if debug_verify { - // calculate the other way (store or non-store) and verify results match. - let data_source_other = match data_source { - CalcAccountsHashDataSource::IndexForTests => CalcAccountsHashDataSource::Storages, - CalcAccountsHashDataSource::Storages => CalcAccountsHashDataSource::IndexForTests, - }; - let (accounts_hash_other, total_lamports_other) = - self.calculate_accounts_hash_from(data_source_other, slot, &config); - - let success = accounts_hash == accounts_hash_other - && total_lamports == total_lamports_other - && total_lamports == expected_capitalization.unwrap_or(total_lamports); - assert!( - success, - "calculate_accounts_hash_with_verify mismatch. hashes: {}, {}; lamports: {}, {}; \ - expected lamports: {:?}, data source: {:?}, slot: {}", - accounts_hash.0, - accounts_hash_other.0, - total_lamports, - total_lamports_other, - expected_capitalization, - data_source, - slot - ); - } - (accounts_hash, total_lamports) - } - - /// run the accounts hash calculation and store the results - #[allow(clippy::too_many_arguments)] - pub fn update_accounts_hash_with_verify_from( - &self, - data_source: CalcAccountsHashDataSource, - debug_verify: bool, - slot: Slot, - ancestors: &Ancestors, - expected_capitalization: Option, - epoch_schedule: &EpochSchedule, - is_startup: bool, - ) -> (AccountsHash, u64) { - let epoch = epoch_schedule.get_epoch(slot); - let (accounts_hash, total_lamports) = self.calculate_accounts_hash_with_verify_from( - data_source, - debug_verify, - slot, - CalcAccountsHashConfig { - use_bg_thread_pool: !is_startup, - ancestors: Some(ancestors), - epoch_schedule, - epoch, - store_detailed_debug_info_on_failure: false, - }, - expected_capitalization, - ); - 
self.set_accounts_hash(slot, (accounts_hash, total_lamports)); - (accounts_hash, total_lamports) - } - - /// Calculate the full accounts hash for `storages` and save the results at `slot` - pub fn update_accounts_hash( - &self, - config: &CalcAccountsHashConfig<'_>, - storages: &SortedStorages<'_>, - slot: Slot, - stats: HashStats, - ) -> (AccountsHash, /*capitalization*/ u64) { - let accounts_hash = self.calculate_accounts_hash(config, storages, stats); - let old_accounts_hash = self.set_accounts_hash(slot, accounts_hash); - if let Some(old_accounts_hash) = old_accounts_hash { - warn!( - "Accounts hash was already set for slot {slot}! old: {old_accounts_hash:?}, new: \ - {accounts_hash:?}" - ); - } - accounts_hash - } - - /// Calculate the incremental accounts hash for `storages` and save the results at `slot` - pub fn update_incremental_accounts_hash( - &self, - config: &CalcAccountsHashConfig<'_>, - storages: &SortedStorages<'_>, - slot: Slot, - stats: HashStats, - ) -> (IncrementalAccountsHash, /*capitalization*/ u64) { - let incremental_accounts_hash = - self.calculate_incremental_accounts_hash(config, storages, stats); - let old_incremental_accounts_hash = - self.set_incremental_accounts_hash(slot, incremental_accounts_hash); - if let Some(old_incremental_accounts_hash) = old_incremental_accounts_hash { - warn!( - "Incremental accounts hash was already set for slot {slot}! old: \ - {old_incremental_accounts_hash:?}, new: {incremental_accounts_hash:?}" - ); - } - incremental_accounts_hash - } - /// Set the accounts hash for `slot` /// /// returns the previous accounts hash for `slot` @@ -6541,6 +6278,7 @@ impl AccountsDb { /// /// This is intended to be used by startup verification, and also AccountsHashVerifier. /// Uses account storage files as the data source for the calculation. 
+ // obsolete, will be removed next pub fn calculate_accounts_hash( &self, config: &CalcAccountsHashConfig<'_>, @@ -6567,6 +6305,7 @@ impl AccountsDb { /// included in the incremental snapshot. This ensures reconstructing the AccountsDb is /// still correct when using this incremental accounts hash. /// - `storages` must be the same as the ones going into the incremental snapshot. + // obsolete, will be removed next pub fn calculate_incremental_accounts_hash( &self, config: &CalcAccountsHashConfig<'_>, @@ -6587,6 +6326,7 @@ impl AccountsDb { /// The shared code for calculating accounts hash from storages. /// Used for both full accounts hash and incremental accounts hash calculation. + // obsolete, will be removed next fn calculate_accounts_hash_from_storages( &self, config: &CalcAccountsHashConfig<'_>, diff --git a/accounts-db/src/accounts_db/tests.rs b/accounts-db/src/accounts_db/tests.rs index 03f8f952352668..bca593f2887858 100644 --- a/accounts-db/src/accounts_db/tests.rs +++ b/accounts-db/src/accounts_db/tests.rs @@ -1866,12 +1866,12 @@ fn test_accounts_db_purge1() { let ancestors = linear_ancestors(current_slot); info!("ancestors: {ancestors:?}"); - let hash = accounts.update_accounts_hash_for_tests(current_slot, &ancestors, true, true); + let hash = accounts.calculate_accounts_lt_hash_at_startup_from_index(&ancestors, current_slot); accounts.clean_accounts_for_tests(); assert_eq!( - accounts.update_accounts_hash_for_tests(current_slot, &ancestors, true, true), + accounts.calculate_accounts_lt_hash_at_startup_from_index(&ancestors, current_slot), hash ); @@ -2153,7 +2153,6 @@ fn test_verify_bank_capitalization() { db.store_for_tests(some_slot, &[(&key, &account)]); if pass == 0 { db.add_root_and_flush_write_cache(some_slot); - db.update_accounts_hash_for_tests(some_slot, &ancestors, true, true); assert_eq!( db.calculate_capitalization_at_startup_from_index(&ancestors, some_slot), @@ -2171,7 +2170,6 @@ fn test_verify_bank_capitalization() { )], ); 
db.add_root_and_flush_write_cache(some_slot); - db.update_accounts_hash_for_tests(some_slot, &ancestors, true, true); assert_eq!( db.calculate_capitalization_at_startup_from_index(&ancestors, some_slot), @@ -7061,153 +7059,6 @@ fn test_handle_dropped_roots_for_ancient_assert() { db.handle_dropped_roots_for_ancient(dropped_roots.into_iter()); } -define_accounts_db_test!(test_calculate_incremental_accounts_hash, |accounts_db| { - let owner = Pubkey::new_unique(); - let mut accounts: Vec<_> = (0..10) - .map(|_| (Pubkey::new_unique(), AccountSharedData::new(0, 0, &owner))) - .collect(); - - // store some accounts into slot 0 - let slot = 0; - { - accounts[0].1.set_lamports(0); - accounts[1].1.set_lamports(1); - accounts[2].1.set_lamports(10); - accounts[3].1.set_lamports(100); - //accounts[4].1.set_lamports(1_000); <-- will be added next slot - - let accounts = vec![ - (&accounts[0].0, &accounts[0].1), - (&accounts[1].0, &accounts[1].1), - (&accounts[2].0, &accounts[2].1), - (&accounts[3].0, &accounts[3].1), - ]; - accounts_db.store_cached((slot, accounts.as_slice())); - accounts_db.add_root_and_flush_write_cache(slot); - } - - // store some accounts into slot 1 - let slot = slot + 1; - { - //accounts[0].1.set_lamports(0); <-- unchanged - accounts[1].1.set_lamports(0); /* <-- drain account */ - //accounts[2].1.set_lamports(10); <-- unchanged - //accounts[3].1.set_lamports(100); <-- unchanged - accounts[4].1.set_lamports(1_000); /* <-- add account */ - - let accounts = vec![ - (&accounts[1].0, &accounts[1].1), - (&accounts[4].0, &accounts[4].1), - ]; - accounts_db.store_cached((slot, accounts.as_slice())); - accounts_db.add_root_and_flush_write_cache(slot); - } - - // calculate the full accounts hash - let full_accounts_hash = { - accounts_db.clean_accounts(Some(slot - 1), false, &EpochSchedule::default()); - let (storages, _) = accounts_db.get_storages(..=slot); - let storages = SortedStorages::new(&storages); - accounts_db.calculate_accounts_hash( - 
&CalcAccountsHashConfig::default(), - &storages, - HashStats::default(), - ) - }; - assert_eq!(full_accounts_hash.1, 1_110); - let full_accounts_hash_slot = slot; - - // Calculate the expected full accounts hash here and ensure it matches. - // Ensure the zero-lamport accounts are NOT included in the full accounts hash. - let full_account_hashes = [(2, 0), (3, 0), (4, 1)].into_iter().map(|(index, _slot)| { - let (pubkey, account) = &accounts[index]; - AccountsDb::hash_account(account, pubkey).0 - }); - let expected_accounts_hash = AccountsHash(compute_merkle_root(full_account_hashes)); - assert_eq!(full_accounts_hash.0, expected_accounts_hash); - - // store accounts into slot 2 - let slot = slot + 1; - { - //accounts[0].1.set_lamports(0); <-- unchanged - //accounts[1].1.set_lamports(0); <-- unchanged - accounts[2].1.set_lamports(0); /* <-- drain account */ - //accounts[3].1.set_lamports(100); <-- unchanged - //accounts[4].1.set_lamports(1_000); <-- unchanged - accounts[5].1.set_lamports(10_000); /* <-- add account */ - accounts[6].1.set_lamports(100_000); /* <-- add account */ - //accounts[7].1.set_lamports(1_000_000); <-- will be added next slot - - let accounts = vec![ - (&accounts[2].0, &accounts[2].1), - (&accounts[5].0, &accounts[5].1), - (&accounts[6].0, &accounts[6].1), - ]; - accounts_db.store_cached((slot, accounts.as_slice())); - accounts_db.add_root_and_flush_write_cache(slot); - } - - // store accounts into slot 3 - let slot = slot + 1; - { - //accounts[0].1.set_lamports(0); <-- unchanged - //accounts[1].1.set_lamports(0); <-- unchanged - //accounts[2].1.set_lamports(0); <-- unchanged - accounts[3].1.set_lamports(0); /* <-- drain account */ - //accounts[4].1.set_lamports(1_000); <-- unchanged - accounts[5].1.set_lamports(0); /* <-- drain account */ - //accounts[6].1.set_lamports(100_000); <-- unchanged - accounts[7].1.set_lamports(1_000_000); /* <-- add account */ - - let accounts = vec![ - (&accounts[3].0, &accounts[3].1), - (&accounts[5].0, 
&accounts[5].1), - (&accounts[7].0, &accounts[7].1), - ]; - accounts_db.store_cached((slot, accounts.as_slice())); - accounts_db.add_root_and_flush_write_cache(slot); - } - - // calculate the incremental accounts hash - let incremental_accounts_hash = { - accounts_db.set_latest_full_snapshot_slot(full_accounts_hash_slot); - accounts_db.clean_accounts(Some(slot - 1), false, &EpochSchedule::default()); - let (storages, _) = accounts_db.get_storages(full_accounts_hash_slot + 1..=slot); - let storages = SortedStorages::new(&storages); - accounts_db.calculate_incremental_accounts_hash( - &CalcAccountsHashConfig::default(), - &storages, - HashStats::default(), - ) - }; - assert_eq!(incremental_accounts_hash.1, 1_100_000); - - // Ensure the zero-lamport accounts are included in the IAH. - // Accounts 2, 3, and 5 are all zero-lamports. - let incremental_account_hashes = - [(2, 2), (3, 3), (5, 3), (6, 2), (7, 3)] - .into_iter() - .map(|(index, _slot)| { - let (pubkey, account) = &accounts[index]; - if account.is_zero_lamport() { - // For incremental accounts hash, the hash of a zero lamport account is the hash of its pubkey. - // Ensure this implementation detail remains in sync with AccountsHasher::de_dup_in_parallel(). 
- let hash = blake3::hash(bytemuck::bytes_of(pubkey)); - Hash::new_from_array(hash.into()) - } else { - AccountsDb::hash_account(account, pubkey).0 - } - }); - let expected_accounts_hash = - IncrementalAccountsHash(compute_merkle_root(incremental_account_hashes)); - assert_eq!(incremental_accounts_hash.0, expected_accounts_hash); -}); - -fn compute_merkle_root(hashes: impl IntoIterator) -> Hash { - let hashes = hashes.into_iter().collect(); - AccountsHasher::compute_merkle_root_recurse(hashes, MERKLE_FANOUT) -} - /// Test that `clean` reclaims old accounts when cleaning old storages /// /// When `clean` constructs candidates from old storages, pubkeys in these storages may have other diff --git a/runtime/src/serde_snapshot.rs b/runtime/src/serde_snapshot.rs index 2ddc70897660cc..60f7e9c5ab7dab 100644 --- a/runtime/src/serde_snapshot.rs +++ b/runtime/src/serde_snapshot.rs @@ -784,7 +784,7 @@ struct SerializableAccountsDb<'a> { account_storage_entries: &'a [Vec>], bank_hash_stats: BankHashStats, accounts_delta_hash: AccountsDeltaHash, // obsolete, will be removed next - accounts_hash: AccountsHash, + accounts_hash: AccountsHash, // obsolete, will be removed next write_version: u64, } diff --git a/runtime/src/serde_snapshot/tests.rs b/runtime/src/serde_snapshot/tests.rs index 7ddd5c0a54ef16..e00dbcfd14f004 100644 --- a/runtime/src/serde_snapshot/tests.rs +++ b/runtime/src/serde_snapshot/tests.rs @@ -104,7 +104,7 @@ mod serde_snapshot_tests { { let bank_hash_stats = BankHashStats::default(); let accounts_delta_hash = accounts_db.get_accounts_delta_hash(slot).unwrap(); - let accounts_hash = accounts_db.get_accounts_hash(slot).unwrap().0; + let accounts_hash = AccountsHash(Hash::default()); // obsolete, any value works let write_version = accounts_db.write_version.load(Ordering::Acquire); serialize_into( stream, @@ -219,10 +219,9 @@ mod serde_snapshot_tests { check_accounts_local(&accounts, &pubkeys, 100); accounts.accounts_db.add_root_and_flush_write_cache(slot); let 
accounts_delta_hash = accounts.accounts_db.calculate_accounts_delta_hash(slot);
-        let accounts_hash = AccountsHash(Hash::new_unique());
-        accounts
+        let accounts_hash = accounts
             .accounts_db
-            .set_accounts_hash(slot, (accounts_hash, u64::default()));
+            .calculate_accounts_lt_hash_at_startup_from_index(&Ancestors::default(), slot);
 
         let mut writer = Cursor::new(vec![]);
         accountsdb_to_stream(
@@ -257,7 +256,9 @@ mod serde_snapshot_tests {
         check_accounts_local(&daccounts, &pubkeys, 100);
         let daccounts_delta_hash = daccounts.accounts_db.calculate_accounts_delta_hash(slot);
         assert_eq!(accounts_delta_hash, daccounts_delta_hash);
-        let daccounts_hash = daccounts.accounts_db.get_accounts_hash(slot).unwrap().0;
+        let daccounts_hash = daccounts
+            .accounts_db
+            .calculate_accounts_lt_hash_at_startup_from_index(&Ancestors::default(), slot);
         assert_eq!(accounts_hash, daccounts_hash);
     }
 
@@ -282,7 +283,6 @@ mod serde_snapshot_tests {
         db.add_root_and_flush_write_cache(new_root);
 
         db.calculate_accounts_delta_hash(new_root);
-        db.update_accounts_hash_for_tests(new_root, &linear_ancestors(new_root), false, false);
 
         // Simulate reconstruction from snapshot
         let db = reconstruct_accounts_db_via_serialization(&db, new_root, storage_access);
@@ -364,7 +364,6 @@ mod serde_snapshot_tests {
         accounts.check_storage(2, 31, 31);
 
         let ancestors = linear_ancestors(latest_slot);
-        accounts.update_accounts_hash_for_tests(latest_slot, &ancestors, false, false);
         accounts.clean_accounts_for_tests();
 
         // The first 20 accounts of slot 0 have been updated in slot 2, as well as
@@ -392,10 +391,6 @@ mod serde_snapshot_tests {
             daccounts.get_accounts_delta_hash(latest_slot).unwrap(),
             accounts.get_accounts_delta_hash(latest_slot).unwrap(),
         );
-        assert_eq!(
-            daccounts.get_accounts_hash(latest_slot).unwrap().0,
-            accounts.get_accounts_hash(latest_slot).unwrap().0,
-        );
 
         daccounts.print_count_and_status("daccounts");
 
@@ -407,8 +402,8 @@ mod serde_snapshot_tests {
         daccounts.check_storage(2, 31, 31);
 
         assert_eq!(
-            
daccounts.update_accounts_hash_for_tests(latest_slot, &ancestors, false, false,), - accounts.update_accounts_hash_for_tests(latest_slot, &ancestors, false, false,) + daccounts.calculate_accounts_lt_hash_at_startup_from_index(&ancestors, latest_slot), + accounts.calculate_accounts_lt_hash_at_startup_from_index(&ancestors, latest_slot), ); } } @@ -451,12 +446,6 @@ mod serde_snapshot_tests { accounts.print_accounts_stats("accounts_post_purge"); accounts.calculate_accounts_delta_hash(current_slot); - accounts.update_accounts_hash_for_tests( - current_slot, - &linear_ancestors(current_slot), - false, - false, - ); let accounts = reconstruct_accounts_db_via_serialization(&accounts, current_slot, storage_access); @@ -509,7 +498,6 @@ mod serde_snapshot_tests { accounts.print_accounts_stats("pre_f"); accounts.calculate_accounts_delta_hash(current_slot); - accounts.update_accounts_hash_for_tests(4, &Ancestors::default(), false, false); let accounts = f(accounts, current_slot); @@ -602,12 +590,6 @@ mod serde_snapshot_tests { accounts.print_count_and_status("before reconstruct"); accounts.calculate_accounts_delta_hash(current_slot); - accounts.update_accounts_hash_for_tests( - current_slot, - &linear_ancestors(current_slot), - false, - false, - ); let accounts = reconstruct_accounts_db_via_serialization(&accounts, current_slot, storage_access); accounts.print_count_and_status("before purge zero"); @@ -720,12 +702,6 @@ mod serde_snapshot_tests { // So, prevent that from happening by introducing refcount ((current_slot - 1)..=current_slot).for_each(|slot| accounts.flush_root_write_cache(slot)); accounts.clean_accounts_for_tests(); - accounts.update_accounts_hash_for_tests( - current_slot, - &linear_ancestors(current_slot), - false, - false, - ); let accounts = reconstruct_accounts_db_via_serialization(&accounts, current_slot, storage_access); accounts.clean_accounts_for_tests(); @@ -809,8 +785,6 @@ mod serde_snapshot_tests { let no_ancestors = Ancestors::default(); let 
epoch_schedule = EpochSchedule::default(); - accounts.update_accounts_hash_for_tests(current_slot, &no_ancestors, false, false); - let calculated_capitalization = accounts .calculate_capitalization_at_startup_from_index(&no_ancestors, current_slot); let expected_capitalization = 22_300;