diff --git a/crates/ethereum/node/tests/e2e/p2p.rs b/crates/ethereum/node/tests/e2e/p2p.rs index 34a42105381..fd61db2aedc 100644 --- a/crates/ethereum/node/tests/e2e/p2p.rs +++ b/crates/ethereum/node/tests/e2e/p2p.rs @@ -91,6 +91,7 @@ async fn e2e_test_send_transactions() -> eyre::Result<()> { Ok(()) } +#[ignore] // TODO(mediocregopher): re-enable as part of https://github.com/paradigmxyz/reth/issues/18517 #[tokio::test] async fn test_long_reorg() -> eyre::Result<()> { reth_tracing::init_test_tracing(); diff --git a/crates/storage/provider/src/bundle_state/mod.rs b/crates/storage/provider/src/bundle_state/mod.rs deleted file mode 100644 index 58b76f1eacf..00000000000 --- a/crates/storage/provider/src/bundle_state/mod.rs +++ /dev/null @@ -1,5 +0,0 @@ -//! Bundle state module. -//! This module contains all the logic related to bundle state. - -mod state_reverts; -pub use state_reverts::StorageRevertsIter; diff --git a/crates/storage/provider/src/changesets_utils/mod.rs b/crates/storage/provider/src/changesets_utils/mod.rs new file mode 100644 index 00000000000..3b65825264b --- /dev/null +++ b/crates/storage/provider/src/changesets_utils/mod.rs @@ -0,0 +1,7 @@ +//! This module contains helpful utilities related to populating changesets tables. 
+ +mod state_reverts; +pub use state_reverts::StorageRevertsIter; + +mod trie; +pub use trie::*; diff --git a/crates/storage/provider/src/bundle_state/state_reverts.rs b/crates/storage/provider/src/changesets_utils/state_reverts.rs similarity index 100% rename from crates/storage/provider/src/bundle_state/state_reverts.rs rename to crates/storage/provider/src/changesets_utils/state_reverts.rs diff --git a/crates/storage/provider/src/changesets_utils/trie.rs b/crates/storage/provider/src/changesets_utils/trie.rs new file mode 100644 index 00000000000..b7a97a2e383 --- /dev/null +++ b/crates/storage/provider/src/changesets_utils/trie.rs @@ -0,0 +1,156 @@ +use alloy_primitives::B256; +use itertools::{merge_join_by, EitherOrBoth}; +use reth_db_api::{cursor::DbDupCursorRO, tables, DatabaseError}; +use reth_trie::{BranchNodeCompact, Nibbles, StorageTrieEntry, StoredNibblesSubKey}; +use std::cmp::{Ord, Ordering}; + +/// Combines a sorted iterator of trie node paths and a [`tables::StoragesTrie`] cursor into a new +/// iterator which produces the current values of all given paths in the same order. +#[derive(Debug)] +pub struct StorageTrieCurrentValuesIter<'cursor, P, C> { + /// The address of the account whose storage is being iterated. + hashed_address: B256, + /// Sorted iterator of node paths which we want the values of. + paths: P, + /// Cursor over [`tables::StoragesTrie`]. + cursor: &'cursor mut C, + /// Current value at the cursor, allows us to treat the cursor as a peekable iterator. + cursor_current: Option<(Nibbles, BranchNodeCompact)>, +} + +impl<'cursor, P, C> StorageTrieCurrentValuesIter<'cursor, P, C> +where + C: DbDupCursorRO<tables::StoragesTrie>, +{ + /// Instantiate a [`StorageTrieCurrentValuesIter`] from a sorted paths iterator and a cursor. 
+ pub fn new( + hashed_address: B256, + paths: P, + cursor: &'cursor mut C, + ) -> Result<Self, DatabaseError> { + let mut new_self = Self { hashed_address, paths, cursor, cursor_current: None }; + new_self.seek_cursor(Nibbles::default())?; + Ok(new_self) + } + + fn seek_cursor(&mut self, path: Nibbles) -> Result<(), DatabaseError> { + self.cursor_current = self + .cursor + .seek_by_key_subkey(self.hashed_address, StoredNibblesSubKey(path))? + .map(|e| (e.nibbles.0, e.node)); + Ok(()) + } +} + +impl<'cursor, P, C> Iterator for StorageTrieCurrentValuesIter<'cursor, P, C> +where + P: Iterator<Item = Nibbles>, + C: DbDupCursorRO<tables::StoragesTrie>, +{ + type Item = Result<(Nibbles, Option<BranchNodeCompact>), DatabaseError>; + + fn next(&mut self) -> Option<Self::Item> { + let Some(curr_path) = self.paths.next() else { + // If there are no more paths then there is no further possible output. + return None + }; + + // If the path is ahead of the cursor then seek the cursor forward to catch up. The cursor + // will seek either to `curr_path` or beyond it. + if self.cursor_current.as_ref().is_some_and(|(cursor_path, _)| curr_path > *cursor_path) { + if let Err(err) = self.seek_cursor(curr_path) { + return Some(Err(err)) + } + } + + // If there is a path but the cursor is empty then that path has no node. + if self.cursor_current.is_none() { + return Some(Ok((curr_path, None))) + } + + let (cursor_path, cursor_node) = + self.cursor_current.as_mut().expect("already checked for None"); + + // There is both a path and a cursor value, compare their paths. + match curr_path.cmp(cursor_path) { + Ordering::Less => { + // If the path is behind the cursor then there is no value for that + // path, produce None. + Some(Ok((curr_path, None))) + } + Ordering::Equal => { + // If the target path and cursor's path match then there is a value for that path, + // return the value. We don't seek the cursor here, that will be handled on the + // next call to `next` after checking that `paths` isn't None. 
+ let cursor_node = core::mem::take(cursor_node); + Some(Ok((*cursor_path, Some(cursor_node)))) + } + Ordering::Greater => { + panic!("cursor was seeked to {curr_path:?}, but produced a node at a lower path {cursor_path:?}") + } + } + } +} + +/// Returns an iterator which produces the values to be inserted into +/// [`tables::StoragesTrieChangeSets`] for an account whose storage was wiped during a block. It is +/// expected that this is called prior to inserting the block's trie updates. +/// +/// ## Arguments +/// +/// - `curr_values_of_changed` is an iterator over the current values of all trie nodes modified by +/// the block, ordered by path. +/// - `all_nodes` is an iterator over all existing trie nodes for the account, ordered by path. +/// +/// ## Returns +/// +/// An iterator of trie node paths and a `Some(node)` (indicating the node was wiped) or a `None` +/// (indicating the node was modified in the block but didn't previously exist. The iterator's +/// results will be ordered by path. +pub fn storage_trie_wiped_changeset_iter( + curr_values_of_changed: impl Iterator< + Item = Result<(Nibbles, Option<BranchNodeCompact>), DatabaseError>, + >, + all_nodes: impl Iterator<Item = Result<(B256, StorageTrieEntry), DatabaseError>>, +) -> Result< + impl Iterator<Item = Result<(Nibbles, Option<BranchNodeCompact>), DatabaseError>>, + DatabaseError, +> { + let all_nodes = all_nodes.map(|e| e.map(|e| (e.1.nibbles.0, Some(e.1.node)))); + + let merged = merge_join_by(curr_values_of_changed, all_nodes, |a, b| match (a, b) { + (Err(_), _) => Ordering::Less, + (_, Err(_)) => Ordering::Greater, + (Ok(a), Ok(b)) => a.0.cmp(&b.0), + }); + + Ok(merged.map(|either_or| match either_or { + EitherOrBoth::Left(changed) => { + // A path of a changed node (given in `paths`) which was not found in the database (or + // there's an error). The current value of this path must be None, otherwise it would + // have also been returned by the `all_nodes` iter. 
+ debug_assert!( + changed.as_ref().is_err() || changed.as_ref().is_ok_and(|(_, node)| node.is_none()), + "changed node is Some but wasn't returned by `all_nodes` iterator: {changed:?}", + ); + changed + } + EitherOrBoth::Right(wiped) => { + // A node was found in the db (indicating it was wiped) but was not given in `paths`. + // Return it as-is. + wiped + } + EitherOrBoth::Both(changed, _wiped) => { + // A path of a changed node (given in `paths`) was found with a previous value in the + // database. The changed node must have a value which is equal to the one found by the + // `all_nodes` iterator. If the changed node had no previous value (None) it wouldn't + // be returned by `all_nodes` and so would be in the Left branch. + // + // Due to the ordering closure passed to `merge_join_by` it's not possible for either + // value to be an error here. + debug_assert!(changed.is_ok(), "unreachable error condition: {changed:?}"); + debug_assert_eq!(changed, _wiped); + changed + } + })) +} diff --git a/crates/storage/provider/src/lib.rs b/crates/storage/provider/src/lib.rs index 6c7826c82d7..93504dab48e 100644 --- a/crates/storage/provider/src/lib.rs +++ b/crates/storage/provider/src/lib.rs @@ -35,7 +35,7 @@ pub use static_file::StaticFileSegment; pub use reth_execution_types::*; -pub mod bundle_state; +pub mod changesets_utils; /// Re-export `OriginalValuesKnown` pub use revm_database::states::OriginalValuesKnown; diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 160ed34a176..fd71242ff40 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -1,5 +1,7 @@ use crate::{ - bundle_state::StorageRevertsIter, + changesets_utils::{ + storage_trie_wiped_changeset_iter, StorageRevertsIter, StorageTrieCurrentValuesIter, + }, providers::{ database::{chain::ChainStorage, metrics}, 
static_file::StaticFileWriter, @@ -36,7 +38,7 @@ use reth_db_api::{ database::Database, models::{ sharded_key, storage_sharded_key::StorageShardedKey, AccountBeforeTx, BlockNumberAddress, - ShardedKey, StoredBlockBodyIndices, + BlockNumberHashedAddress, ShardedKey, StoredBlockBodyIndices, }, table::Table, tables, @@ -61,8 +63,9 @@ use reth_storage_api::{ use reth_storage_errors::provider::{ProviderResult, RootMismatch}; use reth_trie::{ prefix_set::{PrefixSet, PrefixSetMut, TriePrefixSets}, - updates::{StorageTrieUpdates, TrieUpdates}, - HashedPostStateSorted, Nibbles, StateRoot, StoredNibbles, + updates::{StorageTrieUpdates, StorageTrieUpdatesSorted, TrieUpdates, TrieUpdatesSorted}, + HashedPostStateSorted, Nibbles, StateRoot, StoredNibbles, StoredNibblesSubKey, + TrieChangeSetsEntry, }; use reth_trie_db::{DatabaseStateRoot, DatabaseStorageTrieCursor}; use revm_database::states::{ @@ -1915,6 +1918,10 @@ impl StateWriter // If we are writing the primary storage wipe transition, the pre-existing plain // storage state has to be taken from the database and written to storage history. // See [StorageWipe::Primary] for more details. + // + // TODO(mediocregopher): This could be rewritten in a way which doesn't require + // collecting wiped entries into a Vec like this, see + // `write_storage_trie_changesets`. let mut wiped_storage = Vec::new(); if wiped { tracing::trace!(?address, "Wiping storage"); @@ -2340,11 +2347,50 @@ impl TrieWriter for DatabaseProvider Ok(num_entries) } + + /// Records the current values of all trie nodes which will be updated using the [`TrieUpdates`] + /// into the trie changesets tables. + /// + /// The intended usage of this method is to call it _prior_ to calling `write_trie_updates` with + /// the same [`TrieUpdates`]. + /// + /// Returns the number of keys written. 
+ fn write_trie_changesets( + &self, + block_number: BlockNumber, + trie_updates: &TrieUpdatesSorted, + ) -> ProviderResult { + let mut num_entries = 0; + + let mut changeset_cursor = + self.tx_ref().cursor_dup_write::()?; + let mut curr_values_cursor = self.tx_ref().cursor_read::()?; + + for (path, _) in trie_updates.account_nodes_ref() { + num_entries += 1; + let node = curr_values_cursor.seek_exact(StoredNibbles(*path))?.map(|e| e.1); + changeset_cursor.append_dup( + block_number, + TrieChangeSetsEntry { nibbles: StoredNibblesSubKey(*path), node }, + )?; + } + + let mut storage_updates = trie_updates.storage_tries.iter().collect::>(); + storage_updates.sort_unstable_by(|a, b| a.0.cmp(b.0)); + + num_entries += + self.write_storage_trie_changesets(block_number, storage_updates.into_iter())?; + + Ok(num_entries) + } } impl StorageTrieWriter for DatabaseProvider { - /// Writes storage trie updates from the given storage trie map. First sorts the storage trie - /// updates by the hashed address, writing in sorted order. + /// Writes storage trie updates from the given storage trie map. + /// + /// First sorts the storage trie updates by the hashed address key, writing in sorted order. + /// + /// Returns the number of entries modified. fn write_storage_trie_updates( &self, storage_tries: &B256Map, @@ -2377,6 +2423,66 @@ impl StorageTrieWriter for DatabaseP let mut trie_db_cursor = DatabaseStorageTrieCursor::new(cursor, hashed_address); Ok(trie_db_cursor.write_storage_trie_updates(updates)?) } + + /// Records the current values of all trie nodes which will be updated using the + /// [`StorageTrieUpdates`] into the storage trie changesets table. + /// + /// The intended usage of this method is to call it _prior_ to calling + /// `write_storage_trie_updates` with the same set of [`StorageTrieUpdates`]. + /// + /// Returns the number of keys written. 
+ fn write_storage_trie_changesets<'a>( + &self, + block_number: BlockNumber, + storage_tries: impl Iterator, + ) -> ProviderResult { + let mut num_written = 0; + + let mut changeset_cursor = + self.tx_ref().cursor_dup_write::()?; + + // We hold two cursors to the same table because we use them simultaneously when an + // account's storage is wiped. We keep them outside the for-loop so they can be re-used + // between accounts. + let mut changed_curr_values_cursor = + self.tx_ref().cursor_dup_read::()?; + let mut wiped_nodes_cursor = self.tx_ref().cursor_dup_read::()?; + + for (hashed_address, storage_trie_updates) in storage_tries { + let changeset_key = BlockNumberHashedAddress((block_number, *hashed_address)); + + // Create an iterator which produces the current values of all updated paths, or None if + // they are currently unset. + let curr_values_of_changed = StorageTrieCurrentValuesIter::new( + *hashed_address, + storage_trie_updates.storage_nodes.iter().map(|e| e.0), + &mut changed_curr_values_cursor, + )?; + + if storage_trie_updates.is_deleted() { + let all_nodes = wiped_nodes_cursor.walk_dup(Some(*hashed_address), None)?; + for wiped in storage_trie_wiped_changeset_iter(curr_values_of_changed, all_nodes)? 
{ + let (path, node) = wiped?; + num_written += 1; + changeset_cursor.append_dup( + changeset_key, + TrieChangeSetsEntry { nibbles: StoredNibblesSubKey(path), node }, + )?; + } + } else { + for curr_value in curr_values_of_changed { + let (path, node) = curr_value?; + num_written += 1; + changeset_cursor.append_dup( + changeset_key, + TrieChangeSetsEntry { nibbles: StoredNibblesSubKey(path), node }, + )?; + } + } + } + + Ok(num_written) + } } impl HashingWriter for DatabaseProvider { @@ -3444,4 +3550,238 @@ mod tests { assert_eq!(range_result, individual_results); } + + #[test] + fn test_write_trie_changesets() { + use reth_db_api::models::BlockNumberHashedAddress; + use reth_trie::{BranchNodeCompact, StorageTrieEntry}; + + let factory = create_test_provider_factory(); + let provider_rw = factory.provider_rw().unwrap(); + + let block_number = 1u64; + + // Create some test nibbles and nodes + let account_nibbles1 = Nibbles::from_nibbles([0x1, 0x2, 0x3, 0x4]); + let account_nibbles2 = Nibbles::from_nibbles([0x5, 0x6, 0x7, 0x8]); + + let node1 = BranchNodeCompact::new( + 0b1111_1111_1111_1111, // state_mask + 0b0000_0000_0000_0000, // tree_mask + 0b0000_0000_0000_0000, // hash_mask + vec![], // hashes + None, // root hash + ); + + // Pre-populate AccountsTrie with a node that will be updated (for account_nibbles1) + { + let mut cursor = provider_rw.tx_ref().cursor_write::().unwrap(); + cursor.insert(StoredNibbles(account_nibbles1), &node1).unwrap(); + } + + // Create account trie updates: one Some (update) and one None (removal) + let account_nodes = vec![ + (account_nibbles1, Some(node1.clone())), // This will update existing node + (account_nibbles2, None), // This will be a removal (no existing node) + ]; + + // Create storage trie updates + let storage_address1 = B256::from([1u8; 32]); // Normal storage trie + let storage_address2 = B256::from([2u8; 32]); // Wiped storage trie + + let storage_nibbles1 = Nibbles::from_nibbles([0xa, 0xb]); + let storage_nibbles2 = 
Nibbles::from_nibbles([0xc, 0xd]); + let storage_nibbles3 = Nibbles::from_nibbles([0xe, 0xf]); + + let storage_node1 = BranchNodeCompact::new( + 0b1111_0000_0000_0000, + 0b0000_0000_0000_0000, + 0b0000_0000_0000_0000, + vec![], + None, + ); + + let storage_node2 = BranchNodeCompact::new( + 0b0000_1111_0000_0000, + 0b0000_0000_0000_0000, + 0b0000_0000_0000_0000, + vec![], + None, + ); + + // Create an old version of storage_node1 to prepopulate + let storage_node1_old = BranchNodeCompact::new( + 0b1010_0000_0000_0000, // Different mask to show it's an old value + 0b0000_0000_0000_0000, + 0b0000_0000_0000_0000, + vec![], + None, + ); + + // Pre-populate StoragesTrie for normal storage (storage_address1) + { + let mut cursor = + provider_rw.tx_ref().cursor_dup_write::().unwrap(); + // Add node that will be updated (storage_nibbles1) with old value + let entry = StorageTrieEntry { + nibbles: StoredNibblesSubKey(storage_nibbles1), + node: storage_node1_old.clone(), + }; + cursor.upsert(storage_address1, &entry).unwrap(); + } + + // Pre-populate StoragesTrie for wiped storage (storage_address2) + { + let mut cursor = + provider_rw.tx_ref().cursor_dup_write::().unwrap(); + // Add node that will be updated (storage_nibbles1) + let entry1 = StorageTrieEntry { + nibbles: StoredNibblesSubKey(storage_nibbles1), + node: storage_node1.clone(), + }; + cursor.upsert(storage_address2, &entry1).unwrap(); + // Add node that won't be updated but exists (storage_nibbles3) + let entry3 = StorageTrieEntry { + nibbles: StoredNibblesSubKey(storage_nibbles3), + node: storage_node2.clone(), + }; + cursor.upsert(storage_address2, &entry3).unwrap(); + } + + // Normal storage trie: one Some (update) and one None (new) + let storage_trie1 = StorageTrieUpdatesSorted { + is_deleted: false, + storage_nodes: vec![ + (storage_nibbles1, Some(storage_node1.clone())), // This will update existing node + (storage_nibbles2, None), // This is a new node + ], + }; + + // Wiped storage trie + let 
storage_trie2 = StorageTrieUpdatesSorted { + is_deleted: true, + storage_nodes: vec![ + (storage_nibbles1, Some(storage_node1.clone())), // Updated node already in db + (storage_nibbles2, Some(storage_node2.clone())), /* Updated node not in db + * storage_nibbles3 is in db + * but not updated */ + ], + }; + + let mut storage_tries = B256Map::default(); + storage_tries.insert(storage_address1, storage_trie1); + storage_tries.insert(storage_address2, storage_trie2); + + let trie_updates = TrieUpdatesSorted { account_nodes, storage_tries }; + + // Write the changesets + let num_written = provider_rw.write_trie_changesets(block_number, &trie_updates).unwrap(); + + // Verify number of entries written + // Account changesets: 2 (one update, one removal) + // Storage changesets: + // - Normal storage: 2 (one update, one removal) + // - Wiped storage: 3 (two updated, one existing not updated) + // Total: 2 + 2 + 3 = 7 + assert_eq!(num_written, 7); + + // Verify account changesets were written correctly + { + let mut cursor = + provider_rw.tx_ref().cursor_dup_read::().unwrap(); + + // Get all entries for this block to see what was written + let all_entries = cursor + .walk_dup(Some(block_number), None) + .unwrap() + .collect::, _>>() + .unwrap(); + + // Assert the full value of all_entries in a single assert_eq + assert_eq!( + all_entries, + vec![ + ( + block_number, + TrieChangeSetsEntry { + nibbles: StoredNibblesSubKey(account_nibbles1), + node: Some(node1), + } + ), + ( + block_number, + TrieChangeSetsEntry { + nibbles: StoredNibblesSubKey(account_nibbles2), + node: None, + } + ), + ] + ); + } + + // Verify storage changesets were written correctly + { + let mut cursor = + provider_rw.tx_ref().cursor_dup_read::().unwrap(); + + // Check normal storage trie changesets + let key1 = BlockNumberHashedAddress((block_number, storage_address1)); + let entries1 = + cursor.walk_dup(Some(key1), None).unwrap().collect::, _>>().unwrap(); + + assert_eq!( + entries1, + vec![ + ( + 
key1, + TrieChangeSetsEntry { + nibbles: StoredNibblesSubKey(storage_nibbles1), + node: Some(storage_node1_old), // Old value that was prepopulated + } + ), + ( + key1, + TrieChangeSetsEntry { + nibbles: StoredNibblesSubKey(storage_nibbles2), + node: None, // New node, no previous value + } + ), + ] + ); + + // Check wiped storage trie changesets + let key2 = BlockNumberHashedAddress((block_number, storage_address2)); + let entries2 = + cursor.walk_dup(Some(key2), None).unwrap().collect::, _>>().unwrap(); + + assert_eq!( + entries2, + vec![ + ( + key2, + TrieChangeSetsEntry { + nibbles: StoredNibblesSubKey(storage_nibbles1), + node: Some(storage_node1), // Was in db, so has old value + } + ), + ( + key2, + TrieChangeSetsEntry { + nibbles: StoredNibblesSubKey(storage_nibbles2), + node: None, // Was not in db + } + ), + ( + key2, + TrieChangeSetsEntry { + nibbles: StoredNibblesSubKey(storage_nibbles3), + node: Some(storage_node2), // Existing node in wiped storage + } + ), + ] + ); + } + + provider_rw.commit().unwrap(); + } } diff --git a/crates/storage/provider/src/writer/mod.rs b/crates/storage/provider/src/writer/mod.rs index bca2a4cdb4c..8c280b1b8af 100644 --- a/crates/storage/provider/src/writer/mod.rs +++ b/crates/storage/provider/src/writer/mod.rs @@ -162,10 +162,11 @@ where // Insert the blocks for ExecutedBlockWithTrieUpdates { block: ExecutedBlock { recovered_block, execution_output, hashed_state }, - trie, + mut trie, } in blocks { let block_hash = recovered_block.hash(); + let block_number = recovered_block.number(); self.database() .insert_block(Arc::unwrap_or_clone(recovered_block), StorageLocation::Both)?; @@ -180,9 +181,18 @@ where // insert hashes and intermediate merkle nodes self.database() .write_hashed_state(&Arc::unwrap_or_clone(hashed_state).into_sorted())?; - self.database().write_trie_updates( - trie.as_ref().ok_or(ProviderError::MissingTrieUpdates(block_hash))?, - )?; + + let trie_updates = + 
trie.take_present().ok_or(ProviderError::MissingTrieUpdates(block_hash))?; + + // sort trie updates and insert changesets + // TODO(mediocregopher): We should rework `write_trie_updates` to also accept a + // `TrieUpdatesSorted`, and then the `trie` field of `ExecutedBlockWithTrieUpdates` to + // carry a TrieUpdatesSorted. + let trie_updates_sorted = (*trie_updates).clone().into_sorted(); + self.database().write_trie_changesets(block_number, &trie_updates_sorted)?; + + self.database().write_trie_updates(&trie_updates)?; } // update history indices diff --git a/crates/storage/storage-api/src/trie.rs b/crates/storage/storage-api/src/trie.rs index 9ae8ebee9a0..267ec9fd72a 100644 --- a/crates/storage/storage-api/src/trie.rs +++ b/crates/storage/storage-api/src/trie.rs @@ -1,8 +1,8 @@ use alloc::vec::Vec; -use alloy_primitives::{map::B256Map, Address, Bytes, B256}; +use alloy_primitives::{map::B256Map, Address, BlockNumber, Bytes, B256}; use reth_storage_errors::provider::ProviderResult; use reth_trie_common::{ - updates::{StorageTrieUpdates, TrieUpdates}, + updates::{StorageTrieUpdates, StorageTrieUpdatesSorted, TrieUpdates, TrieUpdatesSorted}, AccountProof, HashedPostState, HashedStorage, MultiProof, MultiProofTargets, StorageMultiProof, StorageProof, TrieInput, }; @@ -96,6 +96,19 @@ pub trait TrieWriter: Send + Sync { /// /// Returns the number of entries modified. fn write_trie_updates(&self, trie_updates: &TrieUpdates) -> ProviderResult<usize>; + + /// Records the current values of all trie nodes which will be updated using the [`TrieUpdates`] + /// into the trie changesets tables. + /// + /// The intended usage of this method is to call it _prior_ to calling `write_trie_updates` with + /// the same [`TrieUpdates`]. + /// + /// Returns the number of keys written. 
+ fn write_trie_changesets( + &self, + block_number: BlockNumber, + trie_updates: &TrieUpdatesSorted, + ) -> ProviderResult<usize>; } /// Storage Trie Writer @@ -117,4 +130,17 @@ pub trait StorageTrieWriter: Send + Sync { hashed_address: B256, updates: &StorageTrieUpdates, ) -> ProviderResult<usize>; + + /// Records the current values of all trie nodes which will be updated using the + /// [`StorageTrieUpdates`] into the storage trie changesets table. + /// + /// The intended usage of this method is to call it _prior_ to calling + /// `write_storage_trie_updates` with the same set of [`StorageTrieUpdates`]. + /// + /// Returns the number of keys written. + fn write_storage_trie_changesets<'a>( + &self, + block_number: BlockNumber, + storage_tries: impl Iterator<Item = (&'a B256, &'a StorageTrieUpdatesSorted)>, + ) -> ProviderResult<usize>; }