diff --git a/crates/optimism/bin/Cargo.toml b/crates/optimism/bin/Cargo.toml index ac3e42247ca..2913b04c8ad 100644 --- a/crates/optimism/bin/Cargo.toml +++ b/crates/optimism/bin/Cargo.toml @@ -20,7 +20,7 @@ reth-optimism-payload-builder.workspace = true reth-optimism-primitives.workspace = true reth-optimism-forks.workspace = true reth-optimism-exex.workspace = true -reth-optimism-trie.workspace = true +reth-optimism-trie = { workspace = true, features = ["metrics"] } reth-node-builder.workspace = true reth-db-api.workspace = true reth-chainspec.workspace = true diff --git a/crates/optimism/bin/src/main.rs b/crates/optimism/bin/src/main.rs index 5feb5e59b6e..bf27189c8bc 100644 --- a/crates/optimism/bin/src/main.rs +++ b/crates/optimism/bin/src/main.rs @@ -10,7 +10,9 @@ use reth_optimism_cli::{chainspec::OpChainSpecParser, Cli}; use reth_optimism_exex::OpProofsExEx; use reth_optimism_node::{args::RollupArgs, OpNode}; use reth_optimism_rpc::eth::proofs::{EthApiExt, EthApiOverrideServer}; -use reth_optimism_trie::{db::MdbxProofsStorage, InMemoryProofsStorage, OpProofsStorage}; +use reth_optimism_trie::{ + db::MdbxProofsStorage, InMemoryProofsStorage, OpProofsStorage, OpProofsStore, StorageMetrics, +}; use tracing::info; use std::{path::PathBuf, sync::Arc}; @@ -68,10 +70,10 @@ struct Args { async fn launch_node_with_storage( builder: WithLaunchContext, OpChainSpec>>, args: Args, - storage: S, + storage: OpProofsStorage, ) -> eyre::Result<(), ErrReport> where - S: OpProofsStorage + Clone + 'static, + S: OpProofsStore + Clone + 'static, { let storage_clone = storage.clone(); let proofs_history_enabled = args.proofs_history; @@ -106,7 +108,12 @@ fn main() { info!(target: "reth::cli", "Launching node"); if args.proofs_history_storage_in_mem { - let storage = Arc::new(InMemoryProofsStorage::new()); + // todo: enable launch without metrics + let storage = OpProofsStorage::new( + Arc::new(InMemoryProofsStorage::new()), + Arc::new(StorageMetrics::default()), + ); + launch_node_with_storage(builder, args.clone(), storage).await?; } else { let path = args @@ -115,10 +122,15 @@ fn main() { .expect("Path must be provided if not using in-memory storage"); info!(target: "reth::cli", "Using on-disk storage for proofs history"); - let storage = Arc::new( - MdbxProofsStorage::new(&path) - .map_err(|e| eyre::eyre!("Failed to create MdbxProofsStorage: {e}"))?, + // todo: enable launch without metrics + let storage = OpProofsStorage::new( + Arc::new( + MdbxProofsStorage::new(&path) + .map_err(|e| eyre::eyre!("Failed to create MdbxProofsStorage: {e}"))?, + ), + Arc::new(StorageMetrics::default()), ); + launch_node_with_storage(builder, args.clone(), storage).await?; } diff --git a/crates/optimism/exex/src/lib.rs b/crates/optimism/exex/src/lib.rs index eecbe890b79..2ba70468e00 100644 --- a/crates/optimism/exex/src/lib.rs +++ b/crates/optimism/exex/src/lib.rs @@ -14,7 +14,7 @@ use reth_chainspec::ChainInfo; use reth_exex::{ExExContext, ExExEvent}; use reth_node_api::{FullNodeComponents, NodePrimitives}; use reth_node_types::NodeTypes; -use reth_optimism_trie::{BackfillJob, OpProofsStorage}; +use reth_optimism_trie::{BackfillJob, OpProofsStore}; use reth_provider::{BlockNumReader, DBProvider, DatabaseProviderFactory}; /// OP Proofs ExEx - processes blocks and tracks state changes within fault proof window. 
@@ -26,7 +26,7 @@ use reth_provider::{BlockNumReader, DBProvider, DatabaseProviderFactory}; pub struct OpProofsExEx where Node: FullNodeComponents, - S: OpProofsStorage + Clone, + S: OpProofsStore + Clone, { /// The ExEx context containing the node related utilities e.g. provider, notifications, /// events. @@ -43,7 +43,7 @@ impl OpProofsExEx where Node: FullNodeComponents>, Primitives: NodePrimitives, - S: OpProofsStorage + Clone, + S: OpProofsStore + Clone, { /// Main execution loop for the ExEx pub async fn run(mut self) -> eyre::Result<()> { diff --git a/crates/optimism/rpc/src/eth/proofs.rs b/crates/optimism/rpc/src/eth/proofs.rs index a5c79d61862..0a2a574440c 100644 --- a/crates/optimism/rpc/src/eth/proofs.rs +++ b/crates/optimism/rpc/src/eth/proofs.rs @@ -9,7 +9,7 @@ use derive_more::Constructor; use jsonrpsee::proc_macros::rpc; use jsonrpsee_core::RpcResult; use jsonrpsee_types::error::ErrorObject; -use reth_optimism_trie::{provider::OpProofsStateProviderRef, OpProofsStorage}; +use reth_optimism_trie::{provider::OpProofsStateProviderRef, OpProofsStorage, OpProofsStore}; use reth_provider::{BlockIdReader, ProviderError, ProviderResult, StateProofProvider}; use reth_rpc_api::eth::helpers::FullEthApi; use reth_rpc_eth_types::EthApiError; @@ -32,14 +32,14 @@ pub trait EthApiOverride { /// Overrides applied to the `eth_` namespace of the RPC API for historical proofs ExEx. pub struct EthApiExt { eth_api: Eth, - preimage_store: P, + preimage_store: OpProofsStorage
<P>
, } impl EthApiExt where Eth: FullEthApi + Send + Sync + 'static, ErrorObject<'static>: From, - P: OpProofsStorage + Clone + 'static, + P: OpProofsStore + Clone + 'static, { async fn state_provider( &self, @@ -87,7 +87,7 @@ impl EthApiOverrideServer for EthApiExt where Eth: FullEthApi + Send + Sync + 'static, ErrorObject<'static>: From, - P: OpProofsStorage + Clone + 'static, + P: OpProofsStore + Clone + 'static, { async fn get_proof( &self, diff --git a/crates/optimism/trie/Cargo.toml b/crates/optimism/trie/Cargo.toml index 6825dfb377a..b2fe13e3cd3 100644 --- a/crates/optimism/trie/Cargo.toml +++ b/crates/optimism/trie/Cargo.toml @@ -74,3 +74,4 @@ serde-bincode-compat = [ "alloy-genesis/serde-bincode-compat", "reth-ethereum-primitives/serde-bincode-compat", ] +metrics = ["reth-trie/metrics"] diff --git a/crates/optimism/trie/src/api.rs b/crates/optimism/trie/src/api.rs index 81b6368c613..9da403ed7d7 100644 --- a/crates/optimism/trie/src/api.rs +++ b/crates/optimism/trie/src/api.rs @@ -26,17 +26,22 @@ pub enum OpProofsStorageError { /// Error occurred while interacting with the database. #[error(transparent)] DatabaseError(#[from] DatabaseError), - /// Other error #[error("Other error: {0}")] Other(eyre::Error), } +impl From for DatabaseError { + fn from(error: OpProofsStorageError) -> Self { + Self::Other(error.to_string()) + } +} + /// Result type for storage operations pub type OpProofsStorageResult = Result; /// Seeks and iterates over trie nodes in the database by path (lexicographical order) -pub trait OpProofsTrieCursor: Send + Sync { +pub trait OpProofsTrieCursorRO: Send + Sync { /// Seek to an exact path, otherwise return None if not found. fn seek_exact( &mut self, @@ -58,7 +63,7 @@ pub trait OpProofsTrieCursor: Send + Sync { } /// Seeks and iterates over hashed entries in the database by key. -pub trait OpProofsHashedCursor: Send + Sync { +pub trait OpProofsHashedCursorRO: Send + Sync { /// Value returned by the cursor. type Value: Debug; @@ -89,29 +94,29 @@ pub struct BlockStateDiff { /// Only leaf nodes and some branch nodes are stored. The bottom layer of branch nodes /// are not stored to reduce write amplification. This matches Reth's non-historical trie storage. #[auto_impl(Arc)] -pub trait OpProofsStorage: Send + Sync + Debug { +pub trait OpProofsStore: Send + Sync + Debug { /// Cursor for iterating over trie branches. - type StorageTrieCursor<'tx>: OpProofsTrieCursor + 'tx + type StorageTrieCursor<'tx>: OpProofsTrieCursorRO + 'tx where Self: 'tx; /// Cursor for iterating over account trie branches. - type AccountTrieCursor<'tx>: OpProofsTrieCursor + 'tx + type AccountTrieCursor<'tx>: OpProofsTrieCursorRO + 'tx where Self: 'tx; /// Cursor for iterating over storage leaves. - type StorageCursor<'tx>: OpProofsHashedCursor + 'tx + type StorageCursor<'tx>: OpProofsHashedCursorRO + 'tx where Self: 'tx; /// Cursor for iterating over account leaves. - type AccountHashedCursor<'tx>: OpProofsHashedCursor + 'tx + type AccountHashedCursor<'tx>: OpProofsHashedCursorRO + 'tx where Self: 'tx; /// Store a batch of account trie branches. Used for saving existing state. For live state - /// capture, use [store_trie_updates](OpProofsStorage::store_trie_updates). + /// capture, use [store_trie_updates](OpProofsStore::store_trie_updates). 
fn store_account_branches( &self, account_nodes: Vec<(Nibbles, Option)>, diff --git a/crates/optimism/trie/src/backfill.rs b/crates/optimism/trie/src/backfill.rs index d1b5dc1cdf4..2de8f864567 100644 --- a/crates/optimism/trie/src/backfill.rs +++ b/crates/optimism/trie/src/backfill.rs @@ -1,6 +1,6 @@ //! Backfill job for proofs storage. Handles storing the existing state into the proofs storage. -use crate::OpProofsStorage; +use crate::OpProofsStore; use alloy_primitives::B256; use reth_db::{ cursor::{DbCursorRO, DbDupCursorRO}, @@ -21,7 +21,7 @@ const BACKFILL_LOG_THRESHOLD: usize = 100000; /// Backfill job for external storage. #[derive(Debug)] -pub struct BackfillJob<'a, Tx: DbTx, S: OpProofsStorage + Send> { +pub struct BackfillJob<'a, Tx: DbTx, S: OpProofsStore + Send> { storage: S, tx: &'a Tx, } @@ -196,7 +196,7 @@ async fn backfill< Ok(total_entries) } -impl<'a, Tx: DbTx, S: OpProofsStorage + Send> BackfillJob<'a, Tx, S> { +impl<'a, Tx: DbTx, S: OpProofsStore + Send> BackfillJob<'a, Tx, S> { /// Create a new backfill job. pub const fn new(storage: S, tx: &'a Tx) -> Self { Self { storage, tx } @@ -348,7 +348,7 @@ impl<'a, Tx: DbTx, S: OpProofsStorage + Send> BackfillJob<'a, Tx, S> { #[cfg(test)] mod tests { use super::*; - use crate::{InMemoryProofsStorage, OpProofsHashedCursor, OpProofsTrieCursor}; + use crate::{InMemoryProofsStorage, OpProofsHashedCursorRO, OpProofsTrieCursorRO}; use alloy_primitives::{keccak256, Address, U256}; use reth_db::{ cursor::DbCursorRW, test_utils::create_test_rw_db, transaction::DbTxMut, Database, diff --git a/crates/optimism/trie/src/cursor.rs b/crates/optimism/trie/src/cursor.rs new file mode 100644 index 00000000000..52d77745b09 --- /dev/null +++ b/crates/optimism/trie/src/cursor.rs @@ -0,0 +1,100 @@ +//! Implementation of [`HashedCursor`] and [`TrieCursor`] for +//! [`OpProofsStorage`](crate::OpProofsStorage). + +use crate::{OpProofsHashedCursorRO, OpProofsTrieCursorRO}; +use alloy_primitives::{B256, U256}; +use derive_more::{Constructor, From}; +use reth_db::DatabaseError; +use reth_primitives_traits::Account; +use reth_trie::{ + hashed_cursor::{HashedCursor, HashedStorageCursor}, + trie_cursor::TrieCursor, + BranchNodeCompact, Nibbles, +}; + +/// Manages reading storage or account trie nodes from [`OpProofsTrieCursor`]. +#[derive(Debug, Clone, Constructor, From)] +pub struct OpProofsTrieCursor(pub C); + +impl TrieCursor for OpProofsTrieCursor +where + C: OpProofsTrieCursorRO, +{ + #[inline] + fn seek_exact( + &mut self, + key: Nibbles, + ) -> Result, DatabaseError> { + Ok(self.0.seek_exact(key)?) + } + + #[inline] + fn seek( + &mut self, + key: Nibbles, + ) -> Result, DatabaseError> { + Ok(self.0.seek(key)?) + } + + #[inline] + fn next(&mut self) -> Result, DatabaseError> { + Ok(self.0.next()?) + } + + #[inline] + fn current(&mut self) -> Result, DatabaseError> { + Ok(self.0.current()?) + } +} + +/// Manages reading hashed account nodes from external storage. +#[derive(Debug, Clone, Constructor)] +pub struct OpProofsHashedAccountCursor(pub C); + +impl HashedCursor for OpProofsHashedAccountCursor +where + C: OpProofsHashedCursorRO + Send + Sync, +{ + type Value = Account; + + #[inline] + fn seek(&mut self, key: B256) -> Result, DatabaseError> { + Ok(self.0.seek(key)?) + } + + #[inline] + fn next(&mut self) -> Result, DatabaseError> { + Ok(self.0.next()?) + } +} + +/// Manages reading hashed storage nodes from external storage. 
+#[derive(Debug, Clone, Constructor)] +pub struct OpProofsHashedStorageCursor(pub C); + +impl HashedCursor for OpProofsHashedStorageCursor +where + C: OpProofsHashedCursorRO + Send + Sync, +{ + type Value = U256; + + #[inline] + fn seek(&mut self, key: B256) -> Result, DatabaseError> { + Ok(self.0.seek(key)?) + } + + #[inline] + fn next(&mut self) -> Result, DatabaseError> { + Ok(self.0.next()?) + } +} + +impl HashedStorageCursor for OpProofsHashedStorageCursor +where + C: OpProofsHashedCursorRO + Send + Sync, +{ + #[inline] + fn is_storage_empty(&mut self) -> Result { + Ok(self.0.is_storage_empty()?) + } +} diff --git a/crates/optimism/trie/src/cursor_factory.rs b/crates/optimism/trie/src/cursor_factory.rs new file mode 100644 index 00000000000..a5826fd2aee --- /dev/null +++ b/crates/optimism/trie/src/cursor_factory.rs @@ -0,0 +1,88 @@ +//! Provides proof operation implementations for [`OpProofsStore`]. + +use crate::{ + OpProofsHashedAccountCursor, OpProofsHashedStorageCursor, OpProofsStorage, OpProofsStore, + OpProofsTrieCursor, +}; +use alloy_primitives::B256; +use reth_db::DatabaseError; +use reth_trie::{hashed_cursor::HashedCursorFactory, trie_cursor::TrieCursorFactory}; +use std::marker::PhantomData; + +/// Factory for creating trie cursors for [`OpProofsStore`]. +#[derive(Debug, Clone)] +pub struct OpProofsTrieCursorFactory<'tx, S: OpProofsStore> { + storage: &'tx OpProofsStorage, + block_number: u64, + _marker: PhantomData<&'tx ()>, +} + +impl<'tx, S: OpProofsStore> OpProofsTrieCursorFactory<'tx, S> { + /// Initializes new `OpProofsTrieCursorFactory` + pub const fn new(storage: &'tx OpProofsStorage, block_number: u64) -> Self { + Self { storage, block_number, _marker: PhantomData } + } +} + +impl<'tx, S> TrieCursorFactory for OpProofsTrieCursorFactory<'tx, S> +where + S: OpProofsStore + 'tx, +{ + type AccountTrieCursor = OpProofsTrieCursor>; + type StorageTrieCursor = OpProofsTrieCursor>; + + fn account_trie_cursor(&self) -> Result { + Ok(OpProofsTrieCursor::new( + self.storage + .account_trie_cursor(self.block_number) + .map_err(Into::::into)?, + )) + } + + fn storage_trie_cursor( + &self, + hashed_address: B256, + ) -> Result { + Ok(OpProofsTrieCursor::new( + self.storage + .storage_trie_cursor(hashed_address, self.block_number) + .map_err(Into::::into)?, + )) + } +} + +/// Factory for creating hashed account cursors for [`OpProofsStore`]. +#[derive(Debug, Clone)] +pub struct OpProofsHashedAccountCursorFactory<'tx, S: OpProofsStore> { + storage: &'tx OpProofsStorage, + block_number: u64, + _marker: PhantomData<&'tx ()>, +} + +impl<'tx, S: OpProofsStore> OpProofsHashedAccountCursorFactory<'tx, S> { + /// Creates a new `OpProofsHashedAccountCursorFactory` instance. 
+ pub const fn new(storage: &'tx OpProofsStorage, block_number: u64) -> Self { + Self { storage, block_number, _marker: PhantomData } + } +} + +impl<'tx, S> HashedCursorFactory for OpProofsHashedAccountCursorFactory<'tx, S> +where + S: OpProofsStore + 'tx, +{ + type AccountCursor = OpProofsHashedAccountCursor>; + type StorageCursor = OpProofsHashedStorageCursor>; + + fn hashed_account_cursor(&self) -> Result { + Ok(OpProofsHashedAccountCursor::new(self.storage.account_hashed_cursor(self.block_number)?)) + } + + fn hashed_storage_cursor( + &self, + hashed_address: B256, + ) -> Result { + Ok(OpProofsHashedStorageCursor::new( + self.storage.storage_hashed_cursor(hashed_address, self.block_number)?, + )) + } +} diff --git a/crates/optimism/trie/src/db/cursor.rs b/crates/optimism/trie/src/db/cursor.rs index 854ed87a8bf..c336a913b5a 100644 --- a/crates/optimism/trie/src/db/cursor.rs +++ b/crates/optimism/trie/src/db/cursor.rs @@ -5,7 +5,7 @@ use crate::{ AccountTrieHistory, HashedAccountHistory, HashedStorageHistory, HashedStorageKey, MaybeDeleted, StorageTrieHistory, StorageTrieKey, VersionedValue, }, - OpProofsHashedCursor, OpProofsStorageError, OpProofsStorageResult, OpProofsTrieCursor, + OpProofsHashedCursorRO, OpProofsStorageError, OpProofsStorageResult, OpProofsTrieCursorRO, }; use alloy_primitives::{B256, U256}; use reth_db::{ @@ -180,7 +180,7 @@ impl< } } -impl OpProofsTrieCursor for MdbxTrieCursor +impl OpProofsTrieCursorRO for MdbxTrieCursor where Cursor: DbCursorRO + DbDupCursorRO + Send + Sync, { @@ -215,7 +215,7 @@ where } } -impl OpProofsTrieCursor for MdbxTrieCursor +impl OpProofsTrieCursorRO for MdbxTrieCursor where Cursor: DbCursorRO + DbDupCursorRO + Send + Sync, { @@ -278,7 +278,7 @@ where } } -impl OpProofsHashedCursor for MdbxStorageCursor +impl OpProofsHashedCursorRO for MdbxStorageCursor where Cursor: DbCursorRO + DbDupCursorRO + Send + Sync, { @@ -310,7 +310,7 @@ where } } -impl OpProofsHashedCursor for MdbxAccountCursor +impl OpProofsHashedCursorRO for MdbxAccountCursor where Cursor: DbCursorRO + DbDupCursorRO + Send + Sync, { @@ -920,7 +920,7 @@ mod tests { let mut cur = account_trie_cursor(&tx, 100); // Wrapper should return (Nibbles, BranchNodeCompact) - let out = OpProofsTrieCursor::seek_exact(&mut cur, k).expect("ok").expect("some"); + let out = OpProofsTrieCursorRO::seek_exact(&mut cur, k).expect("ok").expect("some"); assert_eq!(out.0, k); } @@ -939,7 +939,7 @@ mod tests { let tx = db.tx().expect("ro tx"); let mut cur = account_trie_cursor(&tx, 10); - let out = OpProofsTrieCursor::seek_exact(&mut cur, k).expect("ok"); + let out = OpProofsTrieCursorRO::seek_exact(&mut cur, k).expect("ok"); assert!(out.is_none(), "account seek_exact must filter tombstone"); } @@ -960,15 +960,15 @@ mod tests { let mut cur = account_trie_cursor(&tx, 100); // seek at k1 - let out1 = OpProofsTrieCursor::seek(&mut cur, k1).expect("ok").expect("some"); + let out1 = OpProofsTrieCursorRO::seek(&mut cur, k1).expect("ok").expect("some"); assert_eq!(out1.0, k1); // current should be k1 - let cur_k = OpProofsTrieCursor::current(&mut cur).expect("ok").expect("some"); + let cur_k = OpProofsTrieCursorRO::current(&mut cur).expect("ok").expect("some"); assert_eq!(cur_k, k1); // next should move to k2 - let out2 = OpProofsTrieCursor::next(&mut cur).expect("ok").expect("some"); + let out2 = OpProofsTrieCursorRO::next(&mut cur).expect("ok").expect("some"); assert_eq!(out2.0, k2); } @@ -994,12 +994,12 @@ mod tests { // Cursor bound to A must not see B’s data let mut cur_a = storage_trie_cursor(&tx, 100, 
addr_a); - let out_a = OpProofsTrieCursor::seek_exact(&mut cur_a, path).expect("ok"); + let out_a = OpProofsTrieCursorRO::seek_exact(&mut cur_a, path).expect("ok"); assert!(out_a.is_none(), "no data for addr A"); // Cursor bound to B should see it let mut cur_b = storage_trie_cursor(&tx, 100, addr_b); - let out_b = OpProofsTrieCursor::seek_exact(&mut cur_b, path).expect("ok").expect("some"); + let out_b = OpProofsTrieCursorRO::seek_exact(&mut cur_b, path).expect("ok").expect("some"); assert_eq!(out_b.0, path); } @@ -1026,7 +1026,7 @@ mod tests { let mut cur_a = storage_trie_cursor(&tx, 100, addr_a); // seek at p1: for A there is no p1; the next key >= p1 under A is p2 - let out = OpProofsTrieCursor::seek(&mut cur_a, p1).expect("ok").expect("some"); + let out = OpProofsTrieCursorRO::seek(&mut cur_a, p1).expect("ok").expect("some"); assert_eq!(out.0, p2); } @@ -1051,10 +1051,10 @@ mod tests { let mut cur_a = storage_trie_cursor(&tx, 100, addr_a); // position at p1 (A) - let _ = OpProofsTrieCursor::seek_exact(&mut cur_a, p1).expect("ok").expect("some"); + let _ = OpProofsTrieCursorRO::seek_exact(&mut cur_a, p1).expect("ok").expect("some"); // next should reach boundary; impl filters different address and returns None - let out = OpProofsTrieCursor::next(&mut cur_a).expect("ok"); + let out = OpProofsTrieCursorRO::next(&mut cur_a).expect("ok"); assert!(out.is_none(), "next() should stop when next key is a different address"); } @@ -1074,9 +1074,9 @@ mod tests { let tx = db.tx().expect("ro tx"); let mut cur = storage_trie_cursor(&tx, 100, addr); - let _ = OpProofsTrieCursor::seek_exact(&mut cur, p).expect("ok").expect("some"); + let _ = OpProofsTrieCursorRO::seek_exact(&mut cur, p).expect("ok").expect("some"); - let now = OpProofsTrieCursor::current(&mut cur).expect("ok").expect("some"); + let now = OpProofsTrieCursorRO::current(&mut cur).expect("ok").expect("some"); assert_eq!(now, p); } @@ -1096,7 +1096,7 @@ mod tests { let mut cur = storage_cursor(&tx, 100, addr); let (got_slot, got_val) = - OpProofsHashedCursor::seek(&mut cur, slot).expect("ok").expect("some"); + OpProofsHashedCursorRO::seek(&mut cur, slot).expect("ok").expect("some"); assert_eq!(got_slot, slot); assert_eq!(got_val, U256::from(7)); } @@ -1117,7 +1117,7 @@ mod tests { let tx = db.tx().expect("ro"); let mut cur = storage_cursor(&tx, 10, addr); - let out = OpProofsHashedCursor::seek(&mut cur, slot).expect("ok"); + let out = OpProofsHashedCursorRO::seek(&mut cur, slot).expect("ok"); assert!(out.is_none(), "wrapper must filter tombstoned latest"); } @@ -1138,10 +1138,10 @@ mod tests { let tx = db.tx().expect("ro"); let mut cur = storage_cursor(&tx, 100, addr); - let (k1, v1) = OpProofsHashedCursor::seek(&mut cur, s1).expect("ok").expect("some"); + let (k1, v1) = OpProofsHashedCursorRO::seek(&mut cur, s1).expect("ok").expect("some"); assert_eq!((k1, v1), (s1, U256::from(11))); - let (k2, v2) = OpProofsHashedCursor::next(&mut cur).expect("ok").expect("some"); + let (k2, v2) = OpProofsHashedCursorRO::next(&mut cur).expect("ok").expect("some"); assert_eq!((k2, v2), (s2, U256::from(22))); } @@ -1159,7 +1159,8 @@ mod tests { let tx = db.tx().expect("ro"); let mut cur = account_cursor(&tx, 100); - let (got_key, _acc) = OpProofsHashedCursor::seek(&mut cur, key).expect("ok").expect("some"); + let (got_key, _acc) = + OpProofsHashedCursorRO::seek(&mut cur, key).expect("ok").expect("some"); assert_eq!(got_key, key); } @@ -1178,7 +1179,7 @@ mod tests { let tx = db.tx().expect("ro"); let mut cur = account_cursor(&tx, 10); - let out = 
OpProofsHashedCursor::seek(&mut cur, key).expect("ok"); + let out = OpProofsHashedCursorRO::seek(&mut cur, key).expect("ok"); assert!(out.is_none(), "wrapper must filter tombstoned latest"); } @@ -1198,10 +1199,10 @@ mod tests { let tx = db.tx().expect("ro"); let mut cur = account_cursor(&tx, 100); - let (got1, _) = OpProofsHashedCursor::seek(&mut cur, k1).expect("ok").expect("some"); + let (got1, _) = OpProofsHashedCursorRO::seek(&mut cur, k1).expect("ok").expect("some"); assert_eq!(got1, k1); - let (got2, _) = OpProofsHashedCursor::next(&mut cur).expect("ok").expect("some"); + let (got2, _) = OpProofsHashedCursorRO::next(&mut cur).expect("ok").expect("some"); assert_eq!(got2, k2); } } diff --git a/crates/optimism/trie/src/db/store.rs b/crates/optimism/trie/src/db/store.rs index 9be13b2efb7..82463252fa3 100644 --- a/crates/optimism/trie/src/db/store.rs +++ b/crates/optimism/trie/src/db/store.rs @@ -1,6 +1,5 @@ use super::{BlockNumberHash, ProofWindow, ProofWindowKey}; use crate::{ - api::OpProofsStorage, db::{ cursor::Dup, models::{ @@ -9,7 +8,7 @@ use crate::{ }, MdbxAccountCursor, MdbxStorageCursor, MdbxTrieCursor, }, - BlockStateDiff, OpProofsStorageError, OpProofsStorageResult, + BlockStateDiff, OpProofsStorageError, OpProofsStorageResult, OpProofsStore, }; use alloy_primitives::{map::HashMap, B256, U256}; use itertools::Itertools; @@ -63,7 +62,7 @@ impl MdbxProofsStorage { } } -impl OpProofsStorage for MdbxProofsStorage { +impl OpProofsStore for MdbxProofsStorage { type StorageTrieCursor<'tx> = MdbxTrieCursor> where diff --git a/crates/optimism/trie/src/in_memory.rs b/crates/optimism/trie/src/in_memory.rs index 4c79fa060c8..ef37b3e03b2 100644 --- a/crates/optimism/trie/src/in_memory.rs +++ b/crates/optimism/trie/src/in_memory.rs @@ -1,8 +1,8 @@ -//! In-memory implementation of [`OpProofsStorage`] for testing purposes +//! 
In-memory implementation of [`OpProofsStore`] for testing purposes use crate::{ - BlockStateDiff, OpProofsHashedCursor, OpProofsStorage, OpProofsStorageError, - OpProofsStorageResult, OpProofsTrieCursor, + BlockStateDiff, OpProofsHashedCursorRO, OpProofsStorageError, OpProofsStorageResult, + OpProofsStore, OpProofsTrieCursorRO, }; use alloy_primitives::{map::HashMap, B256, U256}; use reth_primitives_traits::Account; @@ -10,7 +10,7 @@ use reth_trie::{updates::TrieUpdates, BranchNodeCompact, HashedPostState, Nibble use std::{collections::BTreeMap, sync::Arc}; use tokio::sync::RwLock; -/// In-memory implementation of [`OpProofsStorage`] for testing purposes +/// In-memory implementation of [`OpProofsStore`] for testing purposes #[derive(Debug, Clone)] pub struct InMemoryProofsStorage { /// Shared state across all instances @@ -207,7 +207,7 @@ impl InMemoryTrieCursor { } } -impl OpProofsTrieCursor for InMemoryTrieCursor { +impl OpProofsTrieCursorRO for InMemoryTrieCursor { fn seek_exact( &mut self, path: Nibbles, @@ -297,7 +297,7 @@ impl InMemoryStorageCursor { } } -impl OpProofsHashedCursor for InMemoryStorageCursor { +impl OpProofsHashedCursorRO for InMemoryStorageCursor { type Value = U256; fn seek(&mut self, key: B256) -> OpProofsStorageResult> { @@ -319,7 +319,7 @@ impl OpProofsHashedCursor for InMemoryStorageCursor { } } -/// In-memory implementation of [`OpProofsHashedCursor`] for accounts +/// In-memory implementation of [`OpProofsHashedCursorRO`] for accounts #[derive(Debug)] pub struct InMemoryAccountCursor { /// Current position in the iteration (-1 means not positioned yet) @@ -357,7 +357,7 @@ impl InMemoryAccountCursor { } } -impl OpProofsHashedCursor for InMemoryAccountCursor { +impl OpProofsHashedCursorRO for InMemoryAccountCursor { type Value = Account; fn seek(&mut self, key: B256) -> OpProofsStorageResult> { @@ -379,7 +379,7 @@ impl OpProofsHashedCursor for InMemoryAccountCursor { } } -impl OpProofsStorage for InMemoryProofsStorage { +impl OpProofsStore for InMemoryProofsStorage { type StorageTrieCursor<'tx> = InMemoryTrieCursor; type AccountTrieCursor<'tx> = InMemoryTrieCursor; type StorageCursor<'tx> = InMemoryStorageCursor; diff --git a/crates/optimism/trie/src/lib.rs b/crates/optimism/trie/src/lib.rs index 91137d68c4d..a4e57110c97 100644 --- a/crates/optimism/trie/src/lib.rs +++ b/crates/optimism/trie/src/lib.rs @@ -14,8 +14,8 @@ pub mod api; pub use api::{ - BlockStateDiff, OpProofsHashedCursor, OpProofsStorage, OpProofsStorageError, - OpProofsStorageResult, OpProofsTrieCursor, + BlockStateDiff, OpProofsHashedCursorRO, OpProofsStorageError, OpProofsStorageResult, + OpProofsStore, OpProofsTrieCursorRO, }; pub mod backfill; @@ -28,11 +28,27 @@ pub use in_memory::{ pub mod db; +#[cfg(feature = "metrics")] pub mod metrics; -pub use metrics::OpProofsStorageWithMetrics; +#[cfg(feature = "metrics")] +pub use metrics::{ + OpProofsHashedAccountCursor, OpProofsHashedStorageCursor, OpProofsStorage, OpProofsTrieCursor, + StorageMetrics, +}; + +#[cfg(not(feature = "metrics"))] +/// Alias for [`OpProofsStore`] type without metrics (`metrics` feature is disabled). 
+pub type OpProofsStorage = S; pub mod proof; pub mod provider; pub mod live; + +pub mod cursor; +#[cfg(not(feature = "metrics"))] +pub use cursor::{OpProofsHashedAccountCursor, OpProofsHashedStorageCursor, OpProofsTrieCursor}; + +pub mod cursor_factory; +pub use cursor_factory::{OpProofsHashedAccountCursorFactory, OpProofsTrieCursorFactory}; diff --git a/crates/optimism/trie/src/live.rs b/crates/optimism/trie/src/live.rs index 7694d09b569..597eac9b594 100644 --- a/crates/optimism/trie/src/live.rs +++ b/crates/optimism/trie/src/live.rs @@ -1,9 +1,11 @@ //! Live trie collector for external proofs storage. use crate::{ - api::{BlockStateDiff, OpProofsStorage, OpProofsStorageError}, + api::{BlockStateDiff, OpProofsStorageError, OpProofsStore}, provider::OpProofsStateProviderRef, + OpProofsStorage, }; +use derive_more::Constructor; use reth_evm::{execute::Executor, ConfigureEvm}; use reth_primitives_traits::{AlloyBlockHeader, BlockTy, RecoveredBlock}; use reth_provider::{ @@ -15,7 +17,7 @@ use std::time::Instant; use tracing::debug; /// Live trie collector for external proofs storage. -#[derive(Debug)] +#[derive(Debug, Constructor)] pub struct LiveTrieCollector<'tx, Evm, Provider, PreimageStore> where Evm: ConfigureEvm, @@ -23,20 +25,15 @@ where { evm_config: Evm, provider: Provider, - storage: &'tx PreimageStore, + storage: &'tx OpProofsStorage, } impl<'tx, Evm, Provider, Store> LiveTrieCollector<'tx, Evm, Provider, Store> where Evm: ConfigureEvm, Provider: StateReader + DatabaseProviderFactory + StateProviderFactory, - Store: 'tx + OpProofsStorage + Clone + 'static, + Store: 'tx + OpProofsStore + Clone + 'static, { - /// Create a new `LiveTrieCollector` instance - pub const fn new(evm_config: Evm, provider: Provider, storage: &'tx Store) -> Self { - Self { evm_config, provider, storage } - } - /// Execute a block and store the updates in the storage. pub async fn execute_and_store_block_updates( &self, diff --git a/crates/optimism/trie/src/metrics.rs b/crates/optimism/trie/src/metrics.rs index 1477b5dff78..c871e9ee16e 100644 --- a/crates/optimism/trie/src/metrics.rs +++ b/crates/optimism/trie/src/metrics.rs @@ -1,10 +1,11 @@ //! Storage wrapper that records metrics for all operations. -use crate::api::{ - BlockStateDiff, OpProofsHashedCursor, OpProofsStorage, OpProofsStorageResult, - OpProofsTrieCursor, +use crate::{ + cursor, BlockStateDiff, OpProofsHashedCursorRO, OpProofsStorageResult, OpProofsStore, + OpProofsTrieCursorRO, }; use alloy_primitives::{map::HashMap, B256, U256}; +use derive_more::Constructor; use metrics::{Counter, Histogram}; use reth_metrics::Metrics; use reth_primitives_traits::Account; @@ -17,6 +18,22 @@ use std::{ }; use strum::{EnumCount, EnumIter, IntoEnumIterator}; +/// Alias for [`OpProofsStorageWithMetrics`]. +pub type OpProofsStorage = OpProofsStorageWithMetrics; + +/// Alias for [`OpProofsTrieCursor`](cursor::OpProofsTrieCursor) with metrics layer. +pub type OpProofsTrieCursor = cursor::OpProofsTrieCursor>; + +/// Alias for [`OpProofsHashedAccountCursor`](cursor::OpProofsHashedAccountCursor) with metrics +/// layer. +pub type OpProofsHashedAccountCursor = + cursor::OpProofsHashedAccountCursor>; + +/// Alias for [`OpProofsHashedStorageCursor`](cursor::OpProofsHashedStorageCursor) with metrics +/// layer. +pub type OpProofsHashedStorageCursor = + cursor::OpProofsHashedStorageCursor>; + /// Types of storage operations that can be tracked. 
#[derive(Debug, Clone, Copy, Eq, PartialEq, Hash, EnumCount, EnumIter)] pub enum StorageOperation { @@ -200,45 +217,15 @@ pub struct BlockMetrics { pub hashed_storages_written_total: Counter, } -/// Wrapper around [`OpProofsStorage`] that records metrics for all operations. -#[derive(Debug, Clone)] -pub struct OpProofsStorageWithMetrics { - storage: S, - metrics: Arc, -} - -impl OpProofsStorageWithMetrics { - /// Create a new storage wrapper with metrics. - pub const fn new(storage: S, metrics: Arc) -> Self { - Self { storage, metrics } - } - - /// Get the underlying storage. - pub const fn inner(&self) -> &S { - &self.storage - } - - /// Get the metrics. - pub const fn metrics(&self) -> &Arc { - &self.metrics - } -} - /// Wrapper for [`OpProofsTrieCursor`] that records metrics. -#[derive(Debug)] -pub struct TrieCursorWithMetrics { +#[derive(Debug, Constructor, Clone)] +pub struct OpProofsTrieCursorWithMetrics { cursor: C, metrics: Arc, } -impl TrieCursorWithMetrics { - /// Create a new cursor wrapper with metrics. - pub const fn new(cursor: C, metrics: Arc) -> Self { - Self { cursor, metrics } - } -} - -impl OpProofsTrieCursor for TrieCursorWithMetrics { +impl OpProofsTrieCursorRO for OpProofsTrieCursorWithMetrics { + #[inline] fn seek_exact( &mut self, path: Nibbles, @@ -248,6 +235,7 @@ impl OpProofsTrieCursor for TrieCursorWithMetrics { }) } + #[inline] fn seek( &mut self, path: Nibbles, @@ -255,62 +243,79 @@ impl OpProofsTrieCursor for TrieCursorWithMetrics { self.metrics.record_operation(StorageOperation::TrieCursorSeek, || self.cursor.seek(path)) } + #[inline] fn next(&mut self) -> OpProofsStorageResult> { self.metrics.record_operation(StorageOperation::TrieCursorNext, || self.cursor.next()) } + #[inline] fn current(&mut self) -> OpProofsStorageResult> { self.metrics.record_operation(StorageOperation::TrieCursorCurrent, || self.cursor.current()) } } -/// Wrapper for [`OpProofsHashedCursor`] that records metrics. -#[derive(Debug)] -pub struct HashedCursorWithMetrics { +/// Wrapper for [`OpProofsHashedCursorRO`] type that records metrics. +#[derive(Debug, Constructor, Clone)] +pub struct OpProofsHashedCursorWithMetrics { cursor: C, metrics: Arc, } -impl HashedCursorWithMetrics { - /// Create a new cursor wrapper with metrics. - pub const fn new(cursor: C, metrics: Arc) -> Self { - Self { cursor, metrics } - } -} - -impl OpProofsHashedCursor for HashedCursorWithMetrics { +impl OpProofsHashedCursorRO for OpProofsHashedCursorWithMetrics { type Value = C::Value; + #[inline] fn seek(&mut self, key: B256) -> OpProofsStorageResult> { self.metrics.record_operation(StorageOperation::HashedCursorSeek, || self.cursor.seek(key)) } + #[inline] fn next(&mut self) -> OpProofsStorageResult> { self.metrics.record_operation(StorageOperation::HashedCursorNext, || self.cursor.next()) } } -impl OpProofsStorage for OpProofsStorageWithMetrics +/// Wrapper around [`OpProofsStorage`] that records metrics for all operations. +#[derive(Debug, Clone, Constructor)] +pub struct OpProofsStorageWithMetrics { + storage: S, + metrics: Arc, +} + +impl OpProofsStorageWithMetrics { + /// Get the underlying storage. + pub const fn inner(&self) -> &S { + &self.storage + } + + /// Get the metrics. 
+ pub const fn metrics(&self) -> &Arc { + &self.metrics + } +} + +impl OpProofsStore for OpProofsStorageWithMetrics where - S: OpProofsStorage, + S: OpProofsStore, { type StorageTrieCursor<'tx> - = TrieCursorWithMetrics> + = OpProofsTrieCursorWithMetrics> where S: 'tx; type AccountTrieCursor<'tx> - = TrieCursorWithMetrics> + = OpProofsTrieCursorWithMetrics> where S: 'tx; type StorageCursor<'tx> - = HashedCursorWithMetrics> + = OpProofsHashedCursorWithMetrics> where S: 'tx; type AccountHashedCursor<'tx> - = HashedCursorWithMetrics> + = OpProofsHashedCursorWithMetrics> where S: 'tx; + #[inline] async fn store_account_branches( &self, account_nodes: Vec<(Nibbles, Option)>, @@ -332,6 +337,7 @@ where result } + #[inline] async fn store_storage_branches( &self, hashed_address: B256, @@ -354,6 +360,7 @@ where result } + #[inline] async fn store_hashed_accounts( &self, accounts: Vec<(B256, Option)>, @@ -375,6 +382,7 @@ where result } + #[inline] async fn store_hashed_storages( &self, hashed_address: B256, @@ -397,49 +405,56 @@ where result } + #[inline] async fn get_earliest_block_number(&self) -> OpProofsStorageResult> { self.storage.get_earliest_block_number().await } + #[inline] async fn get_latest_block_number(&self) -> OpProofsStorageResult> { self.storage.get_latest_block_number().await } + #[inline] fn storage_trie_cursor<'tx>( &self, hashed_address: B256, max_block_number: u64, ) -> OpProofsStorageResult> { let cursor = self.storage.storage_trie_cursor(hashed_address, max_block_number)?; - Ok(TrieCursorWithMetrics::new(cursor, self.metrics.clone())) + Ok(OpProofsTrieCursorWithMetrics::new(cursor, self.metrics.clone())) } + #[inline] fn account_trie_cursor<'tx>( &self, max_block_number: u64, ) -> OpProofsStorageResult> { let cursor = self.storage.account_trie_cursor(max_block_number)?; - Ok(TrieCursorWithMetrics::new(cursor, self.metrics.clone())) + Ok(OpProofsTrieCursorWithMetrics::new(cursor, self.metrics.clone())) } + #[inline] fn storage_hashed_cursor<'tx>( &self, hashed_address: B256, max_block_number: u64, ) -> OpProofsStorageResult> { let cursor = self.storage.storage_hashed_cursor(hashed_address, max_block_number)?; - Ok(HashedCursorWithMetrics::new(cursor, self.metrics.clone())) + Ok(OpProofsHashedCursorWithMetrics::new(cursor, self.metrics.clone())) } + #[inline] fn account_hashed_cursor<'tx>( &self, max_block_number: u64, ) -> OpProofsStorageResult> { let cursor = self.storage.account_hashed_cursor(max_block_number)?; - Ok(HashedCursorWithMetrics::new(cursor, self.metrics.clone())) + Ok(OpProofsHashedCursorWithMetrics::new(cursor, self.metrics.clone())) } // no metrics for these + #[inline] async fn store_trie_updates( &self, block_number: u64, @@ -448,10 +463,11 @@ where self.storage.store_trie_updates(block_number, block_state_diff).await } + #[inline] async fn fetch_trie_updates(&self, block_number: u64) -> OpProofsStorageResult { self.storage.fetch_trie_updates(block_number).await } - + #[inline] async fn prune_earliest_state( &self, new_earliest_block_number: u64, @@ -460,6 +476,7 @@ where self.storage.prune_earliest_state(new_earliest_block_number, diff).await } + #[inline] async fn replace_updates( &self, latest_common_block_number: u64, @@ -468,6 +485,7 @@ where self.storage.replace_updates(latest_common_block_number, blocks_to_add).await } + #[inline] async fn set_earliest_block_number( &self, block_number: u64, diff --git a/crates/optimism/trie/src/proof.rs b/crates/optimism/trie/src/proof.rs index 818c376f873..9e88ecf201b 100644 --- a/crates/optimism/trie/src/proof.rs 
+++ b/crates/optimism/trie/src/proof.rs @@ -1,218 +1,33 @@ //! Provides proof operation implementations for [`OpProofsStorage`]. -use crate::api::{ - OpProofsHashedCursor, OpProofsStorage, OpProofsStorageError, - OpProofsTrieCursor as OpProofsDBTrieCursor, +use crate::{ + OpProofsHashedAccountCursorFactory, OpProofsStorage, OpProofsStore, OpProofsTrieCursorFactory, }; use alloy_primitives::{ keccak256, map::{B256Map, HashMap}, - Address, Bytes, B256, U256, + Address, Bytes, B256, }; -use core::marker; -use reth_db::DatabaseError; use reth_execution_errors::{StateProofError, StateRootError, StorageRootError, TrieWitnessError}; -use reth_primitives_traits::Account; use reth_trie::{ - hashed_cursor::{ - HashedCursor, HashedCursorFactory, HashedPostStateCursorFactory, HashedStorageCursor, - }, + hashed_cursor::HashedPostStateCursorFactory, metrics::TrieRootMetrics, proof::{Proof, StorageProof}, - trie_cursor::{InMemoryTrieCursorFactory, TrieCursor, TrieCursorFactory}, + trie_cursor::InMemoryTrieCursorFactory, updates::TrieUpdates, witness::TrieWitness, - AccountProof, BranchNodeCompact, HashedPostState, HashedPostStateSorted, HashedStorage, - MultiProof, MultiProofTargets, Nibbles, StateRoot, StorageMultiProof, StorageRoot, TrieInput, - TrieType, + AccountProof, HashedPostState, HashedPostStateSorted, HashedStorage, MultiProof, + MultiProofTargets, StateRoot, StorageMultiProof, StorageRoot, TrieInput, TrieType, }; -/// Manages reading storage or account trie nodes from [`OpProofsDBTrieCursor`]. -#[derive(Debug, Clone)] -pub struct OpProofsTrieCursor(pub C); - -impl OpProofsTrieCursor { - /// Creates a new `OpProofsTrieCursor` instance. - pub const fn new(cursor: C) -> Self { - Self(cursor) - } -} - -impl From for DatabaseError { - fn from(error: OpProofsStorageError) -> Self { - Self::Other(error.to_string()) - } -} - -impl TrieCursor for OpProofsTrieCursor -where - C: OpProofsDBTrieCursor + Send + Sync, -{ - fn seek_exact( - &mut self, - key: Nibbles, - ) -> Result, DatabaseError> { - Ok(self.0.seek_exact(key)?) - } - - fn seek( - &mut self, - key: Nibbles, - ) -> Result, DatabaseError> { - Ok(self.0.seek(key)?) - } - - fn next(&mut self) -> Result, DatabaseError> { - Ok(self.0.next()?) - } - - fn current(&mut self) -> Result, DatabaseError> { - Ok(self.0.current()?) - } -} - -/// Factory for creating trie cursors for [`OpProofsStorage`]. -#[derive(Debug, Clone)] -pub struct OpProofsTrieCursorFactory<'tx, Storage: OpProofsStorage> { - storage: &'tx Storage, - block_number: u64, - _marker: marker::PhantomData<&'tx ()>, -} - -impl<'tx, Storage: OpProofsStorage> OpProofsTrieCursorFactory<'tx, Storage> { - /// Initializes new `OpProofsTrieCursorFactory` - pub const fn new(storage: &'tx Storage, block_number: u64) -> Self { - Self { storage, block_number, _marker: core::marker::PhantomData } - } -} - -impl<'tx, Storage: OpProofsStorage + 'tx> TrieCursorFactory - for OpProofsTrieCursorFactory<'tx, Storage> -{ - type AccountTrieCursor = OpProofsTrieCursor>; - type StorageTrieCursor = OpProofsTrieCursor>; - - fn account_trie_cursor(&self) -> Result { - Ok(OpProofsTrieCursor::new( - self.storage - .account_trie_cursor(self.block_number) - .map_err(Into::::into)?, - )) - } - - fn storage_trie_cursor( - &self, - hashed_address: B256, - ) -> Result { - Ok(OpProofsTrieCursor::new( - self.storage - .storage_trie_cursor(hashed_address, self.block_number) - .map_err(Into::::into)?, - )) - } -} - -/// Manages reading hashed account nodes from external storage. 
-#[derive(Debug, Clone)] -pub struct OpProofsHashedAccountCursor(pub C); - -impl OpProofsHashedAccountCursor { - /// Creates a new `OpProofsHashedAccountCursor` instance. - pub const fn new(cursor: C) -> Self { - Self(cursor) - } -} - -impl + Send + Sync> HashedCursor - for OpProofsHashedAccountCursor -{ - type Value = Account; - - fn seek(&mut self, key: B256) -> Result, DatabaseError> { - Ok(self.0.seek(key)?) - } - - fn next(&mut self) -> Result, DatabaseError> { - Ok(self.0.next()?) - } -} - -/// Manages reading hashed storage nodes from [`OpProofsHashedCursor`]. -#[derive(Debug, Clone)] -pub struct OpProofsHashedStorageCursor>(pub C); - -impl> OpProofsHashedStorageCursor { - /// Creates a new `OpProofsHashedStorageCursor` instance. - pub const fn new(cursor: C) -> Self { - Self(cursor) - } -} - -impl + Send + Sync> HashedCursor - for OpProofsHashedStorageCursor -{ - type Value = U256; - - fn seek(&mut self, key: B256) -> Result, DatabaseError> { - Ok(self.0.seek(key)?) - } - - fn next(&mut self) -> Result, DatabaseError> { - Ok(self.0.next()?) - } -} - -impl + Send + Sync> HashedStorageCursor - for OpProofsHashedStorageCursor -{ - fn is_storage_empty(&mut self) -> Result { - Ok(self.0.is_storage_empty()?) - } -} - -/// Factory for creating hashed account cursors for [`OpProofsStorage`]. -#[derive(Debug, Clone)] -pub struct OpProofsHashedAccountCursorFactory<'tx, Storage: OpProofsStorage> { - storage: &'tx Storage, - block_number: u64, - _marker: core::marker::PhantomData<&'tx ()>, -} - -impl<'tx, Storage: OpProofsStorage + 'tx> OpProofsHashedAccountCursorFactory<'tx, Storage> { - /// Creates a new `OpProofsHashedAccountCursorFactory` instance. - pub const fn new(storage: &'tx Storage, block_number: u64) -> Self { - Self { storage, block_number, _marker: core::marker::PhantomData } - } -} - -impl<'tx, Storage: OpProofsStorage + 'tx> HashedCursorFactory - for OpProofsHashedAccountCursorFactory<'tx, Storage> -{ - type AccountCursor = OpProofsHashedAccountCursor>; - type StorageCursor = OpProofsHashedStorageCursor>; - - fn hashed_account_cursor(&self) -> Result { - Ok(OpProofsHashedAccountCursor::new(self.storage.account_hashed_cursor(self.block_number)?)) - } - - fn hashed_storage_cursor( - &self, - hashed_address: B256, - ) -> Result { - Ok(OpProofsHashedStorageCursor::new( - self.storage.storage_hashed_cursor(hashed_address, self.block_number)?, - )) - } -} - /// Extends [`Proof`] with operations specific for working with [`OpProofsStorage`]. -pub trait DatabaseProof<'tx, Storage> { +pub trait DatabaseProof<'tx, S> { /// Creates a new `DatabaseProof` instance from external storage. - fn from_tx(storage: &'tx Storage, block_number: u64) -> Self; + fn from_tx(storage: &'tx OpProofsStorage, block_number: u64) -> Self; /// Generates the state proof for target account based on [`TrieInput`]. fn overlay_account_proof( - storage: &'tx Storage, + storage: &'tx OpProofsStorage, block_number: u64, input: TrieInput, address: Address, @@ -221,21 +36,20 @@ pub trait DatabaseProof<'tx, Storage> { /// Generates the state [`MultiProof`] for target hashed account and storage keys. 
fn overlay_multiproof( - storage: &'tx Storage, + storage: &'tx OpProofsStorage, block_number: u64, input: TrieInput, targets: MultiProofTargets, ) -> Result; } -impl<'tx, Storage: OpProofsStorage + Clone> DatabaseProof<'tx, Storage> - for Proof< - OpProofsTrieCursorFactory<'tx, Storage>, - OpProofsHashedAccountCursorFactory<'tx, Storage>, - > +impl<'tx, S> DatabaseProof<'tx, S> + for Proof, OpProofsHashedAccountCursorFactory<'tx, S>> +where + S: OpProofsStore + Clone, { /// Create a new [`Proof`] instance from [`OpProofsStorage`]. - fn from_tx(storage: &'tx Storage, block_number: u64) -> Self { + fn from_tx(storage: &'tx OpProofsStorage, block_number: u64) -> Self { Self::new( OpProofsTrieCursorFactory::new(storage, block_number), OpProofsHashedAccountCursorFactory::new(storage, block_number), @@ -244,7 +58,7 @@ impl<'tx, Storage: OpProofsStorage + Clone> DatabaseProof<'tx, Storage> /// Generates the state proof for target account based on [`TrieInput`]. fn overlay_account_proof( - storage: &'tx Storage, + storage: &'tx OpProofsStorage, block_number: u64, input: TrieInput, address: Address, @@ -267,7 +81,7 @@ impl<'tx, Storage: OpProofsStorage + Clone> DatabaseProof<'tx, Storage> /// Generates the state [`MultiProof`] for target hashed account and storage keys. fn overlay_multiproof( - storage: &'tx Storage, + storage: &'tx OpProofsStorage, block_number: u64, input: TrieInput, targets: MultiProofTargets, @@ -289,13 +103,13 @@ impl<'tx, Storage: OpProofsStorage + Clone> DatabaseProof<'tx, Storage> } /// Extends [`StorageProof`] with operations specific for working with [`OpProofsStorage`]. -pub trait DatabaseStorageProof<'tx, Storage> { +pub trait DatabaseStorageProof<'tx, S> { /// Create a new [`StorageProof`] from [`OpProofsStorage`] and account address. - fn from_tx(storage: &'tx Storage, block_number: u64, address: Address) -> Self; + fn from_tx(storage: &'tx OpProofsStorage, block_number: u64, address: Address) -> Self; /// Generates the storage proof for target slot based on [`TrieInput`]. fn overlay_storage_proof( - storage: &'tx Storage, + storage: &'tx OpProofsStorage, block_number: u64, address: Address, slot: B256, @@ -304,7 +118,7 @@ pub trait DatabaseStorageProof<'tx, Storage> { /// Generates the storage multiproof for target slots based on [`TrieInput`]. fn overlay_storage_multiproof( - storage: &'tx Storage, + storage: &'tx OpProofsStorage, block_number: u64, address: Address, slots: &[B256], @@ -312,14 +126,13 @@ pub trait DatabaseStorageProof<'tx, Storage> { ) -> Result; } -impl<'tx, Storage: OpProofsStorage + 'tx + Clone> DatabaseStorageProof<'tx, Storage> - for StorageProof< - OpProofsTrieCursorFactory<'tx, Storage>, - OpProofsHashedAccountCursorFactory<'tx, Storage>, - > +impl<'tx, S> DatabaseStorageProof<'tx, S> + for StorageProof, OpProofsHashedAccountCursorFactory<'tx, S>> +where + S: OpProofsStore + 'tx + Clone, { /// Create a new [`StorageProof`] from [`OpProofsStorage`] and account address. 
- fn from_tx(storage: &'tx Storage, block_number: u64, address: Address) -> Self { + fn from_tx(storage: &'tx OpProofsStorage, block_number: u64, address: Address) -> Self { Self::new( OpProofsTrieCursorFactory::new(storage, block_number), OpProofsHashedAccountCursorFactory::new(storage, block_number), @@ -328,7 +141,7 @@ impl<'tx, Storage: OpProofsStorage + 'tx + Clone> DatabaseStorageProof<'tx, Stor } fn overlay_storage_proof( - storage: &'tx Storage, + storage: &'tx OpProofsStorage, block_number: u64, address: Address, slot: B256, @@ -350,7 +163,7 @@ impl<'tx, Storage: OpProofsStorage + 'tx + Clone> DatabaseStorageProof<'tx, Stor } fn overlay_storage_multiproof( - storage: &'tx Storage, + storage: &'tx OpProofsStorage, block_number: u64, address: Address, slots: &[B256], @@ -374,7 +187,7 @@ impl<'tx, Storage: OpProofsStorage + 'tx + Clone> DatabaseStorageProof<'tx, Stor } /// Extends [`StateRoot`] with operations specific for working with [`OpProofsStorage`]. -pub trait DatabaseStateRoot<'tx, Storage: OpProofsStorage + 'tx + Clone>: Sized { +pub trait DatabaseStateRoot<'tx, S: OpProofsStore + 'tx + Clone>: Sized { /// Calculate the state root for this [`HashedPostState`]. /// Internally, this method retrieves prefixsets and uses them /// to calculate incremental state root. @@ -383,7 +196,7 @@ pub trait DatabaseStateRoot<'tx, Storage: OpProofsStorage + 'tx + Clone>: Sized /// /// The state root for this [`HashedPostState`]. fn overlay_root( - storage: &'tx Storage, + storage: &'tx OpProofsStorage, block_number: u64, post_state: HashedPostState, ) -> Result; @@ -391,14 +204,14 @@ pub trait DatabaseStateRoot<'tx, Storage: OpProofsStorage + 'tx + Clone>: Sized /// Calculates the state root for this [`HashedPostState`] and returns it alongside trie /// updates. See [`Self::overlay_root`] for more info. fn overlay_root_with_updates( - storage: &'tx Storage, + storage: &'tx OpProofsStorage, block_number: u64, post_state: HashedPostState, ) -> Result<(B256, TrieUpdates), StateRootError>; /// Calculates the state root for provided [`HashedPostState`] using cached intermediate nodes. fn overlay_root_from_nodes( - storage: &'tx Storage, + storage: &'tx OpProofsStorage, block_number: u64, input: TrieInput, ) -> Result; @@ -406,20 +219,19 @@ pub trait DatabaseStateRoot<'tx, Storage: OpProofsStorage + 'tx + Clone>: Sized /// Calculates the state root and trie updates for provided [`HashedPostState`] using /// cached intermediate nodes. 
fn overlay_root_from_nodes_with_updates( - storage: &'tx Storage, + storage: &'tx OpProofsStorage, block_number: u64, input: TrieInput, ) -> Result<(B256, TrieUpdates), StateRootError>; } -impl<'tx, Storage: OpProofsStorage + 'tx + Clone> DatabaseStateRoot<'tx, Storage> - for StateRoot< - OpProofsTrieCursorFactory<'tx, Storage>, - OpProofsHashedAccountCursorFactory<'tx, Storage>, - > +impl<'tx, S> DatabaseStateRoot<'tx, S> + for StateRoot, OpProofsHashedAccountCursorFactory<'tx, S>> +where + S: OpProofsStore + 'tx + Clone, { fn overlay_root( - storage: &'tx Storage, + storage: &'tx OpProofsStorage, block_number: u64, post_state: HashedPostState, ) -> Result { @@ -437,7 +249,7 @@ impl<'tx, Storage: OpProofsStorage + 'tx + Clone> DatabaseStateRoot<'tx, Storage } fn overlay_root_with_updates( - storage: &'tx Storage, + storage: &'tx OpProofsStorage, block_number: u64, post_state: HashedPostState, ) -> Result<(B256, TrieUpdates), StateRootError> { @@ -455,7 +267,7 @@ impl<'tx, Storage: OpProofsStorage + 'tx + Clone> DatabaseStateRoot<'tx, Storage } fn overlay_root_from_nodes( - storage: &'tx Storage, + storage: &'tx OpProofsStorage, block_number: u64, input: TrieInput, ) -> Result { @@ -476,7 +288,7 @@ impl<'tx, Storage: OpProofsStorage + 'tx + Clone> DatabaseStateRoot<'tx, Storage } fn overlay_root_from_nodes_with_updates( - storage: &'tx Storage, + storage: &'tx OpProofsStorage, block_number: u64, input: TrieInput, ) -> Result<(B256, TrieUpdates), StateRootError> { @@ -498,24 +310,23 @@ impl<'tx, Storage: OpProofsStorage + 'tx + Clone> DatabaseStateRoot<'tx, Storage } /// Extends [`StorageRoot`] with operations specific for working with [`OpProofsStorage`]. -pub trait DatabaseStorageRoot<'tx, Storage: OpProofsStorage + 'tx + Clone> { +pub trait DatabaseStorageRoot<'tx, S: OpProofsStore + 'tx + Clone> { /// Calculates the storage root for provided [`HashedStorage`]. fn overlay_root( - storage: &'tx Storage, + storage: &'tx OpProofsStorage, block_number: u64, address: Address, hashed_storage: HashedStorage, ) -> Result; } -impl<'tx, Storage: OpProofsStorage + 'tx + Clone> DatabaseStorageRoot<'tx, Storage> - for StorageRoot< - OpProofsTrieCursorFactory<'tx, Storage>, - OpProofsHashedAccountCursorFactory<'tx, Storage>, - > +impl<'tx, S> DatabaseStorageRoot<'tx, S> + for StorageRoot, OpProofsHashedAccountCursorFactory<'tx, S>> +where + S: OpProofsStore + 'tx + Clone, { fn overlay_root( - storage: &'tx Storage, + storage: &'tx OpProofsStorage, block_number: u64, address: Address, hashed_storage: HashedStorage, @@ -538,26 +349,25 @@ impl<'tx, Storage: OpProofsStorage + 'tx + Clone> DatabaseStorageRoot<'tx, Stora } /// Extends [`TrieWitness`] with operations specific for working with [`OpProofsStorage`]. -pub trait DatabaseTrieWitness<'tx, Storage: OpProofsStorage + 'tx + Clone> { +pub trait DatabaseTrieWitness<'tx, S: OpProofsStore + 'tx + Clone> { /// Creates a new [`TrieWitness`] instance from [`OpProofsStorage`]. - fn from_tx(storage: &'tx Storage, block_number: u64) -> Self; + fn from_tx(storage: &'tx OpProofsStorage, block_number: u64) -> Self; /// Generates the trie witness for the target state based on [`TrieInput`]. 
fn overlay_witness( - storage: &'tx Storage, + storage: &'tx OpProofsStorage, block_number: u64, input: TrieInput, target: HashedPostState, ) -> Result, TrieWitnessError>; } -impl<'tx, Storage: OpProofsStorage + 'tx + Clone> DatabaseTrieWitness<'tx, Storage> - for TrieWitness< - OpProofsTrieCursorFactory<'tx, Storage>, - OpProofsHashedAccountCursorFactory<'tx, Storage>, - > +impl<'tx, S> DatabaseTrieWitness<'tx, S> + for TrieWitness, OpProofsHashedAccountCursorFactory<'tx, S>> +where + S: OpProofsStore + 'tx + Clone, { - fn from_tx(storage: &'tx Storage, block_number: u64) -> Self { + fn from_tx(storage: &'tx OpProofsStorage, block_number: u64) -> Self { Self::new( OpProofsTrieCursorFactory::new(storage, block_number), OpProofsHashedAccountCursorFactory::new(storage, block_number), @@ -565,7 +375,7 @@ impl<'tx, Storage: OpProofsStorage + 'tx + Clone> DatabaseTrieWitness<'tx, Stora } fn overlay_witness( - storage: &'tx Storage, + storage: &'tx OpProofsStorage, block_number: u64, input: TrieInput, target: HashedPostState, diff --git a/crates/optimism/trie/src/provider.rs b/crates/optimism/trie/src/provider.rs index 938ec61496a..ce7f2d99e5d 100644 --- a/crates/optimism/trie/src/provider.rs +++ b/crates/optimism/trie/src/provider.rs @@ -1,11 +1,11 @@ //! Provider for external proofs storage use crate::{ - api::{OpProofsHashedCursor, OpProofsStorage, OpProofsStorageError}, proof::{ DatabaseProof, DatabaseStateRoot, DatabaseStorageProof, DatabaseStorageRoot, DatabaseTrieWitness, }, + OpProofsHashedCursorRO, OpProofsStorage, OpProofsStorageError, OpProofsStore, }; use alloy_primitives::keccak256; use derive_more::Constructor; @@ -29,12 +29,12 @@ use std::fmt::Debug; /// State provider for external proofs storage. #[derive(Constructor)] -pub struct OpProofsStateProviderRef<'a, Storage: OpProofsStorage> { +pub struct OpProofsStateProviderRef<'a, Storage: OpProofsStore> { /// Historical state provider for non-state related tasks. latest: Box, /// Storage provider for state lookups. - storage: &'a Storage, + storage: &'a OpProofsStorage, /// Max block number that can be used for state lookups. 
block_number: BlockNumber, @@ -42,7 +42,7 @@ pub struct OpProofsStateProviderRef<'a, Storage: OpProofsStorage> { impl<'a, Storage> Debug for OpProofsStateProviderRef<'a, Storage> where - Storage: OpProofsStorage + 'a + Debug, + Storage: OpProofsStore + 'a + Debug, { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("OpProofsStateProviderRef") @@ -58,7 +58,7 @@ impl From for ProviderError { } } -impl<'a, Storage: OpProofsStorage> BlockHashReader for OpProofsStateProviderRef<'a, Storage> { +impl<'a, Storage: OpProofsStore> BlockHashReader for OpProofsStateProviderRef<'a, Storage> { fn block_hash(&self, number: BlockNumber) -> ProviderResult> { self.latest.block_hash(number) } @@ -72,7 +72,7 @@ impl<'a, Storage: OpProofsStorage> BlockHashReader for OpProofsStateProviderRef< } } -impl<'a, Storage: OpProofsStorage + Clone> StateRootProvider +impl<'a, Storage: OpProofsStore + Clone> StateRootProvider for OpProofsStateProviderRef<'a, Storage> { fn state_root(&self, state: HashedPostState) -> ProviderResult { @@ -102,7 +102,7 @@ impl<'a, Storage: OpProofsStorage + Clone> StateRootProvider } } -impl<'a, Storage: OpProofsStorage + Clone> StorageRootProvider +impl<'a, Storage: OpProofsStore + Clone> StorageRootProvider for OpProofsStateProviderRef<'a, Storage> { fn storage_root(&self, address: Address, storage: HashedStorage) -> ProviderResult { @@ -137,7 +137,7 @@ impl<'a, Storage: OpProofsStorage + Clone> StorageRootProvider } } -impl<'a, Storage: OpProofsStorage + Clone> StateProofProvider +impl<'a, Storage: OpProofsStore + Clone> StateProofProvider for OpProofsStateProviderRef<'a, Storage> { fn proof( @@ -166,15 +166,13 @@ impl<'a, Storage: OpProofsStorage + Clone> StateProofProvider } } -impl<'a, Storage: OpProofsStorage> HashedPostStateProvider - for OpProofsStateProviderRef<'a, Storage> -{ +impl<'a, Storage: OpProofsStore> HashedPostStateProvider for OpProofsStateProviderRef<'a, Storage> { fn hashed_post_state(&self, bundle_state: &BundleState) -> HashedPostState { HashedPostState::from_bundle_state::(bundle_state.state()) } } -impl<'a, Storage: OpProofsStorage> AccountReader for OpProofsStateProviderRef<'a, Storage> { +impl<'a, Storage: OpProofsStore> AccountReader for OpProofsStateProviderRef<'a, Storage> { fn basic_account(&self, address: &Address) -> ProviderResult> { let hashed_key = keccak256(address.0); Ok(self @@ -189,7 +187,7 @@ impl<'a, Storage: OpProofsStorage> AccountReader for OpProofsStateProviderRef<'a impl<'a, Storage> StateProvider for OpProofsStateProviderRef<'a, Storage> where - Storage: OpProofsStorage + Clone, + Storage: OpProofsStore + Clone, { fn storage(&self, address: Address, storage_key: B256) -> ProviderResult> { let hashed_key = keccak256(storage_key); @@ -203,7 +201,7 @@ where } } -impl<'a, Storage: OpProofsStorage> BytecodeReader for OpProofsStateProviderRef<'a, Storage> { +impl<'a, Storage: OpProofsStore> BytecodeReader for OpProofsStateProviderRef<'a, Storage> { fn bytecode_by_hash(&self, code_hash: &B256) -> ProviderResult> { self.latest.bytecode_by_hash(code_hash) } diff --git a/crates/optimism/trie/tests/lib.rs b/crates/optimism/trie/tests/lib.rs index 23ad43fedf0..a15bc0b5ed1 100644 --- a/crates/optimism/trie/tests/lib.rs +++ b/crates/optimism/trie/tests/lib.rs @@ -2,8 +2,8 @@ use alloy_primitives::{map::HashMap, B256, U256}; use reth_optimism_trie::{ - BlockStateDiff, InMemoryProofsStorage, OpProofsHashedCursor, OpProofsStorage, - OpProofsStorageError, OpProofsTrieCursor, + BlockStateDiff, InMemoryProofsStorage, 
OpProofsHashedCursorRO, OpProofsStorageError, + OpProofsStore, OpProofsTrieCursorRO, }; use reth_primitives_traits::Account; use reth_trie::{updates::TrieUpdates, BranchNodeCompact, HashedPostState, Nibbles, TrieMask}; @@ -66,7 +66,7 @@ fn create_test_account_with_values(nonce: u64, balance: u64, code_hash_byte: u8) /// Test basic storage and retrieval of earliest block number #[test_case(InMemoryProofsStorage::new(); "InMemory")] #[tokio::test] -async fn test_earliest_block_operations( +async fn test_earliest_block_operations( storage: S, ) -> Result<(), OpProofsStorageError> { // Initially should be None @@ -87,7 +87,7 @@ async fn test_earliest_block_operations( /// Test storing and retrieving trie updates #[test_case(InMemoryProofsStorage::new(); "InMemory")] #[tokio::test] -async fn test_trie_updates_operations( +async fn test_trie_updates_operations( storage: S, ) -> Result<(), OpProofsStorageError> { let block_number = 50; @@ -114,9 +114,7 @@ async fn test_trie_updates_operations( /// Test cursor operations on empty trie #[test_case(InMemoryProofsStorage::new(); "InMemory")] #[tokio::test] -async fn test_cursor_empty_trie( - storage: S, -) -> Result<(), OpProofsStorageError> { +async fn test_cursor_empty_trie(storage: S) -> Result<(), OpProofsStorageError> { let mut cursor = storage.account_trie_cursor(100)?; // All operations should return None on empty trie @@ -131,7 +129,7 @@ async fn test_cursor_empty_trie( /// Test cursor operations with single entry #[test_case(InMemoryProofsStorage::new(); "InMemory")] #[tokio::test] -async fn test_cursor_single_entry( +async fn test_cursor_single_entry( storage: S, ) -> Result<(), OpProofsStorageError> { let path = nibbles_from(vec![1, 2, 3]); @@ -158,7 +156,7 @@ async fn test_cursor_single_entry( /// Test cursor operations with multiple entries #[test_case(InMemoryProofsStorage::new(); "InMemory")] #[tokio::test] -async fn test_cursor_multiple_entries( +async fn test_cursor_multiple_entries( storage: S, ) -> Result<(), OpProofsStorageError> { let paths = vec![ @@ -198,7 +196,7 @@ async fn test_cursor_multiple_entries( /// Test `seek_exact` with existing path #[test_case(InMemoryProofsStorage::new(); "InMemory")] #[tokio::test] -async fn test_seek_exact_existing_path( +async fn test_seek_exact_existing_path( storage: S, ) -> Result<(), OpProofsStorageError> { let path = nibbles_from(vec![1, 2, 3]); @@ -216,7 +214,7 @@ async fn test_seek_exact_existing_path( /// Test `seek_exact` with non-existing path #[test_case(InMemoryProofsStorage::new(); "InMemory")] #[tokio::test] -async fn test_seek_exact_non_existing_path( +async fn test_seek_exact_non_existing_path( storage: S, ) -> Result<(), OpProofsStorageError> { let path = nibbles_from(vec![1, 2, 3]); @@ -234,7 +232,7 @@ async fn test_seek_exact_non_existing_path( /// Test `seek_exact` with empty path #[test_case(InMemoryProofsStorage::new(); "InMemory")] #[tokio::test] -async fn test_seek_exact_empty_path( +async fn test_seek_exact_empty_path( storage: S, ) -> Result<(), OpProofsStorageError> { let path = nibbles_from(vec![]); @@ -252,7 +250,7 @@ async fn test_seek_exact_empty_path( /// Test seek to existing path #[test_case(InMemoryProofsStorage::new(); "InMemory")] #[tokio::test] -async fn test_seek_to_existing_path( +async fn test_seek_to_existing_path( storage: S, ) -> Result<(), OpProofsStorageError> { let path = nibbles_from(vec![1, 2, 3]); @@ -270,7 +268,7 @@ async fn test_seek_to_existing_path( /// Test seek between existing nodes #[test_case(InMemoryProofsStorage::new(); "InMemory")] 
#[tokio::test] -async fn test_seek_between_existing_nodes( +async fn test_seek_between_existing_nodes( storage: S, ) -> Result<(), OpProofsStorageError> { let path1 = nibbles_from(vec![1]); @@ -292,7 +290,7 @@ async fn test_seek_between_existing_nodes( /// Test seek after all nodes #[test_case(InMemoryProofsStorage::new(); "InMemory")] #[tokio::test] -async fn test_seek_after_all_nodes( +async fn test_seek_after_all_nodes( storage: S, ) -> Result<(), OpProofsStorageError> { let path = nibbles_from(vec![1]); @@ -311,7 +309,7 @@ async fn test_seek_after_all_nodes( /// Test seek before all nodes #[test_case(InMemoryProofsStorage::new(); "InMemory")] #[tokio::test] -async fn test_seek_before_all_nodes( +async fn test_seek_before_all_nodes( storage: S, ) -> Result<(), OpProofsStorageError> { let path = nibbles_from(vec![5]); @@ -335,7 +333,7 @@ async fn test_seek_before_all_nodes( /// Test next without prior seek #[test_case(InMemoryProofsStorage::new(); "InMemory")] #[tokio::test] -async fn test_next_without_prior_seek( +async fn test_next_without_prior_seek( storage: S, ) -> Result<(), OpProofsStorageError> { let path = nibbles_from(vec![1, 2]); @@ -354,7 +352,7 @@ async fn test_next_without_prior_seek( /// Test next after seek #[test_case(InMemoryProofsStorage::new(); "InMemory")] #[tokio::test] -async fn test_next_after_seek(storage: S) -> Result<(), OpProofsStorageError> { +async fn test_next_after_seek(storage: S) -> Result<(), OpProofsStorageError> { let path1 = nibbles_from(vec![1]); let path2 = nibbles_from(vec![2]); let branch = create_test_branch(); @@ -375,7 +373,7 @@ async fn test_next_after_seek(storage: S) -> Result<(), OpPr /// Test next at end of trie #[test_case(InMemoryProofsStorage::new(); "InMemory")] #[tokio::test] -async fn test_next_at_end_of_trie( +async fn test_next_at_end_of_trie( storage: S, ) -> Result<(), OpProofsStorageError> { let path = nibbles_from(vec![1]); @@ -395,7 +393,7 @@ async fn test_next_at_end_of_trie( /// Test multiple consecutive next calls #[test_case(InMemoryProofsStorage::new(); "InMemory")] #[tokio::test] -async fn test_multiple_consecutive_next( +async fn test_multiple_consecutive_next( storage: S, ) -> Result<(), OpProofsStorageError> { let paths = vec![nibbles_from(vec![1]), nibbles_from(vec![2]), nibbles_from(vec![3])]; @@ -422,7 +420,7 @@ async fn test_multiple_consecutive_next( /// Test current after operations #[test_case(InMemoryProofsStorage::new(); "InMemory")] #[tokio::test] -async fn test_current_after_operations( +async fn test_current_after_operations( storage: S, ) -> Result<(), OpProofsStorageError> { let path1 = nibbles_from(vec![1]); @@ -451,7 +449,7 @@ async fn test_current_after_operations( /// Test current with no prior operations #[test_case(InMemoryProofsStorage::new(); "InMemory")] #[tokio::test] -async fn test_current_no_prior_operations( +async fn test_current_no_prior_operations( storage: S, ) -> Result<(), OpProofsStorageError> { let mut cursor = storage.account_trie_cursor(100)?; @@ -469,7 +467,7 @@ async fn test_current_no_prior_operations( /// Test same path with different blocks #[test_case(InMemoryProofsStorage::new(); "InMemory")] #[tokio::test] -async fn test_same_path_different_blocks( +async fn test_same_path_different_blocks( storage: S, ) -> Result<(), OpProofsStorageError> { let path = nibbles_from(vec![1, 2]); @@ -496,7 +494,7 @@ async fn test_same_path_different_blocks( /// Test deleted branch nodes #[test_case(InMemoryProofsStorage::new(); "InMemory")] #[tokio::test] -async fn test_deleted_branch_nodes( 
+async fn test_deleted_branch_nodes( storage: S, ) -> Result<(), OpProofsStorageError> { let path = nibbles_from(vec![1, 2]); @@ -525,7 +523,7 @@ async fn test_deleted_branch_nodes( /// Test account-specific cursor #[test_case(InMemoryProofsStorage::new(); "InMemory")] #[tokio::test] -async fn test_account_specific_cursor( +async fn test_account_specific_cursor( storage: S, ) -> Result<(), OpProofsStorageError> { let path = nibbles_from(vec![1, 2]); @@ -561,9 +559,7 @@ async fn test_account_specific_cursor( /// Test state trie cursor #[test_case(InMemoryProofsStorage::new(); "InMemory")] #[tokio::test] -async fn test_state_trie_cursor( - storage: S, -) -> Result<(), OpProofsStorageError> { +async fn test_state_trie_cursor(storage: S) -> Result<(), OpProofsStorageError> { let path = nibbles_from(vec![1, 2]); let addr = B256::repeat_byte(0x01); let branch = create_test_branch(); @@ -592,7 +588,7 @@ async fn test_state_trie_cursor( /// Test mixed account and state data #[test_case(InMemoryProofsStorage::new(); "InMemory")] #[tokio::test] -async fn test_mixed_account_state_data( +async fn test_mixed_account_state_data( storage: S, ) -> Result<(), OpProofsStorageError> { let path1 = nibbles_from(vec![1]); @@ -632,7 +628,7 @@ async fn test_mixed_account_state_data( /// Test lexicographic ordering #[test_case(InMemoryProofsStorage::new(); "InMemory")] #[tokio::test] -async fn test_lexicographic_ordering( +async fn test_lexicographic_ordering( storage: S, ) -> Result<(), OpProofsStorageError> { let paths = vec![ @@ -670,7 +666,7 @@ async fn test_lexicographic_ordering( /// Test path prefix scenarios #[test_case(InMemoryProofsStorage::new(); "InMemory")] #[tokio::test] -async fn test_path_prefix_scenarios( +async fn test_path_prefix_scenarios( storage: S, ) -> Result<(), OpProofsStorageError> { let paths = vec![ @@ -703,7 +699,7 @@ async fn test_path_prefix_scenarios( /// Test complex nibble combinations #[test_case(InMemoryProofsStorage::new(); "InMemory")] #[tokio::test] -async fn test_complex_nibble_combinations( +async fn test_complex_nibble_combinations( storage: S, ) -> Result<(), OpProofsStorageError> { // Test various nibble patterns including edge values @@ -744,7 +740,7 @@ async fn test_complex_nibble_combinations( /// Test store and retrieve single account #[test_case(InMemoryProofsStorage::new(); "InMemory")] #[tokio::test] -async fn test_store_and_retrieve_single_account( +async fn test_store_and_retrieve_single_account( storage: S, ) -> Result<(), OpProofsStorageError> { let account_key = B256::repeat_byte(0x01); @@ -768,7 +764,7 @@ async fn test_store_and_retrieve_single_account( /// Test account cursor navigation #[test_case(InMemoryProofsStorage::new(); "InMemory")] #[tokio::test] -async fn test_account_cursor_navigation( +async fn test_account_cursor_navigation( storage: S, ) -> Result<(), OpProofsStorageError> { let accounts = [ @@ -805,7 +801,7 @@ async fn test_account_cursor_navigation( /// Test account block versioning #[test_case(InMemoryProofsStorage::new(); "InMemory")] #[tokio::test] -async fn test_account_block_versioning( +async fn test_account_block_versioning( storage: S, ) -> Result<(), OpProofsStorageError> { let account_key = B256::repeat_byte(0x01); @@ -835,7 +831,7 @@ async fn test_account_block_versioning( /// Test store and retrieve storage #[test_case(InMemoryProofsStorage::new(); "InMemory")] #[tokio::test] -async fn test_store_and_retrieve_storage( +async fn test_store_and_retrieve_storage( storage: S, ) -> Result<(), OpProofsStorageError> { let 
hashed_address = B256::repeat_byte(0x01); @@ -864,7 +860,7 @@ async fn test_store_and_retrieve_storage( /// Test storage cursor navigation #[test_case(InMemoryProofsStorage::new(); "InMemory")] #[tokio::test] -async fn test_storage_cursor_navigation( +async fn test_storage_cursor_navigation( storage: S, ) -> Result<(), OpProofsStorageError> { let hashed_address = B256::repeat_byte(0x01); @@ -895,7 +891,7 @@ async fn test_storage_cursor_navigation( /// Test storage account isolation #[test_case(InMemoryProofsStorage::new(); "InMemory")] #[tokio::test] -async fn test_storage_account_isolation( +async fn test_storage_account_isolation( storage: S, ) -> Result<(), OpProofsStorageError> { let address1 = B256::repeat_byte(0x01); @@ -929,7 +925,7 @@ async fn test_storage_account_isolation( /// Test storage block versioning #[test_case(InMemoryProofsStorage::new(); "InMemory")] #[tokio::test] -async fn test_storage_block_versioning( +async fn test_storage_block_versioning( storage: S, ) -> Result<(), OpProofsStorageError> { let hashed_address = B256::repeat_byte(0x01); @@ -955,7 +951,7 @@ async fn test_storage_block_versioning( /// Test storage zero value deletion #[test_case(InMemoryProofsStorage::new(); "InMemory")] #[tokio::test] -async fn test_storage_zero_value_deletion( +async fn test_storage_zero_value_deletion( storage: S, ) -> Result<(), OpProofsStorageError> { let hashed_address = B256::repeat_byte(0x01); @@ -983,7 +979,7 @@ async fn test_storage_zero_value_deletion( /// Test that zero values are skipped during iteration #[test_case(InMemoryProofsStorage::new(); "InMemory")] #[tokio::test] -async fn test_storage_cursor_skips_zero_values( +async fn test_storage_cursor_skips_zero_values( storage: S, ) -> Result<(), OpProofsStorageError> { let hashed_address = B256::repeat_byte(0x01); @@ -1031,7 +1027,7 @@ async fn test_storage_cursor_skips_zero_values( /// Test empty cursors #[test_case(InMemoryProofsStorage::new(); "InMemory")] #[tokio::test] -async fn test_empty_cursors(storage: S) -> Result<(), OpProofsStorageError> { +async fn test_empty_cursors(storage: S) -> Result<(), OpProofsStorageError> { // Test empty account cursor let mut account_cursor = storage.account_hashed_cursor(100)?; assert!(account_cursor.seek(B256::repeat_byte(0x01))?.is_none()); @@ -1048,7 +1044,7 @@ async fn test_empty_cursors(storage: S) -> Result<(), OpProo /// Test cursor boundary conditions #[test_case(InMemoryProofsStorage::new(); "InMemory")] #[tokio::test] -async fn test_cursor_boundary_conditions( +async fn test_cursor_boundary_conditions( storage: S, ) -> Result<(), OpProofsStorageError> { let account_key = B256::repeat_byte(0x80); // Middle value @@ -1076,7 +1072,7 @@ async fn test_cursor_boundary_conditions( /// Test large batch operations #[test_case(InMemoryProofsStorage::new(); "InMemory")] #[tokio::test] -async fn test_large_batch_operations( +async fn test_large_batch_operations( storage: S, ) -> Result<(), OpProofsStorageError> { // Create large batch of accounts @@ -1113,7 +1109,7 @@ async fn test_large_batch_operations( /// it should iterate all existing values for that address and create deletion entries for them. #[test_case(InMemoryProofsStorage::new(); "InMemory")] #[tokio::test] -async fn test_store_trie_updates_with_wiped_storage( +async fn test_store_trie_updates_with_wiped_storage( storage: S, ) -> Result<(), OpProofsStorageError> { use reth_trie::HashedStorage; @@ -1198,7 +1194,7 @@ async fn test_store_trie_updates_with_wiped_storage( /// through the cursor APIs. 
#[test_case(InMemoryProofsStorage::new(); "InMemory")] #[tokio::test] -async fn test_store_trie_updates_comprehensive( +async fn test_store_trie_updates_comprehensive( storage: S, ) -> Result<(), OpProofsStorageError> { use reth_trie::{updates::StorageTrieUpdates, HashedStorage}; @@ -1366,7 +1362,7 @@ async fn test_store_trie_updates_comprehensive( /// (`hashed_accounts`, `hashed_storages`, `account_branches`, `storage_branches`). #[test_case(InMemoryProofsStorage::new(); "InMemory")] #[tokio::test] -async fn test_replace_updates_applies_all_updates( +async fn test_replace_updates_applies_all_updates( storage: S, ) -> Result<(), OpProofsStorageError> { use reth_trie::{updates::StorageTrieUpdates, HashedStorage}; @@ -1602,7 +1598,7 @@ async fn test_replace_updates_applies_all_updates( /// it is properly stored as a deletion and subsequent queries return None for that path. #[test_case(InMemoryProofsStorage::new(); "InMemory")] #[tokio::test] -async fn test_pure_deletions_stored_correctly( +async fn test_pure_deletions_stored_correctly( storage: S, ) -> Result<(), OpProofsStorageError> { use reth_trie::updates::StorageTrieUpdates; @@ -1730,7 +1726,7 @@ async fn test_pure_deletions_stored_correctly( /// when processing trie updates that both remove and update the same node. #[test_case(InMemoryProofsStorage::new(); "InMemory")] #[tokio::test] -async fn test_updates_take_precedence_over_removals( +async fn test_updates_take_precedence_over_removals( storage: S, ) -> Result<(), OpProofsStorageError> { use reth_trie::updates::StorageTrieUpdates; diff --git a/crates/optimism/trie/tests/live.rs b/crates/optimism/trie/tests/live.rs index 894b3602b58..3aa7568d047 100644 --- a/crates/optimism/trie/tests/live.rs +++ b/crates/optimism/trie/tests/live.rs @@ -1,415 +1,395 @@ //! End-to-end test of the live trie collector. 
-#[cfg(test)] -mod tests { - use alloy_consensus::{constants::ETH_TO_WEI, BlockHeader, Header, TxEip2930}; - use alloy_genesis::{Genesis, GenesisAccount}; - use alloy_primitives::{Address, TxKind, B256, U256}; - use reth_chainspec::{ - ChainSpec, ChainSpecBuilder, EthereumHardfork, MAINNET, MIN_TRANSACTION_GAS, - }; - use reth_db::Database; - use reth_db_common::init::init_genesis; - use reth_ethereum_primitives::{Block, BlockBody, Receipt, TransactionSigned}; - use reth_evm::{execute::Executor, ConfigureEvm}; - use reth_evm_ethereum::EthEvmConfig; - use reth_node_api::{FullNodePrimitives, NodeTypesWithDB}; - use reth_optimism_trie::{ - backfill::BackfillJob, in_memory::InMemoryProofsStorage, live::LiveTrieCollector, - }; - use reth_primitives_traits::{ - crypto::secp256k1::public_key_to_address, Block as _, RecoveredBlock, - }; - use reth_provider::{ - providers::{BlockchainProvider, ProviderNodeTypes}, - test_utils::create_test_provider_factory_with_chain_spec, - BlockWriter as _, ExecutionOutcome, HashedPostStateProvider, LatestStateProviderRef, - ProviderFactory, StateRootProvider, - }; - use reth_revm::database::StateProviderDatabase; - use reth_testing_utils::generators::sign_tx_with_key_pair; - use secp256k1::{rand::thread_rng, Keypair, Secp256k1}; - use std::sync::Arc; - - /// Specification for a transaction within a block - #[derive(Debug, Clone)] - struct TxSpec { - /// Recipient address for the transaction - to: Address, - /// Value to transfer (in wei) - value: U256, - /// Nonce for the transaction (will be automatically assigned if None) - nonce: Option, - } - - impl TxSpec { - /// Create a simple transfer transaction - fn transfer(to: Address, value: U256) -> Self { - Self { to, value, nonce: None } - } - } +#![cfg(not(feature = "metrics"))] // todo: this can be removed with smol fixes +#![expect(unused_crate_dependencies)] + +use alloy_consensus::{constants::ETH_TO_WEI, BlockHeader, Header, TxEip2930}; +use alloy_genesis::{Genesis, GenesisAccount}; +use alloy_primitives::{Address, TxKind, B256, U256}; +use reth_chainspec::{ChainSpec, ChainSpecBuilder, EthereumHardfork, MAINNET, MIN_TRANSACTION_GAS}; +use reth_db::Database; +use reth_db_common::init::init_genesis; +use reth_ethereum_primitives::{Block, BlockBody, Receipt, TransactionSigned}; +use reth_evm::{execute::Executor, ConfigureEvm}; +use reth_evm_ethereum::EthEvmConfig; +use reth_node_api::{FullNodePrimitives, NodeTypesWithDB}; +use reth_optimism_trie::{ + backfill::BackfillJob, in_memory::InMemoryProofsStorage, live::LiveTrieCollector, +}; +use reth_primitives_traits::{ + crypto::secp256k1::public_key_to_address, Block as _, RecoveredBlock, +}; +use reth_provider::{ + providers::{BlockchainProvider, ProviderNodeTypes}, + test_utils::create_test_provider_factory_with_chain_spec, + BlockWriter as _, ExecutionOutcome, HashedPostStateProvider, LatestStateProviderRef, + ProviderFactory, StateRootProvider, +}; +use reth_revm::database::StateProviderDatabase; +use reth_testing_utils::generators::sign_tx_with_key_pair; +use secp256k1::{rand::thread_rng, Keypair, Secp256k1}; +use std::sync::Arc; + +/// Specification for a transaction within a block +#[derive(Debug, Clone)] +struct TxSpec { + /// Recipient address for the transaction + to: Address, + /// Value to transfer (in wei) + value: U256, + /// Nonce for the transaction (will be automatically assigned if None) + nonce: Option, +} - /// Specification for a block in the test chain - #[derive(Debug, Clone)] - struct BlockSpec { - /// Transactions to include in this block - 
txs: Vec, +impl TxSpec { + /// Create a simple transfer transaction + fn transfer(to: Address, value: U256) -> Self { + Self { to, value, nonce: None } } +} - impl BlockSpec { - /// Create a block spec with the given transactions - fn new(txs: Vec) -> Self { - Self { txs } - } - } +/// Specification for a block in the test chain +#[derive(Debug, Clone)] +struct BlockSpec { + /// Transactions to include in this block + txs: Vec, +} - /// Configuration for a test scenario - #[derive(Debug)] - struct TestScenario { - /// Blocks to execute before running the backfill job - blocks_before_backfill: Vec, - /// Blocks to execute after backfill using the live collector - blocks_after_backfill: Vec, +impl BlockSpec { + /// Create a block spec with the given transactions + fn new(txs: Vec) -> Self { + Self { txs } } +} - impl TestScenario { - /// Create a simple scenario with blocks before and after backfill - fn new( - blocks_before_backfill: Vec, - blocks_after_backfill: Vec, - ) -> Self { - Self { blocks_before_backfill, blocks_after_backfill } - } - } +/// Configuration for a test scenario +#[derive(Debug)] +struct TestScenario { + /// Blocks to execute before running the backfill job + blocks_before_backfill: Vec, + /// Blocks to execute after backfill using the live collector + blocks_after_backfill: Vec, +} - /// Helper to create a chain spec with a genesis account funded - fn chain_spec_with_address(address: Address) -> Arc { - Arc::new( - ChainSpecBuilder::default() - .chain(MAINNET.chain) - .genesis(Genesis { - alloc: [( - address, - GenesisAccount { - balance: U256::from(10 * ETH_TO_WEI), - ..Default::default() - }, - )] - .into(), - ..MAINNET.genesis.clone() - }) - .paris_activated() - .build(), - ) +impl TestScenario { + /// Create a simple scenario with blocks before and after backfill + fn new(blocks_before_backfill: Vec, blocks_after_backfill: Vec) -> Self { + Self { blocks_before_backfill, blocks_after_backfill } } +} - /// Creates a block from a spec, executing transactions with the given keypair - fn create_block_from_spec( - spec: &BlockSpec, - block_number: u64, - parent_hash: B256, - chain_spec: &Arc, - key_pair: Keypair, - nonce_counter: &mut u64, - ) -> RecoveredBlock { - let transactions: Vec = spec - .txs - .iter() - .map(|tx_spec| { - let nonce = tx_spec.nonce.unwrap_or_else(|| { - let current = *nonce_counter; - *nonce_counter += 1; - current - }); - - sign_tx_with_key_pair( - key_pair, - TxEip2930 { - chain_id: chain_spec.chain.id(), - nonce, - gas_limit: MIN_TRANSACTION_GAS, - gas_price: 1_500_000_000, - to: TxKind::Call(tx_spec.to), - value: tx_spec.value, - ..Default::default() - } - .into(), - ) +/// Helper to create a chain spec with a genesis account funded +fn chain_spec_with_address(address: Address) -> Arc { + Arc::new( + ChainSpecBuilder::default() + .chain(MAINNET.chain) + .genesis(Genesis { + alloc: [( + address, + GenesisAccount { balance: U256::from(10 * ETH_TO_WEI), ..Default::default() }, + )] + .into(), + ..MAINNET.genesis.clone() }) - .collect(); - - let gas_total = transactions.len() as u64 * MIN_TRANSACTION_GAS; - - Block { - header: Header { - parent_hash, - receipts_root: alloy_primitives::b256!( - "0xd3a6acf9a244d78b33831df95d472c4128ea85bf079a1d41e32ed0b7d2244c9e" - ), - difficulty: chain_spec.fork(EthereumHardfork::Paris).ttd().expect("Paris TTD"), - number: block_number, - gas_limit: gas_total.max(MIN_TRANSACTION_GAS), - gas_used: gas_total, - state_root: B256::ZERO, // Will be calculated by executor - ..Default::default() - }, - body: BlockBody 
{ transactions, ..Default::default() }, - } - .try_into_recovered() - .unwrap() - } - - /// Executes a block and returns the updated block with correct state root - fn execute_block( - block: &mut RecoveredBlock, - provider_factory: &ProviderFactory, - chain_spec: &Arc, - ) -> eyre::Result> - where - N: ProviderNodeTypes< - Primitives: FullNodePrimitives< - Block = Block, - BlockBody = BlockBody, - Receipt = Receipt, - >, - > + NodeTypesWithDB, - { - let provider = provider_factory.provider()?; - let db = StateProviderDatabase::new(LatestStateProviderRef::new(&provider)); - let evm_config = EthEvmConfig::ethereum(chain_spec.clone()); - let block_executor = evm_config.batch_executor(db); + .paris_activated() + .build(), + ) +} - let execution_result = block_executor.execute(block)?; +/// Creates a block from a spec, executing transactions with the given keypair +fn create_block_from_spec( + spec: &BlockSpec, + block_number: u64, + parent_hash: B256, + chain_spec: &Arc, + key_pair: Keypair, + nonce_counter: &mut u64, +) -> RecoveredBlock { + let transactions: Vec = spec + .txs + .iter() + .map(|tx_spec| { + let nonce = tx_spec.nonce.unwrap_or_else(|| { + let current = *nonce_counter; + *nonce_counter += 1; + current + }); + + sign_tx_with_key_pair( + key_pair, + TxEip2930 { + chain_id: chain_spec.chain.id(), + nonce, + gas_limit: MIN_TRANSACTION_GAS, + gas_price: 1_500_000_000, + to: TxKind::Call(tx_spec.to), + value: tx_spec.value, + ..Default::default() + } + .into(), + ) + }) + .collect(); + + let gas_total = transactions.len() as u64 * MIN_TRANSACTION_GAS; + + Block { + header: Header { + parent_hash, + receipts_root: alloy_primitives::b256!( + "0xd3a6acf9a244d78b33831df95d472c4128ea85bf079a1d41e32ed0b7d2244c9e" + ), + difficulty: chain_spec.fork(EthereumHardfork::Paris).ttd().expect("Paris TTD"), + number: block_number, + gas_limit: gas_total.max(MIN_TRANSACTION_GAS), + gas_used: gas_total, + state_root: B256::ZERO, // Will be calculated by executor + ..Default::default() + }, + body: BlockBody { transactions, ..Default::default() }, + } + .try_into_recovered() + .unwrap() +} - let hashed_state = - LatestStateProviderRef::new(&provider).hashed_post_state(&execution_result.state); - let state_root = LatestStateProviderRef::new(&provider).state_root(hashed_state)?; +/// Executes a block and returns the updated block with correct state root +fn execute_block( + block: &mut RecoveredBlock, + provider_factory: &ProviderFactory, + chain_spec: &Arc, +) -> eyre::Result> +where + N: ProviderNodeTypes< + Primitives: FullNodePrimitives, + > + NodeTypesWithDB, +{ + let provider = provider_factory.provider()?; + let db = StateProviderDatabase::new(LatestStateProviderRef::new(&provider)); + let evm_config = EthEvmConfig::ethereum(chain_spec.clone()); + let block_executor = evm_config.batch_executor(db); + + let execution_result = block_executor.execute(block)?; + + let hashed_state = + LatestStateProviderRef::new(&provider).hashed_post_state(&execution_result.state); + let state_root = LatestStateProviderRef::new(&provider).state_root(hashed_state)?; + + block.set_state_root(state_root); + + Ok(execution_result) +} - block.set_state_root(state_root); +/// Commits a block and its execution output to the database +fn commit_block_to_database( + block: &RecoveredBlock, + execution_output: &reth_evm::execute::BlockExecutionOutput, + provider_factory: &ProviderFactory, +) -> eyre::Result<()> +where + N: ProviderNodeTypes< + Primitives: FullNodePrimitives, + > + NodeTypesWithDB, +{ + let execution_outcome 
= ExecutionOutcome { + bundle: execution_output.state.clone(), + receipts: vec![execution_output.receipts.clone()], + first_block: block.number(), + requests: vec![execution_output.requests.clone()], + }; - Ok(execution_result) - } + // Calculate hashed state from execution result + let state_provider = provider_factory.provider()?; + let hashed_state = HashedPostStateProvider::hashed_post_state( + &LatestStateProviderRef::new(&state_provider), + &execution_output.state, + ); + + let provider_rw = provider_factory.provider_rw()?; + provider_rw.append_blocks_with_state( + vec![block.clone()], + &execution_outcome, + hashed_state.into_sorted(), + )?; + provider_rw.commit()?; + + Ok(()) +} - /// Commits a block and its execution output to the database - fn commit_block_to_database( - block: &RecoveredBlock, - execution_output: &reth_evm::execute::BlockExecutionOutput, - provider_factory: &ProviderFactory, - ) -> eyre::Result<()> - where - N: ProviderNodeTypes< - Primitives: FullNodePrimitives< - Block = Block, - BlockBody = BlockBody, - Receipt = Receipt, - >, - > + NodeTypesWithDB, - { - let execution_outcome = ExecutionOutcome { - bundle: execution_output.state.clone(), - receipts: vec![execution_output.receipts.clone()], - first_block: block.number(), - requests: vec![execution_output.requests.clone()], - }; - - // Calculate hashed state from execution result - let state_provider = provider_factory.provider()?; - let hashed_state = HashedPostStateProvider::hashed_post_state( - &LatestStateProviderRef::new(&state_provider), - &execution_output.state, +/// Runs a test scenario with the given configuration +async fn run_test_scenario( + scenario: TestScenario, + provider_factory: ProviderFactory, + chain_spec: Arc, + key_pair: Keypair, + storage: Arc, +) -> eyre::Result<()> +where + N: ProviderNodeTypes< + Primitives: FullNodePrimitives, + > + NodeTypesWithDB, +{ + let genesis_hash = chain_spec.genesis_hash(); + let mut nonce_counter = 0u64; + let mut last_block_hash = genesis_hash; + let mut last_block_number = 0u64; + + // Execute blocks before backfill + for (idx, block_spec) in scenario.blocks_before_backfill.iter().enumerate() { + let block_number = idx as u64 + 1; + let mut block = create_block_from_spec( + block_spec, + block_number, + last_block_hash, + &chain_spec, + key_pair, + &mut nonce_counter, ); - let provider_rw = provider_factory.provider_rw()?; - provider_rw.append_blocks_with_state( - vec![block.clone()], - &execution_outcome, - hashed_state.into_sorted(), - )?; - provider_rw.commit()?; + let execution_output = execute_block(&mut block, &provider_factory, &chain_spec)?; + commit_block_to_database(&block, &execution_output, &provider_factory)?; - Ok(()) + last_block_hash = block.hash(); + last_block_number = block_number; } - /// Runs a test scenario with the given configuration - async fn run_test_scenario( - scenario: TestScenario, - provider_factory: ProviderFactory, - chain_spec: Arc, - key_pair: Keypair, - storage: Arc, - ) -> eyre::Result<()> - where - N: ProviderNodeTypes< - Primitives: FullNodePrimitives< - Block = Block, - BlockBody = BlockBody, - Receipt = Receipt, - >, - > + NodeTypesWithDB, { - let genesis_hash = chain_spec.genesis_hash(); - let mut nonce_counter = 0u64; - let mut last_block_hash = genesis_hash; - let mut last_block_number = 0u64; - - // Execute blocks before backfill - for (idx, block_spec) in scenario.blocks_before_backfill.iter().enumerate() { - let block_number = idx as u64 + 1; - let mut block = create_block_from_spec( - block_spec, - 
block_number, - last_block_hash, - &chain_spec, - key_pair, - &mut nonce_counter, - ); - - let execution_output = execute_block(&mut block, &provider_factory, &chain_spec)?; - commit_block_to_database(&block, &execution_output, &provider_factory)?; - - last_block_hash = block.hash(); - last_block_number = block_number; - } - - { - let provider = provider_factory.db_ref(); - let tx = provider.tx()?; - let backfill_job = BackfillJob::new(storage.clone(), &tx); - backfill_job.run(last_block_number, last_block_hash).await?; - } - - // Execute blocks after backfill using live collector - let evm_config = EthEvmConfig::ethereum(chain_spec.clone()); - - for (idx, block_spec) in scenario.blocks_after_backfill.iter().enumerate() { - let block_number = last_block_number + idx as u64 + 1; - let mut block = create_block_from_spec( - block_spec, - block_number, - last_block_hash, - &chain_spec, - key_pair, - &mut nonce_counter, - ); - - // Execute the block to get the correct state root - let execution_output = execute_block(&mut block, &provider_factory, &chain_spec)?; - - // Create a fresh blockchain provider to ensure it sees all committed blocks - let blockchain_db = BlockchainProvider::new(provider_factory.clone())?; - let live_trie_collector = - LiveTrieCollector::new(evm_config.clone(), blockchain_db, &storage); + let provider = provider_factory.db_ref(); + let tx = provider.tx()?; + let backfill_job = BackfillJob::new(storage.clone(), &tx); + backfill_job.run(last_block_number, last_block_hash).await?; + } - // Use the live collector to execute and store trie updates - live_trie_collector.execute_and_store_block_updates(&block).await?; + // Execute blocks after backfill using live collector + let evm_config = EthEvmConfig::ethereum(chain_spec.clone()); + + for (idx, block_spec) in scenario.blocks_after_backfill.iter().enumerate() { + let block_number = last_block_number + idx as u64 + 1; + let mut block = create_block_from_spec( + block_spec, + block_number, + last_block_hash, + &chain_spec, + key_pair, + &mut nonce_counter, + ); - // Commit the block to the database so subsequent blocks can build on it - commit_block_to_database(&block, &execution_output, &provider_factory)?; + // Execute the block to get the correct state root + let execution_output = execute_block(&mut block, &provider_factory, &chain_spec)?; - last_block_hash = block.hash(); - } + // Create a fresh blockchain provider to ensure it sees all committed blocks + let blockchain_db = BlockchainProvider::new(provider_factory.clone())?; + let live_trie_collector = + LiveTrieCollector::new(evm_config.clone(), blockchain_db, &storage); - Ok(()) - } + // Use the live collector to execute and store trie updates + live_trie_collector.execute_and_store_block_updates(&block).await?; - /// End-to-end test of a single live collector iteration. 
- /// (1) Creates a chain with some state - /// (2) Stores the genesis state into storage via backfill - /// (3) Executes a block and calculates the state root using the stored state - #[tokio::test] - async fn test_execute_and_store_block_updates() { - let storage = Arc::new(InMemoryProofsStorage::new()); - - // Create a keypair for signing transactions - let secp = Secp256k1::new(); - let key_pair = Keypair::new(&secp, &mut thread_rng()); - let sender = public_key_to_address(key_pair.public_key()); - - // Create chain spec with the sender address funded in genesis - let chain_spec = chain_spec_with_address(sender); - - // Create test database and provider factory - let provider_factory = create_test_provider_factory_with_chain_spec(chain_spec.clone()); - - // Insert genesis state into the database - init_genesis(&provider_factory).unwrap(); - - // Define the test scenario: - // - No blocks before backfill - // - Backfill to genesis (block 0) - // - Execute one block with a single transaction after backfill - let recipient = Address::repeat_byte(0x42); - let scenario = TestScenario::new( - vec![], - vec![BlockSpec::new(vec![TxSpec::transfer(recipient, U256::from(1))])], - ); + // Commit the block to the database so subsequent blocks can build on it + commit_block_to_database(&block, &execution_output, &provider_factory)?; - run_test_scenario(scenario, provider_factory, chain_spec, key_pair, storage).await.unwrap(); + last_block_hash = block.hash(); } - /// Test with multiple blocks before and after backfill - #[tokio::test] - async fn test_multiple_blocks_before_and_after_backfill() { - let storage = Arc::new(InMemoryProofsStorage::new()); - - let secp = Secp256k1::new(); - let key_pair = Keypair::new(&secp, &mut thread_rng()); - let sender = public_key_to_address(key_pair.public_key()); - - let chain_spec = chain_spec_with_address(sender); - let provider_factory = create_test_provider_factory_with_chain_spec(chain_spec.clone()); - init_genesis(&provider_factory).unwrap(); - - // Define the test scenario: - // - Execute 3 blocks before backfill (will be committed to db) - // - Backfill to block 3 - // - Execute 2 more blocks using the live collector - let recipient1 = Address::repeat_byte(0x42); - let recipient2 = Address::repeat_byte(0x43); - let recipient3 = Address::repeat_byte(0x44); - - let scenario = TestScenario::new( - vec![ - BlockSpec::new(vec![TxSpec::transfer(recipient1, U256::from(1))]), - BlockSpec::new(vec![TxSpec::transfer(recipient2, U256::from(2))]), - BlockSpec::new(vec![TxSpec::transfer(recipient3, U256::from(3))]), - ], - vec![ - BlockSpec::new(vec![TxSpec::transfer(recipient1, U256::from(4))]), - BlockSpec::new(vec![TxSpec::transfer(recipient2, U256::from(5))]), - ], - ); + Ok(()) +} - run_test_scenario(scenario, provider_factory, chain_spec, key_pair, storage).await.unwrap(); - } +/// End-to-end test of a single live collector iteration. 
+/// (1) Creates a chain with some state +/// (2) Stores the genesis state into storage via backfill +/// (3) Executes a block and calculates the state root using the stored state +#[tokio::test] +async fn test_execute_and_store_block_updates() { + let storage = Arc::new(InMemoryProofsStorage::new()); + + // Create a keypair for signing transactions + let secp = Secp256k1::new(); + let key_pair = Keypair::new(&secp, &mut thread_rng()); + let sender = public_key_to_address(key_pair.public_key()); + + // Create chain spec with the sender address funded in genesis + let chain_spec = chain_spec_with_address(sender); + + // Create test database and provider factory + let provider_factory = create_test_provider_factory_with_chain_spec(chain_spec.clone()); + + // Insert genesis state into the database + init_genesis(&provider_factory).unwrap(); + + // Define the test scenario: + // - No blocks before backfill + // - Backfill to genesis (block 0) + // - Execute one block with a single transaction after backfill + let recipient = Address::repeat_byte(0x42); + let scenario = TestScenario::new( + vec![], + vec![BlockSpec::new(vec![TxSpec::transfer(recipient, U256::from(1))])], + ); + + run_test_scenario(scenario, provider_factory, chain_spec, key_pair, storage).await.unwrap(); +} - /// Test with blocks containing multiple transactions - #[tokio::test] - async fn test_blocks_with_multiple_transactions() { - let storage = Arc::new(InMemoryProofsStorage::new()); - - let secp = Secp256k1::new(); - let key_pair = Keypair::new(&secp, &mut thread_rng()); - let sender = public_key_to_address(key_pair.public_key()); - - let chain_spec = chain_spec_with_address(sender); - let provider_factory = create_test_provider_factory_with_chain_spec(chain_spec.clone()); - init_genesis(&provider_factory).unwrap(); - - let recipient1 = Address::repeat_byte(0x42); - let recipient2 = Address::repeat_byte(0x43); - let recipient3 = Address::repeat_byte(0x44); - - // Block with 3 transactions - let scenario = TestScenario::new( - vec![], - vec![BlockSpec::new(vec![ - TxSpec::transfer(recipient1, U256::from(1)), - TxSpec::transfer(recipient2, U256::from(2)), - TxSpec::transfer(recipient3, U256::from(3)), - ])], - ); +/// Test with multiple blocks before and after backfill +#[tokio::test] +async fn test_multiple_blocks_before_and_after_backfill() { + let storage = Arc::new(InMemoryProofsStorage::new()); + + let secp = Secp256k1::new(); + let key_pair = Keypair::new(&secp, &mut thread_rng()); + let sender = public_key_to_address(key_pair.public_key()); + + let chain_spec = chain_spec_with_address(sender); + let provider_factory = create_test_provider_factory_with_chain_spec(chain_spec.clone()); + init_genesis(&provider_factory).unwrap(); + + // Define the test scenario: + // - Execute 3 blocks before backfill (will be committed to db) + // - Backfill to block 3 + // - Execute 2 more blocks using the live collector + let recipient1 = Address::repeat_byte(0x42); + let recipient2 = Address::repeat_byte(0x43); + let recipient3 = Address::repeat_byte(0x44); + + let scenario = TestScenario::new( + vec![ + BlockSpec::new(vec![TxSpec::transfer(recipient1, U256::from(1))]), + BlockSpec::new(vec![TxSpec::transfer(recipient2, U256::from(2))]), + BlockSpec::new(vec![TxSpec::transfer(recipient3, U256::from(3))]), + ], + vec![ + BlockSpec::new(vec![TxSpec::transfer(recipient1, U256::from(4))]), + BlockSpec::new(vec![TxSpec::transfer(recipient2, U256::from(5))]), + ], + ); + + run_test_scenario(scenario, provider_factory, chain_spec, key_pair, 
storage).await.unwrap(); +} - run_test_scenario(scenario, provider_factory, chain_spec, key_pair, storage).await.unwrap(); - } +/// Test with blocks containing multiple transactions +#[tokio::test] +async fn test_blocks_with_multiple_transactions() { + let storage = Arc::new(InMemoryProofsStorage::new()); + + let secp = Secp256k1::new(); + let key_pair = Keypair::new(&secp, &mut thread_rng()); + let sender = public_key_to_address(key_pair.public_key()); + + let chain_spec = chain_spec_with_address(sender); + let provider_factory = create_test_provider_factory_with_chain_spec(chain_spec.clone()); + init_genesis(&provider_factory).unwrap(); + + let recipient1 = Address::repeat_byte(0x42); + let recipient2 = Address::repeat_byte(0x43); + let recipient3 = Address::repeat_byte(0x44); + + // Block with 3 transactions + let scenario = TestScenario::new( + vec![], + vec![BlockSpec::new(vec![ + TxSpec::transfer(recipient1, U256::from(1)), + TxSpec::transfer(recipient2, U256::from(2)), + TxSpec::transfer(recipient3, U256::from(3)), + ])], + ); + + run_test_scenario(scenario, provider_factory, chain_spec, key_pair, storage).await.unwrap(); }
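
The rename to the `OpProofsStore` trait keeps the backend-agnostic test pattern in crates/optimism/trie/tests/lib.rs intact: each test is generic over the store and instantiated per backend through `test_case`. Below is a minimal sketch of how a further test written against the renamed trait might look; it assumes the same `test_case`/`tokio` harness and the `InMemoryProofsStorage` backend shown above, and the function name and the specific assertion are illustrative only, not part of this change.

    use alloy_primitives::B256;
    use reth_optimism_trie::{
        InMemoryProofsStorage, OpProofsHashedCursorRO, OpProofsStorageError, OpProofsStore,
    };
    use test_case::test_case;

    /// Sketch: any backend implementing `OpProofsStore` can reuse the same cursor assertions.
    #[test_case(InMemoryProofsStorage::new(); "InMemory")]
    #[tokio::test]
    async fn sketch_empty_backend_has_no_hashed_accounts<S: OpProofsStore>(
        storage: S,
    ) -> Result<(), OpProofsStorageError> {
        // A hashed-account cursor opened on a fresh store should find nothing,
        // regardless of the block number it is opened at.
        let mut account_cursor = storage.account_hashed_cursor(100)?;
        assert!(account_cursor.seek(B256::repeat_byte(0x01))?.is_none());
        Ok(())
    }

A new backend can then be covered by adding another `#[test_case(...)]` row to the same functions, which is the main payoff of keeping the tests generic over `OpProofsStore` rather than tied to a concrete storage type.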