diff --git a/crates/optimism/cli/src/commands/op_proofs/init.rs b/crates/optimism/cli/src/commands/op_proofs/init.rs index 62846182cd7..be366e0a746 100644 --- a/crates/optimism/cli/src/commands/op_proofs/init.rs +++ b/crates/optimism/cli/src/commands/op_proofs/init.rs @@ -54,7 +54,7 @@ impl> InitCommand { .into(); // Check if already initialized - if let Some((block_number, block_hash)) = storage.get_earliest_block_number().await? { + if let Some((block_number, block_hash)) = storage.get_earliest_block_number()? { info!( target: "reth::cli", block_number = block_number, @@ -80,7 +80,7 @@ impl> InitCommand { provider_factory.database_provider_ro()?.disable_long_read_transaction_safety(); let db_tx = db_provider.into_tx(); - InitializationJob::new(storage.clone(), db_tx).run(best_number, best_hash).await?; + InitializationJob::new(storage, db_tx).run(best_number, best_hash)?; } info!( diff --git a/crates/optimism/cli/src/commands/op_proofs/prune.rs b/crates/optimism/cli/src/commands/op_proofs/prune.rs index 48d30e1ab2c..f6ef1f933ce 100644 --- a/crates/optimism/cli/src/commands/op_proofs/prune.rs +++ b/crates/optimism/cli/src/commands/op_proofs/prune.rs @@ -62,8 +62,8 @@ impl> PruneCommand { ) .into(); - let earliest_block = storage.get_earliest_block_number().await?; - let latest_block = storage.get_latest_block_number().await?; + let earliest_block = storage.get_earliest_block_number()?; + let latest_block = storage.get_latest_block_number()?; info!( target: "reth::cli", ?earliest_block, @@ -77,7 +77,7 @@ impl> PruneCommand { self.proofs_history_window, self.proofs_history_prune_batch_size, ); - pruner.run().await; + pruner.run(); Ok(()) } } diff --git a/crates/optimism/cli/src/commands/op_proofs/unwind.rs b/crates/optimism/cli/src/commands/op_proofs/unwind.rs index 0d4e67bf60a..c73a40a91f9 100644 --- a/crates/optimism/cli/src/commands/op_proofs/unwind.rs +++ b/crates/optimism/cli/src/commands/op_proofs/unwind.rs @@ -36,12 +36,12 @@ pub struct UnwindCommand { impl 
UnwindCommand { /// Validates that the target block number is within a valid range for unwinding. - async fn validate_unwind_range( + fn validate_unwind_range( &self, storage: &OpProofsStorage, ) -> eyre::Result { let (Some((earliest, _)), Some((latest, _))) = - (storage.get_earliest_block_number().await?, storage.get_latest_block_number().await?) + (storage.get_earliest_block_number()?, storage.get_latest_block_number()?) else { warn!(target: "reth::cli", "No blocks found in proofs storage. Nothing to unwind."); return Ok(false); @@ -80,7 +80,7 @@ impl> UnwindCommand { .into(); // Validate that the target block is within a valid range for unwinding - if !self.validate_unwind_range(&storage).await? { + if !self.validate_unwind_range(&storage)? { return Ok(()); } @@ -92,7 +92,7 @@ impl> UnwindCommand { })?; info!(target: "reth::cli", block_number = block.number, block_hash = %block.hash(), "Unwinding to target block"); - storage.unwind_history(block.block_with_parent()).await?; + storage.unwind_history(block.block_with_parent())?; Ok(()) } diff --git a/crates/optimism/exex/src/lib.rs b/crates/optimism/exex/src/lib.rs index e957e1a9cd6..93cc5455406 100644 --- a/crates/optimism/exex/src/lib.rs +++ b/crates/optimism/exex/src/lib.rs @@ -157,7 +157,7 @@ where { /// Main execution loop for the ExEx pub async fn run(mut self) -> eyre::Result<()> { - self.ensure_initialized().await?; + self.ensure_initialized()?; let sync_target_tx = self.spawn_sync_task(); let prune_task = OpProofStoragePrunerTask::new( @@ -177,16 +177,16 @@ where ); while let Some(notification) = self.ctx.notifications.try_next().await? 
{ - self.handle_notification(notification, &collector, &sync_target_tx).await?; + self.handle_notification(notification, &collector, &sync_target_tx)?; } Ok(()) } /// Ensure proofs storage is initialized - async fn ensure_initialized(&self) -> eyre::Result<()> { + fn ensure_initialized(&self) -> eyre::Result<()> { // Check if proofs storage is initialized - let earliest_block_number = match self.storage.get_earliest_block_number().await? { + let earliest_block_number = match self.storage.get_earliest_block_number()? { Some((n, _)) => n, None => { return Err(eyre::eyre!( @@ -195,7 +195,7 @@ where } }; - let latest_block_number = match self.storage.get_latest_block_number().await? { + let latest_block_number = match self.storage.get_latest_block_number()? { Some((n, _)) => n, None => { return Err(eyre::eyre!( @@ -267,7 +267,7 @@ where loop { let target = *sync_target_rx.borrow_and_update(); - let latest = match storage.get_latest_block_number().await { + let latest = match storage.get_latest_block_number() { Ok(Some((n, _))) => n, Ok(None) => { error!(target: "optimism::exex", "No blocks stored in proofs storage during sync loop"); @@ -287,7 +287,6 @@ where // Process one batch if let Err(e) = Self::process_batch(latest, target, &provider, collector, SYNC_BLOCKS_BATCH_SIZE) - .await { error!(target: "optimism::exex", error = ?e, "Batch processing failed"); } @@ -299,7 +298,7 @@ where } /// Process a batch of blocks from start to target (up to `batch_size`) - async fn process_batch( + fn process_batch( start: u64, target: u64, provider: &Node::Provider, @@ -319,19 +318,19 @@ where .recovered_block(block_num.into(), TransactionVariant::NoHash)? 
.ok_or_else(|| eyre::eyre!("Missing block {}", block_num))?; - collector.execute_and_store_block_updates(&block).await?; + collector.execute_and_store_block_updates(&block)?; } Ok(()) } - async fn handle_notification( + fn handle_notification( &self, notification: ExExNotification, collector: &LiveTrieCollector<'_, Node::Evm, Node::Provider, Storage>, sync_target_tx: &watch::Sender, ) -> eyre::Result<()> { - let latest_stored = match self.storage.get_latest_block_number().await? { + let latest_stored = match self.storage.get_latest_block_number()? { Some((n, _)) => n, None => { return Err(eyre::eyre!("No blocks stored in proofs storage")); @@ -340,15 +339,13 @@ where match ¬ification { ExExNotification::ChainCommitted { new } => { - self.handle_chain_committed(new.clone(), latest_stored, collector, sync_target_tx) - .await? + self.handle_chain_committed(new.clone(), latest_stored, collector, sync_target_tx)? } ExExNotification::ChainReorged { old, new } => { - self.handle_chain_reorged(old.clone(), new.clone(), latest_stored, collector) - .await? + self.handle_chain_reorged(old.clone(), new.clone(), latest_stored, collector)? } ExExNotification::ChainReverted { old } => { - self.handle_chain_reverted(old.clone(), latest_stored, collector).await? + self.handle_chain_reverted(old.clone(), latest_stored, collector)? 
} } @@ -359,7 +356,7 @@ where Ok(()) } - async fn handle_chain_committed( + fn handle_chain_committed( &self, new: Arc>, latest_stored: u64, @@ -401,7 +398,7 @@ where // Process each block from latest_stored + 1 to tip let start = latest_stored.saturating_add(1); for block_number in start..=new.tip().number() { - self.process_block(block_number, &new, collector).await?; + self.process_block(block_number, &new, collector)?; } } else { debug!( @@ -422,7 +419,7 @@ where } /// Process a single block - either from chain or provider - async fn process_block( + fn process_block( &self, block_number: u64, chain: &Chain, @@ -449,13 +446,11 @@ where "Using pre-computed state updates from notification" ); - collector - .store_block_updates( - block.block_with_parent(), - (**trie_updates).clone(), - (**hashed_state).clone(), - ) - .await?; + collector.store_block_updates( + block.block_with_parent(), + (**trie_updates).clone(), + (**hashed_state).clone(), + )?; return Ok(()); } @@ -488,11 +483,11 @@ where .recovered_block(block_number.into(), TransactionVariant::NoHash)? 
.ok_or_else(|| eyre::eyre!("Missing block {} in provider", block_number))?; - collector.execute_and_store_block_updates(&block).await?; + collector.execute_and_store_block_updates(&block)?; Ok(()) } - async fn handle_chain_reorged( + fn handle_chain_reorged( &self, old: Arc>, new: Arc>, @@ -548,12 +543,12 @@ where )); } - collector.unwind_and_store_block_updates(block_updates).await?; + collector.unwind_and_store_block_updates(block_updates)?; Ok(()) } - async fn handle_chain_reverted( + fn handle_chain_reverted( &self, old: Arc>, latest_stored: u64, @@ -576,7 +571,7 @@ where return Ok(()); } - collector.unwind_history(old.first().block_with_parent()).await?; + collector.unwind_history(old.first().block_with_parent())?; Ok(()) } } @@ -655,18 +650,16 @@ mod tests { } // Init_storage to the genesis block - async fn init_storage(storage: OpProofsStorage) { + fn init_storage(storage: OpProofsStorage) { let genesis_block = NumHash::new(0, b256(0x00)); storage .set_earliest_block_number(genesis_block.number, genesis_block.hash) - .await .expect("set earliest"); storage .store_trie_updates( BlockWithParent::new(genesis_block.hash, genesis_block), BlockStateDiff::default(), ) - .await .expect("store trie update"); } @@ -689,7 +682,7 @@ mod tests { let store = Arc::new(MdbxProofsStorage::new(dir.as_path()).expect("env")); let proofs: OpProofsStorage> = store.clone().into(); - init_storage(proofs.clone()).await; + init_storage(proofs.clone()); let (ctx, _handle) = reth_exex_test_utils::test_exex_context().await.expect("exex test context"); @@ -707,12 +700,9 @@ mod tests { let (sync_target_tx, _) = tokio::sync::watch::channel(0u64); - exex.handle_notification(notif, &collector, &sync_target_tx) - .await - .expect("handle chain commit"); + exex.handle_notification(notif, &collector, &sync_target_tx).expect("handle chain commit"); - let latest = - proofs.get_latest_block_number().await.expect("get latest block").expect("ok").0; + let latest = 
proofs.get_latest_block_number().expect("get latest block").expect("ok").0; assert_eq!(latest, 1); } @@ -723,7 +713,7 @@ mod tests { let store = Arc::new(MdbxProofsStorage::new(dir.as_path()).expect("env")); let proofs: OpProofsStorage> = store.clone().into(); - init_storage(proofs.clone()).await; + init_storage(proofs.clone()); let (ctx, _handle) = reth_exex_test_utils::test_exex_context().await.expect("exex test context"); @@ -742,21 +732,17 @@ mod tests { let new_chain = Arc::new(mk_chain_with_updates(i, i, None)); let notif = ExExNotification::ChainCommitted { new: new_chain }; exex.handle_notification(notif, &collector, &sync_target_tx) - .await .expect("handle chain commit"); } - let latest = - proofs.get_latest_block_number().await.expect("get latest block").expect("ok").0; + let latest = proofs.get_latest_block_number().expect("get latest block").expect("ok").0; assert_eq!(latest, 5); // Try to handle already processed notification let new_chain = Arc::new(mk_chain_with_updates(5, 5, Some(hash_for_num(10)))); let notif = ExExNotification::ChainCommitted { new: new_chain }; - exex.handle_notification(notif, &collector, &sync_target_tx) - .await - .expect("handle chain commit"); - let latest = proofs.get_latest_block_number().await.expect("get latest block").expect("ok"); + exex.handle_notification(notif, &collector, &sync_target_tx).expect("handle chain commit"); + let latest = proofs.get_latest_block_number().expect("get latest block").expect("ok"); assert_eq!(latest.0, 5); assert_eq!(latest.1, hash_for_num(5)); // block was not updated } @@ -768,7 +754,7 @@ mod tests { let store = Arc::new(MdbxProofsStorage::new(dir.as_path()).expect("env")); let proofs: OpProofsStorage> = store.clone().into(); - init_storage(proofs.clone()).await; + init_storage(proofs.clone()); let (ctx, _handle) = reth_exex_test_utils::test_exex_context().await.expect("exex test context"); @@ -787,12 +773,10 @@ mod tests { let new_chain = Arc::new(mk_chain_with_updates(i, i, None)); let 
notif = ExExNotification::ChainCommitted { new: new_chain }; exex.handle_notification(notif, &collector, &sync_target_tx) - .await .expect("handle chain commit"); } - let latest = - proofs.get_latest_block_number().await.expect("get latest block").expect("ok").0; + let latest = proofs.get_latest_block_number().expect("get latest block").expect("ok").0; assert_eq!(latest, 10); // Now the tip is 10, and we want to reorg from block 6..12 @@ -803,10 +787,8 @@ mod tests { let notif = ExExNotification::ChainReorged { new: new_chain, old: old_chain }; exex.handle_notification(notif, &collector, &sync_target_tx) - .await .expect("handle chain re-orged"); - let latest = - proofs.get_latest_block_number().await.expect("get latest block").expect("ok").0; + let latest = proofs.get_latest_block_number().expect("get latest block").expect("ok").0; assert_eq!(latest, 12); } @@ -817,7 +799,7 @@ mod tests { let store = Arc::new(MdbxProofsStorage::new(dir.as_path()).expect("env")); let proofs: OpProofsStorage> = store.clone().into(); - init_storage(proofs.clone()).await; + init_storage(proofs.clone()); let (ctx, _handle) = reth_exex_test_utils::test_exex_context().await.expect("exex test context"); @@ -837,12 +819,10 @@ mod tests { let notif = ExExNotification::ChainCommitted { new: new_chain }; exex.handle_notification(notif, &collector, &sync_target_tx) - .await .expect("handle chain commit"); } - let latest = - proofs.get_latest_block_number().await.expect("get latest block").expect("ok").0; + let latest = proofs.get_latest_block_number().expect("get latest block").expect("ok").0; assert_eq!(latest, 10); // Now the tip is 10, and we want to reorg from block 12..15 @@ -853,10 +833,8 @@ mod tests { let notif = ExExNotification::ChainReorged { new: new_chain, old: old_chain }; exex.handle_notification(notif, &collector, &sync_target_tx) - .await .expect("handle chain re-orged"); - let latest = - proofs.get_latest_block_number().await.expect("get latest block").expect("ok").0; + let 
latest = proofs.get_latest_block_number().expect("get latest block").expect("ok").0; assert_eq!(latest, 10); } @@ -867,7 +845,7 @@ mod tests { let store = Arc::new(MdbxProofsStorage::new(dir.as_path()).expect("env")); let proofs: OpProofsStorage> = store.clone().into(); - init_storage(proofs.clone()).await; + init_storage(proofs.clone()); let (ctx, _handle) = reth_exex_test_utils::test_exex_context().await.expect("exex test context"); @@ -887,12 +865,10 @@ mod tests { let notif = ExExNotification::ChainCommitted { new: new_chain }; exex.handle_notification(notif, &collector, &sync_target_tx) - .await .expect("handle chain commit"); } - let latest = - proofs.get_latest_block_number().await.expect("get latest block").expect("ok").0; + let latest = proofs.get_latest_block_number().expect("get latest block").expect("ok").0; assert_eq!(latest, 10); // Now the tip is 10, and we want to revert from block 9..10 @@ -902,10 +878,8 @@ mod tests { let notif = ExExNotification::ChainReverted { old: old_chain }; exex.handle_notification(notif, &collector, &sync_target_tx) - .await .expect("handle chain reverted"); - let latest = - proofs.get_latest_block_number().await.expect("get latest block").expect("ok").0; + let latest = proofs.get_latest_block_number().expect("get latest block").expect("ok").0; assert_eq!(latest, 8); } @@ -916,7 +890,7 @@ mod tests { let store = Arc::new(MdbxProofsStorage::new(dir.as_path()).expect("env")); let proofs: OpProofsStorage> = store.clone().into(); - init_storage(proofs.clone()).await; + init_storage(proofs.clone()); let (ctx, _handle) = reth_exex_test_utils::test_exex_context().await.expect("exex test context"); @@ -936,12 +910,10 @@ mod tests { let notif = ExExNotification::ChainCommitted { new: new_chain }; exex.handle_notification(notif, &collector, &sync_target_tx) - .await .expect("handle chain commit"); } - let latest = - proofs.get_latest_block_number().await.expect("get latest block").expect("ok").0; + let latest = 
proofs.get_latest_block_number().expect("get latest block").expect("ok").0; assert_eq!(latest, 5); // Now the tip is 10, and we want to revert from block 9..10 @@ -951,10 +923,8 @@ mod tests { let notif = ExExNotification::ChainReverted { old: old_chain }; exex.handle_notification(notif, &collector, &sync_target_tx) - .await .expect("handle chain reverted"); - let latest = - proofs.get_latest_block_number().await.expect("get latest block").expect("ok").0; + let latest = proofs.get_latest_block_number().expect("get latest block").expect("ok").0; assert_eq!(latest, 5); } @@ -969,7 +939,7 @@ mod tests { reth_exex_test_utils::test_exex_context().await.expect("exex test context"); let exex = build_test_exex(ctx, proofs.clone()); - let _ = exex.ensure_initialized().await.expect_err("should return error"); + let _ = exex.ensure_initialized().expect_err("should return error"); } #[tokio::test] @@ -979,7 +949,7 @@ mod tests { let store = Arc::new(MdbxProofsStorage::new(dir.as_path()).expect("env")); let proofs: OpProofsStorage> = store.clone().into(); - init_storage(proofs.clone()).await; + init_storage(proofs.clone()); for i in 1..1100 { proofs @@ -990,7 +960,6 @@ mod tests { ), BlockStateDiff::default(), ) - .await .expect("store trie update"); } @@ -998,7 +967,7 @@ mod tests { reth_exex_test_utils::test_exex_context().await.expect("exex test context"); let exex = build_test_exex(ctx, proofs.clone()); - let _ = exex.ensure_initialized().await.expect_err("should return error"); + let _ = exex.ensure_initialized().expect_err("should return error"); } #[tokio::test] @@ -1008,18 +977,18 @@ mod tests { let store = Arc::new(MdbxProofsStorage::new(dir.as_path()).expect("env")); let proofs: OpProofsStorage> = store.clone().into(); - init_storage(proofs.clone()).await; + init_storage(proofs.clone()); let (ctx, _handle) = reth_exex_test_utils::test_exex_context().await.expect("exex test context"); let exex = build_test_exex(ctx, proofs.clone()); - 
exex.ensure_initialized().await.expect("should not return error"); + exex.ensure_initialized().expect("should not return error"); } #[tokio::test] async fn handle_notification_errors_on_empty_storage() { - // MDBX proofs storage - empty + // MDBX proofs storage let dir = tempdir_path(); let store = Arc::new(MdbxProofsStorage::new(dir.as_path()).expect("env")); let proofs: OpProofsStorage> = store.clone().into(); @@ -1040,7 +1009,7 @@ mod tests { let notif = ExExNotification::ChainCommitted { new: new_chain }; let (sync_target_tx, _) = tokio::sync::watch::channel(0u64); - let err = exex.handle_notification(notif, &collector, &sync_target_tx).await.unwrap_err(); + let err = exex.handle_notification(notif, &collector, &sync_target_tx).unwrap_err(); assert_eq!(err.to_string(), "No blocks stored in proofs storage"); } @@ -1051,7 +1020,7 @@ mod tests { let store = Arc::new(MdbxProofsStorage::new(dir.as_path()).expect("env")); let proofs: OpProofsStorage> = store.clone().into(); - init_storage(proofs.clone()).await; + init_storage(proofs.clone()); let (ctx, _handle) = reth_exex_test_utils::test_exex_context().await.expect("exex test context"); @@ -1071,7 +1040,6 @@ mod tests { // Process notification exex.handle_notification(notif, &collector, &sync_target_tx) - .await .expect("handle chain commit should return ok immediately"); // Verify async signal was sent @@ -1086,7 +1054,7 @@ mod tests { // Because we didn't spawn the actual worker thread in this test, storage should still be at // 0. This proves the 'handle_notification' returned instantly without doing the // heavy lifting. 
- let latest = proofs.get_latest_block_number().await.expect("get").expect("ok").0; + let latest = proofs.get_latest_block_number().expect("get").expect("ok").0; assert_eq!(latest, 0, "Main thread should not have processed the blocks synchronously"); } } diff --git a/crates/optimism/rpc/src/debug.rs b/crates/optimism/rpc/src/debug.rs index 48dcb75dc88..c6fe91148de 100644 --- a/crates/optimism/rpc/src/debug.rs +++ b/crates/optimism/rpc/src/debug.rs @@ -320,13 +320,11 @@ where .inner .storage .get_earliest_block_number() - .await .map_err(|err| internal_rpc_err(err.to_string()))?; let latest = self .inner .storage .get_latest_block_number() - .await .map_err(|err| internal_rpc_err(err.to_string()))?; Ok(ProofsSyncStatus { diff --git a/crates/optimism/rpc/src/state.rs b/crates/optimism/rpc/src/state.rs index a4b683f6fe9..b4e4a744329 100644 --- a/crates/optimism/rpc/src/state.rs +++ b/crates/optimism/rpc/src/state.rs @@ -41,11 +41,9 @@ where let (Some((latest_block_number, _)), Some((earliest_block_number, _))) = ( self.preimage_store .get_latest_block_number() - .await .map_err(|e| ProviderError::Database(e.into()))?, self.preimage_store .get_earliest_block_number() - .await .map_err(|e| ProviderError::Database(e.into()))?, ) else { // if no earliest block, db is empty - use historical provider diff --git a/crates/optimism/trie/src/api.rs b/crates/optimism/trie/src/api.rs index 9e3ed55ebd9..bf5868c57c0 100644 --- a/crates/optimism/trie/src/api.rs +++ b/crates/optimism/trie/src/api.rs @@ -92,40 +92,36 @@ pub trait OpProofsStore: Send + Sync + Debug { fn store_account_branches( &self, account_nodes: Vec<(Nibbles, Option)>, - ) -> impl Future> + Send; + ) -> OpProofsStorageResult<()>; /// Store a batch of storage trie branches. Used for saving existing state. fn store_storage_branches( &self, hashed_address: B256, storage_nodes: Vec<(Nibbles, Option)>, - ) -> impl Future> + Send; + ) -> OpProofsStorageResult<()>; /// Store a batch of account trie leaf nodes. 
Used for saving existing state. fn store_hashed_accounts( &self, accounts: Vec<(B256, Option)>, - ) -> impl Future> + Send; + ) -> OpProofsStorageResult<()>; /// Store a batch of storage trie leaf nodes. Used for saving existing state. fn store_hashed_storages( &self, hashed_address: B256, storages: Vec<(B256, U256)>, - ) -> impl Future> + Send; + ) -> OpProofsStorageResult<()>; /// Get the earliest block number and hash that has been stored /// /// This is used to determine the block number of trie nodes with block number 0. /// All earliest block numbers are stored in 0 to reduce updates required to prune trie nodes. - fn get_earliest_block_number( - &self, - ) -> impl Future>> + Send; + fn get_earliest_block_number(&self) -> OpProofsStorageResult>; /// Get the latest block number and hash that has been stored - fn get_latest_block_number( - &self, - ) -> impl Future>> + Send; + fn get_latest_block_number(&self) -> OpProofsStorageResult>; /// Get a trie cursor for the storage backend fn storage_trie_cursor<'tx>( @@ -161,41 +157,32 @@ pub trait OpProofsStore: Send + Sync + Debug { &self, block_ref: BlockWithParent, block_state_diff: BlockStateDiff, - ) -> impl Future> + Send; + ) -> OpProofsStorageResult; /// Fetch all updates for a given block number. - fn fetch_trie_updates( - &self, - block_number: u64, - ) -> impl Future> + Send; + fn fetch_trie_updates(&self, block_number: u64) -> OpProofsStorageResult; /// Applies [`BlockStateDiff`] to the earliest state (updating/deleting nodes) and updates the /// earliest block number. fn prune_earliest_state( &self, new_earliest_block_ref: BlockWithParent, - ) -> impl Future> + Send; + ) -> OpProofsStorageResult; /// Remove account, storage and trie updates from historical storage for all blocks till /// the specified block (inclusive). 
- fn unwind_history( - &self, - to: BlockWithParent, - ) -> impl Future> + Send; + fn unwind_history(&self, to: BlockWithParent) -> OpProofsStorageResult<()>; /// Deletes all updates > `latest_common_block` and replaces them with the new updates. fn replace_updates( &self, latest_common_block: BlockNumHash, blocks_to_add: Vec<(BlockWithParent, BlockStateDiff)>, - ) -> impl Future> + Send; + ) -> OpProofsStorageResult<()>; /// Set the earliest block number and hash that has been stored - fn set_earliest_block_number( - &self, - block_number: u64, - hash: B256, - ) -> impl Future> + Send; + fn set_earliest_block_number(&self, block_number: u64, hash: B256) + -> OpProofsStorageResult<()>; } /// Status of the initial state anchor. @@ -232,20 +219,13 @@ pub struct InitialStateAnchor { #[auto_impl(Arc)] pub trait OpProofsInitialStateStore: Send + Sync + Debug { /// Read the current anchor. - fn initial_state_anchor( - &self, - ) -> impl Future> + Send; + fn initial_state_anchor(&self) -> OpProofsStorageResult; /// Create the anchor if it doesn't exist. /// Returns `Err` if an anchor already exists (prevents accidental overwrite). - fn set_initial_state_anchor( - &self, - anchor: BlockNumHash, - ) -> impl Future> + Send; + fn set_initial_state_anchor(&self, anchor: BlockNumHash) -> OpProofsStorageResult<()>; /// Commit the initial state - mark the anchor as completed and also set the earliest block /// number to anchor. 
- fn commit_initial_state( - &self, - ) -> impl Future> + Send; + fn commit_initial_state(&self) -> OpProofsStorageResult; } diff --git a/crates/optimism/trie/src/db/store.rs b/crates/optimism/trie/src/db/store.rs index 6074acc278b..da3e364f73f 100644 --- a/crates/optimism/trie/src/db/store.rs +++ b/crates/optimism/trie/src/db/store.rs @@ -112,7 +112,7 @@ impl MdbxProofsStorage { Ok(Some(ProofWindowValue { earliest, latest })) } - async fn set_earliest_block_number_hash( + fn set_earliest_block_number_hash( &self, block_number: u64, hash: B256, @@ -636,7 +636,7 @@ impl OpProofsStore for MdbxProofsStorage { where Self: 'tx; - async fn store_account_branches( + fn store_account_branches( &self, account_nodes: Vec<(Nibbles, Option)>, ) -> OpProofsStorageResult<()> { @@ -653,7 +653,7 @@ impl OpProofsStore for MdbxProofsStorage { })? } - async fn store_storage_branches( + fn store_storage_branches( &self, hashed_address: B256, storage_nodes: Vec<(Nibbles, Option)>, @@ -676,7 +676,7 @@ impl OpProofsStore for MdbxProofsStorage { })? } - async fn store_hashed_accounts( + fn store_hashed_accounts( &self, accounts: Vec<(B256, Option)>, ) -> OpProofsStorageResult<()> { @@ -694,7 +694,7 @@ impl OpProofsStore for MdbxProofsStorage { })? } - async fn store_hashed_storages( + fn store_hashed_storages( &self, hashed_address: B256, storages: Vec<(B256, U256)>, @@ -720,11 +720,11 @@ impl OpProofsStore for MdbxProofsStorage { })? } - async fn get_earliest_block_number(&self) -> OpProofsStorageResult> { + fn get_earliest_block_number(&self) -> OpProofsStorageResult> { self.env.view(|tx| self.inner_get_block_number_hash(tx, ProofWindowKey::EarliestBlock))? } - async fn get_latest_block_number(&self) -> OpProofsStorageResult> { + fn get_latest_block_number(&self) -> OpProofsStorageResult> { self.env.view(|tx| self.inner_get_latest_block_number_hash(tx))? 
} @@ -770,7 +770,7 @@ impl OpProofsStore for MdbxProofsStorage { Ok(MdbxAccountCursor::new(cursor, max_block_number)) } - async fn store_trie_updates( + fn store_trie_updates( &self, block_ref: BlockWithParent, block_state_diff: BlockStateDiff, @@ -779,7 +779,7 @@ impl OpProofsStore for MdbxProofsStorage { .update(|tx| self.store_trie_updates_append_only(tx, block_ref, block_state_diff))? } - async fn fetch_trie_updates(&self, block_number: u64) -> OpProofsStorageResult { + fn fetch_trie_updates(&self, block_number: u64) -> OpProofsStorageResult { self.env.view(|tx| { let mut change_set_cursor = tx.cursor_read::()?; let (_, change_set) = change_set_cursor @@ -893,7 +893,7 @@ impl OpProofsStore for MdbxProofsStorage { /// - `new_earliest_block_ref`: The new earliest block reference (with parent hash). /// - `diff`: The state diff to apply to the initial state (block 0). This diff represents all /// the changes from the old earliest block to the new earliest block (inclusive). - async fn prune_earliest_state( + fn prune_earliest_state( &self, new_earliest_block_ref: BlockWithParent, ) -> OpProofsStorageResult { @@ -953,7 +953,7 @@ impl OpProofsStore for MdbxProofsStorage { /// Unwind the historical state to `unwind_upto_block` (inclusive), deleting all history /// starting from provided block. Also updates the `ProofWindow::LatestBlock` to parent of /// `unwind_upto_block`. - async fn unwind_history(&self, to: BlockWithParent) -> OpProofsStorageResult<()> { + fn unwind_history(&self, to: BlockWithParent) -> OpProofsStorageResult<()> { let history_to_delete = self.env.view(|tx| self.collect_history_ranged(tx, to.block.number..))??; @@ -990,7 +990,7 @@ impl OpProofsStore for MdbxProofsStorage { })? } - async fn replace_updates( + fn replace_updates( &self, latest_common_block: BlockNumHash, mut blocks_to_add: Vec<(BlockWithParent, BlockStateDiff)>, @@ -1022,24 +1022,24 @@ impl OpProofsStore for MdbxProofsStorage { })? 
} - async fn set_earliest_block_number( + fn set_earliest_block_number( &self, block_number: u64, hash: B256, ) -> OpProofsStorageResult<()> { - self.set_earliest_block_number_hash(block_number, hash).await + self.set_earliest_block_number_hash(block_number, hash) } } impl OpProofsInitialStateStore for MdbxProofsStorage { - async fn initial_state_anchor(&self) -> OpProofsStorageResult { + fn initial_state_anchor(&self) -> OpProofsStorageResult { // 1) NotStarted: no anchor row let Some(block) = self.get_initial_state_anchor()? else { return Ok(InitialStateAnchor::default()); }; // 2) Completed: anchor exists + earliest is set - let completed = self.get_earliest_block_number().await?.is_some(); + let completed = self.get_earliest_block_number()?.is_some(); // 3) InProgress / Completed: populate details Ok(InitialStateAnchor { @@ -1056,7 +1056,7 @@ impl OpProofsInitialStateStore for MdbxProofsStorage { }) } - async fn set_initial_state_anchor(&self, anchor: BlockNumHash) -> OpProofsStorageResult<()> { + fn set_initial_state_anchor(&self, anchor: BlockNumHash) -> OpProofsStorageResult<()> { self.env.update(|tx| { let mut cur = tx.cursor_write::()?; cur.insert(ProofWindowKey::InitialStateAnchor, &anchor.into())?; @@ -1064,9 +1064,9 @@ impl OpProofsInitialStateStore for MdbxProofsStorage { })? 
} - async fn commit_initial_state(&self) -> OpProofsStorageResult { + fn commit_initial_state(&self) -> OpProofsStorageResult { let anchor = self.get_initial_state_anchor()?.ok_or(NoBlocksFound)?; - self.set_earliest_block_number(anchor.number, anchor.hash).await?; + self.set_earliest_block_number(anchor.number, anchor.hash)?; Ok(anchor) } } @@ -1181,14 +1181,14 @@ mod tests { const B0: u64 = 0; - #[tokio::test] - async fn store_hashed_accounts_writes_versioned_values() { + #[test] + fn store_hashed_accounts_writes_versioned_values() { let dir = TempDir::new().unwrap(); let store = MdbxProofsStorage::new(dir.path()).expect("env"); let addr = B256::from([0xAA; 32]); let account = Account::default(); - store.store_hashed_accounts(vec![(addr, Some(account))]).await.expect("write accounts"); + store.store_hashed_accounts(vec![(addr, Some(account))]).expect("write accounts"); let tx = store.env.tx().expect("ro tx"); let mut cur = tx.new_cursor::().expect("cursor"); @@ -1199,8 +1199,8 @@ mod tests { assert_eq!(vv.value.0, Some(account)); } - #[tokio::test] - async fn store_hashed_accounts_multiple_items_unsorted() { + #[test] + fn store_hashed_accounts_multiple_items_unsorted() { let dir = TempDir::new().unwrap(); let store = MdbxProofsStorage::new(dir.path()).expect("env"); @@ -1213,7 +1213,6 @@ mod tests { store .store_hashed_accounts(vec![(a2, None), (a1, Some(acc1)), (a3, Some(acc3))]) - .await .expect("write"); let tx = store.env.tx().expect("ro tx"); @@ -1232,8 +1231,8 @@ mod tests { assert_eq!(v3.value.0, Some(acc3)); } - #[tokio::test] - async fn store_hashed_accounts_multiple_calls() { + #[test] + fn store_hashed_accounts_multiple_calls() { let dir = TempDir::new().unwrap(); let store = MdbxProofsStorage::new(dir.path()).expect("env"); @@ -1251,7 +1250,6 @@ mod tests { { store .store_hashed_accounts(vec![(a2, None), (a1, Some(acc1)), (a4, Some(acc4))]) - .await .expect("write"); let tx = store.env.tx().expect("ro tx"); @@ -1272,10 +1270,7 @@ mod tests { { // 
Second call - store - .store_hashed_accounts(vec![(a5, Some(acc5)), (a3, Some(acc3))]) - .await - .expect("write"); + store.store_hashed_accounts(vec![(a5, Some(acc5)), (a3, Some(acc3))]).expect("write"); let tx = store.env.tx().expect("ro tx"); let mut cur = tx.new_cursor::().expect("cursor"); @@ -1290,8 +1285,8 @@ mod tests { } } - #[tokio::test] - async fn store_hashed_storages_writes_versioned_values() { + #[test] + fn store_hashed_storages_writes_versioned_values() { let dir = TempDir::new().unwrap(); let store = MdbxProofsStorage::new(dir.path()).expect("env"); @@ -1299,7 +1294,7 @@ mod tests { let slot = B256::from([0x22; 32]); let val = U256::from(0x1234u64); - store.store_hashed_storages(addr, vec![(slot, val)]).await.expect("write storage"); + store.store_hashed_storages(addr, vec![(slot, val)]).expect("write storage"); let tx = store.env.tx().expect("ro tx"); let mut cur = tx.new_cursor::().expect("cursor"); @@ -1312,8 +1307,8 @@ mod tests { assert_eq!(inner.0, val); } - #[tokio::test] - async fn store_hashed_storages_multiple_slots_unsorted() { + #[test] + fn store_hashed_storages_multiple_slots_unsorted() { let dir = TempDir::new().unwrap(); let store = MdbxProofsStorage::new(dir.path()).expect("env"); @@ -1325,7 +1320,7 @@ mod tests { let s3 = B256::from([0x03; 32]); let v3 = U256::from(3u64); - store.store_hashed_storages(addr, vec![(s2, v2), (s1, v1), (s3, v3)]).await.expect("write"); + store.store_hashed_storages(addr, vec![(s2, v2), (s1, v1), (s3, v3)]).expect("write"); let tx = store.env.tx().expect("ro tx"); let mut cur = tx.new_cursor::().expect("cursor"); @@ -1339,8 +1334,8 @@ mod tests { } } - #[tokio::test] - async fn store_hashed_storages_multiple_calls() { + #[test] + fn store_hashed_storages_multiple_calls() { let dir = TempDir::new().unwrap(); let store = MdbxProofsStorage::new(dir.path()).expect("env"); @@ -1357,10 +1352,7 @@ mod tests { let v5 = U256::from(5u64); { - store - .store_hashed_storages(addr, vec![(s2, v2), (s1, v1), (s5, 
v5)]) - .await - .expect("write"); + store.store_hashed_storages(addr, vec![(s2, v2), (s1, v1), (s5, v5)]).expect("write"); let tx = store.env.tx().expect("ro tx"); let mut cur = tx.new_cursor::().expect("cursor"); @@ -1376,7 +1368,7 @@ mod tests { { // Second call - store.store_hashed_storages(addr, vec![(s4, v4), (s3, v3)]).await.expect("write"); + store.store_hashed_storages(addr, vec![(s4, v4), (s3, v3)]).expect("write"); let tx = store.env.tx().expect("ro tx"); let mut cur = tx.new_cursor::().expect("cursor"); @@ -1391,8 +1383,8 @@ mod tests { } } - #[tokio::test] - async fn test_store_account_branches_writes_versioned_values() { + #[test] + fn test_store_account_branches_writes_versioned_values() { let dir = TempDir::new().unwrap(); let store = MdbxProofsStorage::new(dir.path()).expect("env"); @@ -1400,7 +1392,7 @@ mod tests { let branch_node = BranchNodeCompact::new(0b1, 0, 0, vec![], Some(B256::random())); let updates = vec![(nibble, Some(branch_node.clone()))]; - store.store_account_branches(updates).await.expect("write"); + store.store_account_branches(updates).expect("write"); let tx = store.env.tx().expect("ro tx"); let mut cur = tx.cursor_dup_read::().expect("cursor"); @@ -1414,8 +1406,8 @@ mod tests { assert_eq!(vv.value.0, Some(branch_node)); } - #[tokio::test] - async fn test_store_account_branches_multiple_items_unsorted() { + #[test] + fn test_store_account_branches_multiple_items_unsorted() { let dir = TempDir::new().unwrap(); let store = MdbxProofsStorage::new(dir.path()).expect("env"); @@ -1425,8 +1417,8 @@ mod tests { let n3 = Nibbles::from_nibbles_unchecked([0x03]); let b3 = BranchNodeCompact::new(0b1, 0, 0, vec![], Some(B256::random())); - let updates = vec![(n2, None), (n1, Some(b1.clone())), (n3, Some(b3.clone()))]; - store.store_account_branches(updates.clone()).await.expect("write"); + let updates = vec![(n2, None), (n1, Some(b1)), (n3, Some(b3))]; + store.store_account_branches(updates.clone()).expect("write"); let tx = 
store.env.tx().expect("ro tx"); let mut cur = tx.cursor_dup_read::().expect("cursor"); @@ -1441,8 +1433,8 @@ mod tests { } } - #[tokio::test] - async fn store_account_branches_multiple_calls() { + #[test] + fn store_account_branches_multiple_calls() { let dir = TempDir::new().unwrap(); let store = MdbxProofsStorage::new(dir.path()).expect("env"); @@ -1457,8 +1449,8 @@ mod tests { let b5 = BranchNodeCompact::new(0b1, 0, 0, vec![], Some(B256::random())); { - let updates1 = vec![(n2, None), (n1, Some(b1.clone())), (n4, Some(b4.clone()))]; - store.store_account_branches(updates1.clone()).await.expect("write"); + let updates1 = vec![(n2, None), (n1, Some(b1)), (n4, Some(b4))]; + store.store_account_branches(updates1.clone()).expect("write"); let tx = store.env.tx().expect("ro tx"); let mut cur = tx.cursor_dup_read::().expect("cursor"); @@ -1475,8 +1467,8 @@ mod tests { { // Second call - let updates2 = vec![(n5, Some(b5.clone())), (n3, Some(b3.clone()))]; - store.store_account_branches(updates2.clone()).await.expect("write"); + let updates2 = vec![(n5, Some(b5)), (n3, Some(b3))]; + store.store_account_branches(updates2.clone()).expect("write"); let tx = store.env.tx().expect("ro tx"); let mut cur = tx.cursor_dup_read::().expect("cursor"); @@ -1492,8 +1484,8 @@ mod tests { } } - #[tokio::test] - async fn test_store_storage_branches_writes_versioned_values() { + #[test] + fn test_store_storage_branches_writes_versioned_values() { let dir = TempDir::new().unwrap(); let store = MdbxProofsStorage::new(dir.path()).expect("env"); @@ -1502,7 +1494,7 @@ mod tests { let branch_node = BranchNodeCompact::new(0b1, 0, 0, vec![], Some(B256::random())); let items = vec![(nibble, Some(branch_node.clone()))]; - store.store_storage_branches(hashed_address, items).await.expect("write"); + store.store_storage_branches(hashed_address, items).expect("write"); let tx = store.env.tx().expect("ro tx"); let mut cur = tx.cursor_dup_read::().expect("cursor"); @@ -1514,8 +1506,8 @@ mod tests { 
assert_eq!(vv.value.0, Some(branch_node)); } - #[tokio::test] - async fn store_storage_branches_multiple_items_unsorted() { + #[test] + fn store_storage_branches_multiple_items_unsorted() { let dir = TempDir::new().unwrap(); let store = MdbxProofsStorage::new(dir.path()).expect("env"); @@ -1526,8 +1518,8 @@ mod tests { let n3 = Nibbles::from_nibbles_unchecked([0x03]); let b3 = BranchNodeCompact::new(0b1, 0, 0, vec![], Some(B256::random())); - let items = vec![(n2, None), (n1, Some(b1.clone())), (n3, Some(b3.clone()))]; - store.store_storage_branches(hashed_address, items.clone()).await.expect("write"); + let items = vec![(n2, None), (n1, Some(b1)), (n3, Some(b3))]; + store.store_storage_branches(hashed_address, items.clone()).expect("write"); let tx = store.env.tx().expect("ro tx"); let mut cur = tx.cursor_dup_read::().expect("cursor"); @@ -1540,8 +1532,8 @@ mod tests { } } - #[tokio::test] - async fn store_storage_branches_multiple_calls() { + #[test] + fn store_storage_branches_multiple_calls() { let dir = TempDir::new().unwrap(); let store = MdbxProofsStorage::new(dir.path()).expect("env"); @@ -1557,8 +1549,8 @@ mod tests { let b5 = BranchNodeCompact::new(0b1, 0, 0, vec![], Some(B256::random())); { - let items1 = vec![(n2, None), (n1, Some(b1.clone())), (n5, Some(b5.clone()))]; - store.store_storage_branches(hashed_address, items1.clone()).await.expect("write"); + let items1 = vec![(n2, None), (n1, Some(b1)), (n5, Some(b5))]; + store.store_storage_branches(hashed_address, items1.clone()).expect("write"); let tx = store.env.tx().expect("ro tx"); let mut cur = tx.cursor_dup_read::().expect("cursor"); @@ -1573,8 +1565,8 @@ mod tests { { // Second call - let items2 = vec![(n4, Some(b4.clone())), (n3, Some(b3.clone()))]; - store.store_storage_branches(hashed_address, items2.clone()).await.expect("write"); + let items2 = vec![(n4, Some(b4)), (n3, Some(b3))]; + store.store_storage_branches(hashed_address, items2.clone()).expect("write"); let tx = 
store.env.tx().expect("ro tx"); let mut cur = tx.cursor_dup_read::().expect("cursor"); @@ -1588,8 +1580,8 @@ mod tests { } } - #[tokio::test] - async fn test_store_trie_updates_comprehensive() { + #[test] + fn test_store_trie_updates_comprehensive() { let dir = TempDir::new().unwrap(); let store = MdbxProofsStorage::new(dir.path()).expect("env"); @@ -1629,17 +1621,17 @@ mod tests { let mut block_state_diff_post_state = HashedPostState::default(); // Add account trie nodes - block_state_diff_trie_updates.account_nodes.insert(account_path1, account_node1.clone()); - block_state_diff_trie_updates.account_nodes.insert(account_path2, account_node2.clone()); + block_state_diff_trie_updates.account_nodes.insert(account_path1, account_node1); + block_state_diff_trie_updates.account_nodes.insert(account_path2, account_node2); block_state_diff_trie_updates.removed_nodes.insert(removed_account_path); // Add storage trie nodes for two addresses let mut storage_nodes1 = StorageTrieUpdates::default(); - storage_nodes1.storage_nodes.insert(storage_path1, storage_node1.clone()); + storage_nodes1.storage_nodes.insert(storage_path1, storage_node1); block_state_diff_trie_updates.storage_tries.insert(addr1, storage_nodes1); let mut storage_nodes2 = StorageTrieUpdates::default(); - storage_nodes2.storage_nodes.insert(storage_path2, storage_node2.clone()); + storage_nodes2.storage_nodes.insert(storage_path2, storage_node2); block_state_diff_trie_updates.storage_tries.insert(addr2, storage_nodes2); // Add hashed accounts (one Some, one None) @@ -1660,7 +1652,7 @@ mod tests { sorted_trie_updates: block_state_diff_trie_updates.into_sorted(), sorted_post_state: block_state_diff_post_state.into_sorted(), }; - store.store_trie_updates(BLOCK, block_state_diff).await.expect("store"); + store.store_trie_updates(BLOCK, block_state_diff).expect("store"); // Verify account trie nodes { @@ -1773,8 +1765,8 @@ mod tests { } } - #[tokio::test] - async fn store_trie_updates_out_of_order_rejects() { + 
#[test] + fn store_trie_updates_out_of_order_rejects() { let dir = tempfile::TempDir::new().unwrap(); let store = MdbxProofsStorage::new(dir.path()).expect("env"); @@ -1782,7 +1774,6 @@ mod tests { let existing_block = BlockWithParent::new(B256::random(), NumHash::new(1, B256::random())); store .set_earliest_block_number(existing_block.block.number, existing_block.block.hash) - .await .expect("set"); // incoming block whose parent != existing latest @@ -1791,15 +1782,15 @@ mod tests { BlockWithParent::new(bad_parent, NumHash::new(2, B256::ZERO)); let diff = BlockStateDiff::default(); - let res = store.store_trie_updates(bad_block, diff).await; + let res = store.store_trie_updates(bad_block, diff); assert!(matches!(res, Err(OpProofsStorageError::OutOfOrder { .. }))); // verify nothing written: proof window still unchanged - let latest = store.get_latest_block_number().await.expect("get latest"); + let latest = store.get_latest_block_number().expect("get latest"); assert_eq!(latest.unwrap().1, existing_block.block.hash); } - #[tokio::test] - async fn store_trie_updates_multiple_blocks_append_versions() { + #[test] + fn store_trie_updates_multiple_blocks_append_versions() { let dir = tempfile::TempDir::new().unwrap(); let store = MdbxProofsStorage::new(dir.path()).expect("env"); @@ -1813,7 +1804,7 @@ mod tests { sorted_trie_updates: TrieUpdatesSorted::default(), sorted_post_state: diff_a_post_state.into_sorted(), }; - store.store_trie_updates(block_a, diff_a).await.expect("store A"); + store.store_trie_updates(block_a, diff_a).expect("store A"); // block B (parent = hash of A) let block_b = BlockWithParent::new(block_a.block.hash, NumHash::new(2, B256::random())); @@ -1824,7 +1815,7 @@ mod tests { sorted_trie_updates: TrieUpdatesSorted::default(), sorted_post_state: diff_b_post_state.into_sorted(), }; - store.store_trie_updates(block_b, diff_b).await.expect("store B"); + store.store_trie_updates(block_b, diff_b).expect("store B"); // verify we can retrieve entries for 
both block numbers let tx = store.env.tx().expect("tx"); @@ -1837,8 +1828,8 @@ mod tests { assert_eq!(v_b.block_number, block_b.block.number); } - #[tokio::test] - async fn test_store_trie_updates_empty_collections() { + #[test] + fn test_store_trie_updates_empty_collections() { let dir = TempDir::new().unwrap(); let store = MdbxProofsStorage::new(dir.path()).expect("env"); @@ -1849,7 +1840,7 @@ mod tests { let block_state_diff = BlockStateDiff::default(); // This should work without errors - store.store_trie_updates(BLOCK, block_state_diff).await.expect("store"); + store.store_trie_updates(BLOCK, block_state_diff).expect("store"); // Verify nothing was written (should be empty) let tx = store.env.tx().expect("tx"); @@ -1873,33 +1864,33 @@ mod tests { ); } - #[tokio::test] - async fn fetch_trie_updates_missing_changeset_returns_error() { + #[test] + fn fetch_trie_updates_missing_changeset_returns_error() { let dir = TempDir::new().unwrap(); let store = MdbxProofsStorage::new(dir.path()).expect("env"); - let res = store.fetch_trie_updates(99).await; + let res = store.fetch_trie_updates(99); assert!(matches!(res, Err(OpProofsStorageError::NoChangeSetForBlock(99)))); } - #[tokio::test] - async fn fetch_trie_updates_empty_changeset() { + #[test] + fn fetch_trie_updates_empty_changeset() { let dir = TempDir::new().unwrap(); let store = MdbxProofsStorage::new(dir.path()).expect("env"); let block = BlockWithParent::new(B256::ZERO, NumHash::new(1, B256::random())); let diff = BlockStateDiff::default(); - store.store_trie_updates(block, diff).await.expect("store"); - let got = store.fetch_trie_updates(1).await.expect("fetch"); + store.store_trie_updates(block, diff).expect("store"); + let got = store.fetch_trie_updates(1).expect("fetch"); assert!(got.sorted_trie_updates.account_nodes_ref().is_empty()); assert!(got.sorted_trie_updates.storage_tries_ref().is_empty()); assert!(got.sorted_post_state.accounts.is_empty()); assert!(got.sorted_post_state.storages.is_empty()); } - 
#[tokio::test] - async fn fetch_trie_updates_missing_account_history_entry_returns_error() { + #[test] + fn fetch_trie_updates_missing_account_history_entry_returns_error() { let dir = TempDir::new().unwrap(); let store = MdbxProofsStorage::new(dir.path()).expect("env"); @@ -1919,12 +1910,12 @@ mod tests { tx.commit().unwrap(); } - let res = store.fetch_trie_updates(1).await; + let res = store.fetch_trie_updates(1); assert!(matches!(res, Err(OpProofsStorageError::MissingAccountTrieHistory(..)))); } - #[tokio::test] - async fn fetch_trie_updates_account_history_seek_returns_later_block_treated_as_missing() { + #[test] + fn fetch_trie_updates_account_history_seek_returns_later_block_treated_as_missing() { let dir = TempDir::new().unwrap(); let store = MdbxProofsStorage::new(dir.path()).expect("env"); @@ -1955,12 +1946,12 @@ mod tests { // fetch block 1 -> seek will find block 2 but block_number != 1 so expect // MissingAccountTrieHistory - let res = store.fetch_trie_updates(1).await; + let res = store.fetch_trie_updates(1); assert!(matches!(res, Err(OpProofsStorageError::MissingAccountTrieHistory(..)))); } - #[tokio::test] - async fn fetch_trie_updates_missing_storage_history_entry_returns_error() { + #[test] + fn fetch_trie_updates_missing_storage_history_entry_returns_error() { let dir = TempDir::new().unwrap(); let store = MdbxProofsStorage::new(dir.path()).expect("env"); @@ -1983,12 +1974,12 @@ mod tests { tx.commit().unwrap(); } - let res = store.fetch_trie_updates(1).await; + let res = store.fetch_trie_updates(1); assert!(matches!(res, Err(OpProofsStorageError::MissingStorageTrieHistory(..)))); } - #[tokio::test] - async fn fetch_trie_updates_storage_history_seek_returns_later_block_treated_as_missing() { + #[test] + fn fetch_trie_updates_storage_history_seek_returns_later_block_treated_as_missing() { let dir = TempDir::new().unwrap(); let store = MdbxProofsStorage::new(dir.path()).expect("env"); @@ -2023,12 +2014,12 @@ mod tests { // fetch block 1 -> seek will 
find block 2 but block_number != 1 so expect // MissingStorageTrieHistory - let res = store.fetch_trie_updates(1).await; + let res = store.fetch_trie_updates(1); assert!(matches!(res, Err(OpProofsStorageError::MissingStorageTrieHistory(..)))); } - #[tokio::test] - async fn fetch_trie_updates_missing_hashed_account_entry_returns_error() { + #[test] + fn fetch_trie_updates_missing_hashed_account_entry_returns_error() { let dir = TempDir::new().unwrap(); let store = MdbxProofsStorage::new(dir.path()).expect("env"); @@ -2048,12 +2039,12 @@ mod tests { tx.commit().unwrap(); } - let res = store.fetch_trie_updates(1).await; + let res = store.fetch_trie_updates(1); assert!(matches!(res, Err(OpProofsStorageError::MissingHashedAccountHistory(..)))); } - #[tokio::test] - async fn fetch_trie_updates_hashed_account_seek_returns_later_block_treated_as_missing() { + #[test] + fn fetch_trie_updates_hashed_account_seek_returns_later_block_treated_as_missing() { let dir = TempDir::new().unwrap(); let store = MdbxProofsStorage::new(dir.path()).expect("env"); @@ -2082,12 +2073,12 @@ mod tests { // fetch block 1 -> seek will find block 2 but block_number != 1 so expect // MissingHashedAccountHistory - let res = store.fetch_trie_updates(1).await; + let res = store.fetch_trie_updates(1); assert!(matches!(res, Err(OpProofsStorageError::MissingHashedAccountHistory(..)))); } - #[tokio::test] - async fn fetch_trie_updates_missing_hashed_storage_entry_returns_error() { + #[test] + fn fetch_trie_updates_missing_hashed_storage_entry_returns_error() { let dir = TempDir::new().unwrap(); let store = MdbxProofsStorage::new(dir.path()).expect("env"); @@ -2110,12 +2101,12 @@ mod tests { tx.commit().unwrap(); } - let res = store.fetch_trie_updates(1).await; + let res = store.fetch_trie_updates(1); assert!(matches!(res, Err(OpProofsStorageError::MissingHashedStorageHistory { .. 
}))); } - #[tokio::test] - async fn fetch_trie_updates_hashed_storage_seek_returns_later_block_treated_as_missing() { + #[test] + fn fetch_trie_updates_hashed_storage_seek_returns_later_block_treated_as_missing() { let dir = TempDir::new().unwrap(); let store = MdbxProofsStorage::new(dir.path()).expect("env"); @@ -2147,12 +2138,12 @@ mod tests { // fetch block 1 -> seek will find block 2 but block_number != 1 so expect // MissingHashedStorageHistory - let res = store.fetch_trie_updates(1).await; + let res = store.fetch_trie_updates(1); assert!(matches!(res, Err(OpProofsStorageError::MissingHashedStorageHistory { .. }))); } - #[tokio::test] - async fn fetch_trie_updates_basic() { + #[test] + fn fetch_trie_updates_basic() { let dir = TempDir::new().unwrap(); let store = MdbxProofsStorage::new(dir.path()).expect("env"); @@ -2183,11 +2174,11 @@ mod tests { // Construct BlockStateDiff let mut block_state_diff_trie_updates = TrieUpdates::default(); - block_state_diff_trie_updates.account_nodes.insert(account_path1, account_node1.clone()); - block_state_diff_trie_updates.account_nodes.insert(account_path2, account_node2.clone()); + block_state_diff_trie_updates.account_nodes.insert(account_path1, account_node1); + block_state_diff_trie_updates.account_nodes.insert(account_path2, account_node2); // storage trie for addr1 let mut storage_nodes1 = StorageTrieUpdates::default(); - storage_nodes1.storage_nodes.insert(storage_path1, storage_node1.clone()); + storage_nodes1.storage_nodes.insert(storage_path1, storage_node1); block_state_diff_trie_updates.storage_tries.insert(addr1, storage_nodes1); // hashed accounts: addr1 -> Some, addr2 -> None @@ -2209,8 +2200,8 @@ mod tests { sorted_trie_updates: block_state_diff_trie_updates.into_sorted(), sorted_post_state: block_state_diff_post_state.into_sorted(), }; - store.store_trie_updates(block, block_state_diff.clone()).await.expect("store"); - let got = store.fetch_trie_updates(1).await.expect("fetch"); + 
store.store_trie_updates(block, block_state_diff.clone()).expect("store"); + let got = store.fetch_trie_updates(1).expect("fetch"); // verify trie updates assert_eq!( @@ -2227,12 +2218,12 @@ mod tests { assert_eq!(got.sorted_post_state.storages, block_state_diff.sorted_post_state.storages); } - #[tokio::test] - async fn test_prune_earliest_state_single_entry() { + #[test] + fn test_prune_earliest_state_single_entry() { let dir = TempDir::new().unwrap(); let store = MdbxProofsStorage::new(dir.path()).expect("env"); let block = BlockWithParent::new(B256::ZERO, NumHash::new(1, B256::random())); - store.set_earliest_block_number(0, B256::ZERO).await.unwrap(); + store.set_earliest_block_number(0, B256::ZERO).unwrap(); // Insert a single entry to be pruned let addr = B256::random(); @@ -2242,11 +2233,11 @@ mod tests { sorted_post_state: state_diff_post_state.into_sorted(), ..Default::default() }; - store.store_trie_updates(block, state_diff).await.unwrap(); + store.store_trie_updates(block, state_diff).unwrap(); // Prune the entry - pass empty diff since we're just removing data let next_block = BlockWithParent::new(block.block.hash, NumHash::new(2, B256::random())); - store.prune_earliest_state(next_block).await.unwrap(); + store.prune_earliest_state(next_block).unwrap(); // Verify the entry was pruned let tx = store.env.tx().unwrap(); @@ -2263,16 +2254,16 @@ mod tests { assert!(pruning_cur.seek_exact(block.block.number).unwrap().is_none()); // Verify earliest block was updated - let earliest = store.get_earliest_block_number().await.unwrap(); + let earliest = store.get_earliest_block_number().unwrap(); assert_eq!(earliest, Some((2, next_block.block.hash))); } - #[tokio::test] - async fn test_prune_earliest_state_multiple_entries_same_block() { + #[test] + fn test_prune_earliest_state_multiple_entries_same_block() { let dir = TempDir::new().unwrap(); let store = MdbxProofsStorage::new(dir.path()).expect("env"); let block = BlockWithParent::new(B256::ZERO, 
NumHash::new(1, B256::random())); - store.set_earliest_block_number(0, B256::ZERO).await.unwrap(); + store.set_earliest_block_number(0, B256::ZERO).unwrap(); // Insert multiple entries for the same block let addr1 = B256::random(); @@ -2284,11 +2275,11 @@ mod tests { sorted_post_state: state_diff_post_state.into_sorted(), ..Default::default() }; - store.store_trie_updates(block, state_diff).await.unwrap(); + store.store_trie_updates(block, state_diff).unwrap(); // Prune the entries let next_block = BlockWithParent::new(block.block.hash, NumHash::new(2, B256::random())); - store.prune_earliest_state(next_block).await.unwrap(); + store.prune_earliest_state(next_block).unwrap(); // Verify the entries were pruned let tx = store.env.tx().unwrap(); @@ -2306,14 +2297,14 @@ mod tests { assert!(pruning_cur.seek_exact(block.block.number).unwrap().is_none()); } - #[tokio::test] - async fn test_prune_earliest_state_multiple_blocks() { + #[test] + fn test_prune_earliest_state_multiple_blocks() { let dir = TempDir::new().unwrap(); let store = MdbxProofsStorage::new(dir.path()).expect("env"); let block_1 = BlockWithParent::new(B256::ZERO, NumHash::new(1, B256::random())); let block_2 = BlockWithParent::new(block_1.block.hash, NumHash::new(2, B256::random())); let block_3 = BlockWithParent::new(block_2.block.hash, NumHash::new(3, B256::random())); - store.set_earliest_block_number(0, B256::ZERO).await.unwrap(); + store.set_earliest_block_number(0, B256::ZERO).unwrap(); // Insert entries for multiple blocks let addr1 = B256::random(); @@ -2324,7 +2315,7 @@ mod tests { sorted_post_state: state_diff1_post_state.into_sorted(), ..Default::default() }; - store.store_trie_updates(block_1, state_diff1).await.unwrap(); + store.store_trie_updates(block_1, state_diff1).unwrap(); let mut state_diff2_post_state = HashedPostState::default(); state_diff2_post_state.accounts.insert(addr2, Some(Account::default())); @@ -2332,10 +2323,10 @@ mod tests { sorted_post_state: 
state_diff2_post_state.into_sorted(), ..Default::default() }; - store.store_trie_updates(block_2, state_diff2).await.unwrap(); + store.store_trie_updates(block_2, state_diff2).unwrap(); // Prune up to block 3 (should remove blocks 1 and 2) - store.prune_earliest_state(block_3).await.unwrap(); + store.prune_earliest_state(block_3).unwrap(); // Verify the entries were pruned let tx = store.env.tx().unwrap(); @@ -2354,39 +2345,39 @@ mod tests { assert!(pruning_cur.seek_exact(2).unwrap().is_none()); } - #[tokio::test] - async fn test_prune_earliest_state_no_op() { + #[test] + fn test_prune_earliest_state_no_op() { let dir = TempDir::new().unwrap(); let store = MdbxProofsStorage::new(dir.path()).expect("env"); - store.set_earliest_block_number(1, B256::random()).await.unwrap(); + store.set_earliest_block_number(1, B256::random()).unwrap(); // Attempt to prune with a new earliest block that is not newer let block_1 = BlockWithParent::new(B256::ZERO, NumHash::new(1, B256::random())); let block_0 = BlockWithParent::new(B256::ZERO, NumHash::new(0, B256::random())); - store.prune_earliest_state(block_1).await.unwrap(); - store.prune_earliest_state(block_0).await.unwrap(); + store.prune_earliest_state(block_1).unwrap(); + store.prune_earliest_state(block_0).unwrap(); // Nothing should have been pruned, this call should not panic or error } - #[tokio::test] - async fn test_prune_earliest_state_no_entries_to_prune() { + #[test] + fn test_prune_earliest_state_no_entries_to_prune() { let dir = TempDir::new().unwrap(); let store = MdbxProofsStorage::new(dir.path()).expect("env"); - store.set_earliest_block_number(1, B256::random()).await.unwrap(); + store.set_earliest_block_number(1, B256::random()).unwrap(); // Prune a range where no entries exist let block_10 = BlockWithParent::new(B256::ZERO, NumHash::new(10, B256::random())); - store.prune_earliest_state(block_10).await.unwrap(); + store.prune_earliest_state(block_10).unwrap(); // Nothing should have been pruned, this call 
should not panic or error } - #[tokio::test] - async fn test_prune_earliest_state_with_diff_insertion() { + #[test] + fn test_prune_earliest_state_with_diff_insertion() { let dir = TempDir::new().unwrap(); let store = MdbxProofsStorage::new(dir.path()).expect("env"); - store.set_earliest_block_number(0, B256::ZERO).await.unwrap(); + store.set_earliest_block_number(0, B256::ZERO).unwrap(); // Insert entries for blocks 1 and 2 let addr1 = B256::random(); @@ -2401,7 +2392,7 @@ mod tests { sorted_post_state: state_diff1_post_state.into_sorted(), ..Default::default() }; - store.store_trie_updates(block_1, state_diff1).await.unwrap(); + store.store_trie_updates(block_1, state_diff1).unwrap(); let block_2 = BlockWithParent::new(block_1.block.hash, NumHash::new(2, B256::random())); let mut state_diff2_post_state = HashedPostState::default(); @@ -2410,11 +2401,11 @@ mod tests { sorted_post_state: state_diff2_post_state.into_sorted(), ..Default::default() }; - store.store_trie_updates(block_2, state_diff2).await.unwrap(); + store.store_trie_updates(block_2, state_diff2).unwrap(); // Now prune to block 3 let block_3 = BlockWithParent::new(block_2.block.hash, NumHash::new(3, B256::random())); - store.prune_earliest_state(block_3).await.unwrap(); + store.prune_earliest_state(block_3).unwrap(); let tx = store.env.tx().unwrap(); let mut cur = tx.new_cursor::().unwrap(); @@ -2435,15 +2426,15 @@ mod tests { assert!(pruning_cur.seek_exact(2).unwrap().is_none()); // Verify earliest block was updated - let earliest = store.get_earliest_block_number().await.unwrap(); + let earliest = store.get_earliest_block_number().unwrap(); assert_eq!(earliest, Some((3, block_3.block.hash))); } - #[tokio::test] - async fn test_prune_earliest_state_with_removed_nodes() { + #[test] + fn test_prune_earliest_state_with_removed_nodes() { let dir = TempDir::new().unwrap(); let store = MdbxProofsStorage::new(dir.path()).expect("env"); - store.set_earliest_block_number(0, B256::ZERO).await.unwrap(); + 
store.set_earliest_block_number(0, B256::ZERO).unwrap(); // Create some trie nodes in blocks 1, 2, 3 let path1 = Nibbles::from_nibbles_unchecked([0x01, 0x02]); @@ -2453,12 +2444,12 @@ mod tests { let block_1 = BlockWithParent::new(B256::ZERO, NumHash::new(1, B256::random())); let mut diff1_trie_updates = TrieUpdates::default(); - diff1_trie_updates.account_nodes.insert(path1, node1.clone()); + diff1_trie_updates.account_nodes.insert(path1, node1); let diff1 = BlockStateDiff { sorted_trie_updates: diff1_trie_updates.into_sorted(), ..Default::default() }; - store.store_trie_updates(block_1, diff1).await.unwrap(); + store.store_trie_updates(block_1, diff1).unwrap(); let block_2 = BlockWithParent::new(block_1.block.hash, NumHash::new(2, B256::random())); let mut diff2_trie_updates = TrieUpdates::default(); @@ -2467,7 +2458,7 @@ mod tests { sorted_trie_updates: diff2_trie_updates.into_sorted(), ..Default::default() }; - store.store_trie_updates(block_2, diff2).await.unwrap(); + store.store_trie_updates(block_2, diff2).unwrap(); // In block 3, path1 is deleted (stored as None in the database) // This happens when we store trie updates with path1 mapped to None @@ -2507,7 +2498,7 @@ mod tests { // - path1 should be in removed_nodes (it was deleted in block 3) // - path2 should be included with its value (it still exists from block 2) let block_5 = BlockWithParent::new(B256::random(), NumHash::new(5, B256::random())); - store.prune_earliest_state(block_5).await.unwrap(); + store.prune_earliest_state(block_5).unwrap(); // Verify that all entries for path1 before block 5 were removed let tx = store.env.tx().unwrap(); @@ -2534,11 +2525,11 @@ mod tests { assert_eq!(v2.value.0, Some(node2)); } - #[tokio::test] - async fn test_prune_earliest_state_overlapping_keys() { + #[test] + fn test_prune_earliest_state_overlapping_keys() { let dir = TempDir::new().unwrap(); let store = MdbxProofsStorage::new(dir.path()).expect("env"); - store.set_earliest_block_number(0, 
B256::ZERO).await.unwrap(); + store.set_earliest_block_number(0, B256::ZERO).unwrap(); // Use overlapping key (addr1 updated in blocks 1 and 2) let addr1 = B256::random(); @@ -2552,7 +2543,7 @@ mod tests { sorted_post_state: diff1_post_state.into_sorted(), ..Default::default() }; - store.store_trie_updates(block_1, diff1).await.unwrap(); + store.store_trie_updates(block_1, diff1).unwrap(); let block_2 = BlockWithParent::new(block_1.block.hash, NumHash::new(2, B256::random())); let mut diff2_post_state = HashedPostState::default(); @@ -2561,11 +2552,11 @@ mod tests { sorted_post_state: diff2_post_state.into_sorted(), ..Default::default() }; - store.store_trie_updates(block_2, diff2).await.unwrap(); + store.store_trie_updates(block_2, diff2).unwrap(); // Prune to block 3 let block_3 = BlockWithParent::new(block_2.block.hash, NumHash::new(3, B256::random())); - store.prune_earliest_state(block_3).await.unwrap(); + store.prune_earliest_state(block_3).unwrap(); let tx = store.env.tx().unwrap(); let mut cur = tx.new_cursor::().unwrap(); @@ -2577,11 +2568,11 @@ mod tests { assert_eq!(vv1.value.0, Some(acc2)); } - #[tokio::test] - async fn test_prune_earliest_state_comprehensive() { + #[test] + fn test_prune_earliest_state_comprehensive() { let dir = TempDir::new().unwrap(); let store = MdbxProofsStorage::new(dir.path()).expect("env"); - store.set_earliest_block_number(0, B256::ZERO).await.unwrap(); + store.set_earliest_block_number(0, B256::ZERO).unwrap(); // Setup complex scenario with accounts, storage, and trie nodes let addr1 = B256::random(); @@ -2612,7 +2603,7 @@ mod tests { sorted_post_state: diff1_post_state.into_sorted(), sorted_trie_updates: diff1_trie_updates.into_sorted(), }; - store.store_trie_updates(block_1, diff_1).await.unwrap(); + store.store_trie_updates(block_1, diff_1).unwrap(); // Block 2: Update account (overwriting addr1) let acc2 = Account { nonce: 2, balance: U256::from(200), ..Default::default() }; @@ -2624,11 +2615,11 @@ mod tests { 
sorted_post_state: diff2_post_state.into_sorted(), ..Default::default() }; - store.store_trie_updates(block_2, diff2).await.unwrap(); + store.store_trie_updates(block_2, diff2).unwrap(); // Prune to block 3 let block_3 = BlockWithParent::new(block_2.block.hash, NumHash::new(3, B256::random())); - store.prune_earliest_state(block_3).await.unwrap(); + store.prune_earliest_state(block_3).unwrap(); let tx = store.env.tx().unwrap(); @@ -2673,11 +2664,11 @@ mod tests { assert!(change_cur.seek_exact(2).unwrap().is_none()); } - #[tokio::test] - async fn test_prune_earliest_state_churn_create_delete_recreate() { + #[test] + fn test_prune_earliest_state_churn_create_delete_recreate() { let dir = TempDir::new().unwrap(); let store = MdbxProofsStorage::new(dir.path()).expect("env"); - store.set_earliest_block_number(0, B256::ZERO).await.unwrap(); + store.set_earliest_block_number(0, B256::ZERO).unwrap(); let addr = B256::random(); @@ -2691,7 +2682,6 @@ mod tests { b1, BlockStateDiff { sorted_post_state: diff1.into_sorted(), ..Default::default() }, ) - .await .unwrap(); // Block 2: Delete @@ -2703,7 +2693,6 @@ mod tests { b2, BlockStateDiff { sorted_post_state: diff2.into_sorted(), ..Default::default() }, ) - .await .unwrap(); // Block 3: Recreate @@ -2716,11 +2705,10 @@ mod tests { b3, BlockStateDiff { sorted_post_state: diff3.into_sorted(), ..Default::default() }, ) - .await .unwrap(); // Prune to Block 3 - store.prune_earliest_state(b3).await.unwrap(); + store.prune_earliest_state(b3).unwrap(); let tx = store.env.tx().unwrap(); let mut cur = tx.new_cursor::().unwrap(); @@ -2750,11 +2738,11 @@ mod tests { assert_eq!(val.value.0, Some(acc3)); } - #[tokio::test] - async fn test_prune_earliest_state_returns_correct_counts() { + #[test] + fn test_prune_earliest_state_returns_correct_counts() { let dir = TempDir::new().unwrap(); let store = MdbxProofsStorage::new(dir.path()).expect("env"); - store.set_earliest_block_number(0, B256::ZERO).await.unwrap(); + 
store.set_earliest_block_number(0, B256::ZERO).unwrap(); let addr = B256::random(); @@ -2768,7 +2756,6 @@ mod tests { b1, BlockStateDiff { sorted_post_state: diff1.into_sorted(), ..Default::default() }, ) - .await .unwrap(); // Block 2: Update @@ -2781,47 +2768,46 @@ mod tests { b2, BlockStateDiff { sorted_post_state: diff2.into_sorted(), ..Default::default() }, ) - .await .unwrap(); // Prune to Block 2. // Survivor is at Block 2. // Block 1 should be deleted. // Count should be 1. - let counts = store.prune_earliest_state(b2).await.unwrap(); + let counts = store.prune_earliest_state(b2).unwrap(); assert_eq!(counts.hashed_accounts_written_total, 1); assert_eq!(counts.account_trie_updates_written_total, 0); } - #[tokio::test] - async fn test_prune_earliest_state_empty_window_updates_pointer() { + #[test] + fn test_prune_earliest_state_empty_window_updates_pointer() { let dir = TempDir::new().unwrap(); let store = MdbxProofsStorage::new(dir.path()).expect("env"); - store.set_earliest_block_number(0, B256::ZERO).await.unwrap(); + store.set_earliest_block_number(0, B256::ZERO).unwrap(); let target = BlockWithParent::new(B256::random(), NumHash::new(5, B256::random())); // Prune empty - store.prune_earliest_state(target).await.unwrap(); + store.prune_earliest_state(target).unwrap(); - let earliest = store.get_earliest_block_number().await.unwrap(); + let earliest = store.get_earliest_block_number().unwrap(); assert_eq!(earliest, Some((5, target.block.hash))); } - #[tokio::test] - async fn test_prune_earliest_state_uninitialized_guard() { + #[test] + fn test_prune_earliest_state_uninitialized_guard() { let dir = TempDir::new().unwrap(); let store = MdbxProofsStorage::new(dir.path()).expect("env"); // Earliest not set let target = BlockWithParent::new(B256::random(), NumHash::new(5, B256::random())); - let counts = store.prune_earliest_state(target).await.unwrap(); + let counts = store.prune_earliest_state(target).unwrap(); assert_eq!(counts, WriteCounts::default()); // 
Check earliest is still None - assert_eq!(store.get_earliest_block_number().await.unwrap(), None); + assert_eq!(store.get_earliest_block_number().unwrap(), None); } #[test] @@ -2877,8 +2863,8 @@ mod tests { assert!(walker.next().is_none()); } - #[tokio::test] - async fn store_trie_updates_deleted_account_trie() { + #[test] + fn store_trie_updates_deleted_account_trie() { let dir = TempDir::new().unwrap(); let store = MdbxProofsStorage::new(dir.path()).expect("env"); @@ -2893,7 +2879,7 @@ mod tests { sorted_trie_updates: diff_trie_updates.into_sorted(), sorted_post_state: HashedPostStateSorted::default(), }; - store.store_trie_updates(BLOCK, diff).await.expect("store"); + store.store_trie_updates(BLOCK, diff).expect("store"); // Verify deletion was written at BLOCK let tx = store.env.tx().expect("tx"); @@ -2906,8 +2892,8 @@ mod tests { assert!(vv.value.0.is_none(), "expected account trie deletion"); } - #[tokio::test] - async fn store_trie_updates_deleted_storage_trie() { + #[test] + fn store_trie_updates_deleted_storage_trie() { let dir = TempDir::new().unwrap(); let store = MdbxProofsStorage::new(dir.path()).expect("env"); @@ -2927,7 +2913,7 @@ mod tests { sorted_trie_updates: diff_trie_updates.into_sorted(), sorted_post_state: HashedPostStateSorted::default(), }; - store.store_trie_updates(BLOCK, diff).await.expect("store"); + store.store_trie_updates(BLOCK, diff).expect("store"); // Verify deletion was written at BLOCK let tx = store.env.tx().expect("tx"); @@ -2938,8 +2924,8 @@ mod tests { assert!(vv.value.0.is_none(), "expected storage trie deletion"); } - #[tokio::test] - async fn store_trie_updates_wiped_storage_trie_nodes() { + #[test] + fn store_trie_updates_wiped_storage_trie_nodes() { use reth_trie::updates::StorageTrieUpdates; let dir = TempDir::new().unwrap(); @@ -2955,11 +2941,7 @@ mod tests { let n2 = BranchNodeCompact::default(); store - .store_storage_branches( - addr_wiped, - vec![(p1, Some(n1.clone())), (p2, Some(n2.clone()))], - ) - .await + 
.store_storage_branches(addr_wiped, vec![(p1, Some(n1)), (p2, Some(n2))]) .expect("seed wiped addr trie nodes"); // Build a BlockStateDiff that wipes addr_wiped's storage trie, and @@ -2977,7 +2959,7 @@ mod tests { let live_path = Nibbles::from_nibbles_unchecked([0xEE, 0xFF]); let live_node = BranchNodeCompact::default(); let mut live_updates = StorageTrieUpdates::default(); - live_updates.storage_nodes.insert(live_path, live_node.clone()); + live_updates.storage_nodes.insert(live_path, live_node); diff_trie_updates.storage_tries.insert(addr_live, live_updates); // Execute the store @@ -2985,7 +2967,7 @@ mod tests { sorted_trie_updates: diff_trie_updates.into_sorted(), sorted_post_state: HashedPostStateSorted::default(), }; - store.store_trie_updates(BLOCK, diff).await.expect("store"); + store.store_trie_updates(BLOCK, diff).expect("store"); // Verify: for addr_wiped, each previously existing path now has a deletion tombstone at // BLOCK. @@ -3019,8 +3001,8 @@ mod tests { } } - #[tokio::test] - async fn store_trie_updates_wiped_storage() { + #[test] + fn store_trie_updates_wiped_storage() { let dir = TempDir::new().unwrap(); let store = MdbxProofsStorage::new(dir.path()).expect("env"); @@ -3035,7 +3017,7 @@ mod tests { let v2 = U256::from(222u64); // Seed prior storage (block_number = 0 in store_hashed_storages) - store.store_hashed_storages(addr, vec![(s1, v1), (s2, v2)]).await.expect("seed"); + store.store_hashed_storages(addr, vec![(s1, v1), (s2, v2)]).expect("seed"); // Build BlockStateDiff that marks this address as wiped at BLOCK let mut diff_post_state = HashedPostState::default(); @@ -3049,7 +3031,7 @@ mod tests { sorted_trie_updates: TrieUpdatesSorted::default(), sorted_post_state: diff_post_state.into_sorted(), }; - store.store_trie_updates(BLOCK, diff).await.expect("store"); + store.store_trie_updates(BLOCK, diff).expect("store"); // Verify: for each pre-existing slot, there should be a tombstone (MaybeDeleted(None)) at // BLOCK. 
@@ -3070,8 +3052,8 @@ mod tests { } } - #[tokio::test] - async fn store_trie_updates_wiped_and_non_wiped_mixed_order() { + #[test] + fn store_trie_updates_wiped_and_non_wiped_mixed_order() { let dir = TempDir::new().unwrap(); let store = MdbxProofsStorage::new(dir.path()).expect("env"); @@ -3092,9 +3074,8 @@ mod tests { // Seed prior storage at block 0 for BOTH addresses store .store_hashed_storages(addr_wiped, vec![(ws1, wv1), (ws2, wv2)]) - .await .expect("seed wiped addr"); - store.store_hashed_storages(addr_live, vec![(ls1, lv1_old)]).await.expect("seed live addr"); + store.store_hashed_storages(addr_live, vec![(ls1, lv1_old)]).expect("seed live addr"); // Build diff: wiped first (by address sort), then non-wiped with a write const BLOCK: BlockWithParent = @@ -3115,7 +3096,7 @@ mod tests { sorted_trie_updates: TrieUpdatesSorted::default(), sorted_post_state: diff_post_state.into_sorted(), }; - store.store_trie_updates(BLOCK, diff).await.expect("store"); + store.store_trie_updates(BLOCK, diff).expect("store"); // Verify: wiped address got tombstones at BLOCK for each pre-existing slot { @@ -3148,35 +3129,35 @@ mod tests { } } - #[tokio::test] - async fn test_proof_window() { + #[test] + fn test_proof_window() { let dir = TempDir::new().unwrap(); let store = MdbxProofsStorage::new(dir.path()).expect("env"); // Test initial state (no values set) - let initial_value = store.get_earliest_block_number().await.expect("get earliest"); + let initial_value = store.get_earliest_block_number().expect("get earliest"); assert_eq!(initial_value, None); // Test setting the value let block_number = 42u64; let hash = B256::random(); - store.set_earliest_block_number(block_number, hash).await.expect("set earliest"); + store.set_earliest_block_number(block_number, hash).expect("set earliest"); // Verify value was stored correctly - let retrieved = store.get_earliest_block_number().await.expect("get earliest"); + let retrieved = store.get_earliest_block_number().expect("get 
earliest"); assert_eq!(retrieved, Some((block_number, hash))); // Test updating with new values let new_block_number = 100u64; let new_hash = B256::random(); - store.set_earliest_block_number(new_block_number, new_hash).await.expect("update earliest"); + store.set_earliest_block_number(new_block_number, new_hash).expect("update earliest"); // Verify update worked - let updated = store.get_earliest_block_number().await.expect("get updated earliest"); + let updated = store.get_earliest_block_number().expect("get updated earliest"); assert_eq!(updated, Some((new_block_number, new_hash))); // Verify that latest_block falls back to earliest when not set - let latest = store.get_latest_block_number().await.expect("get latest"); + let latest = store.get_latest_block_number().expect("get latest"); assert_eq!( latest, Some((new_block_number, new_hash)), @@ -3184,8 +3165,8 @@ mod tests { ); } - #[tokio::test] - async fn replace_updates_prunes_and_adds_new_chain() { + #[test] + fn replace_updates_prunes_and_adds_new_chain() { let dir = tempfile::TempDir::new().unwrap(); let store = MdbxProofsStorage::new(dir.path()).expect("env"); @@ -3205,9 +3186,9 @@ mod tests { let b2 = BlockWithParent::new(b1.block.hash, NumHash::new(2, B256::random())); let b3 = BlockWithParent::new(b2.block.hash, NumHash::new(3, B256::random())); - store.store_trie_updates(b1, make_diff(10)).await.expect("store b1"); - store.store_trie_updates(b2, make_diff(20)).await.expect("store b2"); - store.store_trie_updates(b3, make_diff(30)).await.expect("store b3"); + store.store_trie_updates(b1, make_diff(10)).expect("store b1"); + store.store_trie_updates(b2, make_diff(20)).expect("store b2"); + store.store_trie_updates(b3, make_diff(30)).expect("store b3"); // Sanity: entries for 1,2,3 exist with expected nonces. 
{ @@ -3230,7 +3211,6 @@ mod tests { store .replace_updates(BlockNumHash::new(2, b2.block.hash), blocks_to_add) - .await .expect("replace_updates succeeds"); // --- Verify post-conditions --- @@ -3275,8 +3255,8 @@ mod tests { } } - #[tokio::test] - async fn test_unwind_history_basic() { + #[test] + fn test_unwind_history_basic() { let dir = TempDir::new().unwrap(); let store = MdbxProofsStorage::new(dir.path()).expect("env"); @@ -3297,14 +3277,14 @@ mod tests { let b3 = BlockWithParent::new(b2.block.hash, NumHash::new(3, B256::random())); let b4 = BlockWithParent::new(b3.block.hash, NumHash::new(4, B256::random())); - store.set_earliest_block_number_hash(b0.number, b0.hash).await.expect("set earliest"); - store.store_trie_updates(b1, make_diff(10)).await.expect("store b1"); - store.store_trie_updates(b2, make_diff(20)).await.expect("store b2"); - store.store_trie_updates(b3, make_diff(30)).await.expect("store b3"); - store.store_trie_updates(b4, make_diff(40)).await.expect("store b4"); + store.set_earliest_block_number_hash(b0.number, b0.hash).expect("set earliest"); + store.store_trie_updates(b1, make_diff(10)).expect("store b1"); + store.store_trie_updates(b2, make_diff(20)).expect("store b2"); + store.store_trie_updates(b3, make_diff(30)).expect("store b3"); + store.store_trie_updates(b4, make_diff(40)).expect("store b4"); // Unwind to block 2 - store.unwind_history(b2).await.expect("unwind"); + store.unwind_history(b2).expect("unwind"); // Verify: blocks 1 and 2 remain, blocks 3 and 4 are removed let tx = store.env.tx().expect("tx"); @@ -3325,8 +3305,8 @@ mod tests { assert_eq!(*latest.1.hash(), b2.parent); } - #[tokio::test] - async fn test_unwind_history_to_earliest() { + #[test] + fn test_unwind_history_to_earliest() { let dir = TempDir::new().unwrap(); let store = MdbxProofsStorage::new(dir.path()).expect("env"); @@ -3345,22 +3325,19 @@ mod tests { let b2 = BlockWithParent::new(b1.block.hash, NumHash::new(2, B256::random())); let b3 = 
BlockWithParent::new(b2.block.hash, NumHash::new(3, B256::random())); - store - .set_earliest_block_number_hash(b1.block.number, b1.block.hash) - .await - .expect("set earliest"); - store.store_trie_updates(b2, make_diff(20)).await.expect("store b2"); - store.store_trie_updates(b3, make_diff(30)).await.expect("store b3"); + store.set_earliest_block_number_hash(b1.block.number, b1.block.hash).expect("set earliest"); + store.store_trie_updates(b2, make_diff(20)).expect("store b2"); + store.store_trie_updates(b3, make_diff(30)).expect("store b3"); // Unwind to block b1 - let res = store.unwind_history(b1).await; + let res = store.unwind_history(b1); // should fail as we cannot unwind past earliest block assert!(res.is_err(), "unwind to earliest block should error"); assert!(matches!(res.unwrap_err(), OpProofsStorageError::UnwindBeyondEarliest { .. })); } - #[tokio::test] - async fn test_unwind_history_with_storage() { + #[test] + fn test_unwind_history_with_storage() { let dir = TempDir::new().unwrap(); let store = MdbxProofsStorage::new(dir.path()).expect("env"); @@ -3385,16 +3362,13 @@ mod tests { let b2 = BlockWithParent::new(b1.block.hash, NumHash::new(2, B256::random())); let b3 = BlockWithParent::new(b2.block.hash, NumHash::new(3, B256::random())); - store - .set_earliest_block_number_hash(b0.block.number, b0.block.hash) - .await - .expect("set earliest"); - store.store_trie_updates(b1, make_diff(10, 100)).await.expect("store b1"); - store.store_trie_updates(b2, make_diff(20, 200)).await.expect("store b2"); - store.store_trie_updates(b3, make_diff(30, 300)).await.expect("store b3"); + store.set_earliest_block_number_hash(b0.block.number, b0.block.hash).expect("set earliest"); + store.store_trie_updates(b1, make_diff(10, 100)).expect("store b1"); + store.store_trie_updates(b2, make_diff(20, 200)).expect("store b2"); + store.store_trie_updates(b3, make_diff(30, 300)).expect("store b3"); // Unwind to block 1 - store.unwind_history(b2).await.expect("unwind"); + 
store.unwind_history(b2).expect("unwind"); // Verify account history let tx = store.env.tx().expect("tx"); @@ -3426,8 +3400,8 @@ mod tests { ); } - #[tokio::test] - async fn test_unwind_history_with_trie_nodes() { + #[test] + fn test_unwind_history_with_trie_nodes() { let dir = TempDir::new().unwrap(); let store = MdbxProofsStorage::new(dir.path()).expect("env"); @@ -3451,13 +3425,13 @@ mod tests { let b2 = BlockWithParent::new(b1.block.hash, NumHash::new(2, B256::random())); let b3 = BlockWithParent::new(b2.block.hash, NumHash::new(3, B256::random())); - store.set_earliest_block_number_hash(b0.number, b0.hash).await.expect("set earliest"); - store.store_trie_updates(b1, make_diff(path1, node1.clone())).await.expect("store b1"); - store.store_trie_updates(b2, make_diff(path2, node2.clone())).await.expect("store b2"); - store.store_trie_updates(b3, make_diff(path1, node2.clone())).await.expect("store b3"); + store.set_earliest_block_number_hash(b0.number, b0.hash).expect("set earliest"); + store.store_trie_updates(b1, make_diff(path1, node1)).expect("store b1"); + store.store_trie_updates(b2, make_diff(path2, node2.clone())).expect("store b2"); + store.store_trie_updates(b3, make_diff(path1, node2)).expect("store b3"); // Unwind to block 1 - store.unwind_history(b2).await.expect("unwind"); + store.unwind_history(b2).expect("unwind"); // Verify trie node history let tx = store.env.tx().expect("tx"); @@ -3477,8 +3451,8 @@ mod tests { ); } - #[tokio::test] - async fn test_unwind_history_comprehensive() { + #[test] + fn test_unwind_history_comprehensive() { let dir = TempDir::new().unwrap(); let store = MdbxProofsStorage::new(dir.path()).expect("env"); @@ -3499,32 +3473,32 @@ mod tests { // Block 0: Set earliest block let b0 = NumHash::new(0, B256::random()); - store.set_earliest_block_number_hash(b0.number, b0.hash).await.expect("set earliest"); + store.set_earliest_block_number_hash(b0.number, b0.hash).expect("set earliest"); // Block 1: Insert multiple types of data 
let b1 = BlockWithParent::new(b0.hash, NumHash::new(1, B256::random())); let mut diff1_trie_updates = TrieUpdates::default(); let mut diff1_post_state = HashedPostState::default(); diff1_post_state.accounts.insert(addr1, Some(acc1)); - diff1_trie_updates.account_nodes.insert(path1, node1.clone()); + diff1_trie_updates.account_nodes.insert(path1, node1); let mut storage1 = HashedStorage::default(); storage1.storage.insert(slot1, U256::from(1111)); diff1_post_state.storages.insert(addr1, storage1); let mut storage_updates1 = StorageTrieUpdates::default(); - storage_updates1.storage_nodes.insert(storage_path1, storage_node1.clone()); + storage_updates1.storage_nodes.insert(storage_path1, storage_node1); diff1_trie_updates.storage_tries.insert(addr1, storage_updates1); let diff1 = BlockStateDiff { sorted_trie_updates: diff1_trie_updates.into_sorted(), sorted_post_state: diff1_post_state.into_sorted(), }; - store.store_trie_updates(b1, diff1).await.expect("store b1"); + store.store_trie_updates(b1, diff1).expect("store b1"); // Block 2: More updates let b2 = BlockWithParent::new(b1.block.hash, NumHash::new(2, B256::random())); let mut diff2_trie_updates = TrieUpdates::default(); let mut diff2_post_state = HashedPostState::default(); diff2_post_state.accounts.insert(addr2, Some(acc2)); - diff2_trie_updates.account_nodes.insert(path2, node2.clone()); + diff2_trie_updates.account_nodes.insert(path2, node2); let mut storage2 = HashedStorage::default(); storage2.storage.insert(slot2, U256::from(2222)); diff2_post_state.storages.insert(addr2, storage2); @@ -3532,7 +3506,7 @@ mod tests { sorted_trie_updates: diff2_trie_updates.into_sorted(), sorted_post_state: diff2_post_state.into_sorted(), }; - store.store_trie_updates(b2, diff2).await.expect("store b2"); + store.store_trie_updates(b2, diff2).expect("store b2"); // Block 3: Additional updates let b3 = BlockWithParent::new(b2.block.hash, NumHash::new(3, B256::random())); @@ -3542,10 +3516,10 @@ mod tests { 
sorted_trie_updates: TrieUpdatesSorted::default(), sorted_post_state: diff3_post_state.into_sorted(), }; - store.store_trie_updates(b3, diff3).await.expect("store b3"); + store.store_trie_updates(b3, diff3).expect("store b3"); // Unwind to block 1 - store.unwind_history(b2).await.expect("unwind"); + store.unwind_history(b2).expect("unwind"); let tx = store.env.tx().expect("tx"); @@ -3594,21 +3568,21 @@ mod tests { assert!(change_cur.seek_exact(3).unwrap().is_none(), "Block 3 changeset should be removed"); } - #[tokio::test] - async fn test_unwind_history_empty_chain() { + #[test] + fn test_unwind_history_empty_chain() { let dir = TempDir::new().unwrap(); let store = MdbxProofsStorage::new(dir.path()).expect("env"); // Try to unwind when there's nothing stored yet let unwind_to = BlockWithParent::new(B256::ZERO, NumHash::new(0, B256::ZERO)); - let result = store.unwind_history(unwind_to).await; + let result = store.unwind_history(unwind_to); // Should succeed (no-op) assert!(result.is_ok(), "Unwinding empty chain should succeed"); } - #[tokio::test] - async fn test_unwind_history_idempotent() { + #[test] + fn test_unwind_history_idempotent() { let dir = TempDir::new().unwrap(); let store = MdbxProofsStorage::new(dir.path()).expect("env"); @@ -3628,16 +3602,16 @@ mod tests { let b2 = BlockWithParent::new(b1.block.hash, NumHash::new(2, B256::random())); let b3 = BlockWithParent::new(b2.block.hash, NumHash::new(3, B256::random())); - store.set_earliest_block_number_hash(b0.number, b0.hash).await.expect("set earliest"); - store.store_trie_updates(b1, make_diff(10)).await.expect("store b1"); - store.store_trie_updates(b2, make_diff(20)).await.expect("store b2"); - store.store_trie_updates(b3, make_diff(30)).await.expect("store b3"); + store.set_earliest_block_number_hash(b0.number, b0.hash).expect("set earliest"); + store.store_trie_updates(b1, make_diff(10)).expect("store b1"); + store.store_trie_updates(b2, make_diff(20)).expect("store b2"); + 
store.store_trie_updates(b3, make_diff(30)).expect("store b3"); // Unwind to block 1 - store.unwind_history(b2).await.expect("first unwind"); + store.unwind_history(b2).expect("first unwind"); // Unwind again to the same block (should be idempotent) - store.unwind_history(b2).await.expect("second unwind"); + store.unwind_history(b2).expect("second unwind"); // Verify state is still correct let tx = store.env.tx().expect("tx"); @@ -3648,8 +3622,8 @@ mod tests { assert!(cur.seek_by_key_subkey(addr, 3).unwrap().is_none(), "Block 3 should be removed"); } - #[tokio::test] - async fn test_unwind_history_beyond_latest() { + #[test] + fn test_unwind_history_beyond_latest() { let dir = TempDir::new().unwrap(); let store = MdbxProofsStorage::new(dir.path()).expect("env"); @@ -3671,13 +3645,13 @@ mod tests { let b4 = BlockWithParent::new(b3.block.hash, NumHash::new(4, B256::random())); let b5 = BlockWithParent::new(b4.block.hash, NumHash::new(5, B256::random())); - store.set_earliest_block_number_hash(b0.number, b0.hash).await.expect("set earliest"); - store.store_trie_updates(b1, make_diff(10)).await.expect("store b1"); - store.store_trie_updates(b2, make_diff(20)).await.expect("store b2"); - store.store_trie_updates(b3, make_diff(30)).await.expect("store b3"); + store.set_earliest_block_number_hash(b0.number, b0.hash).expect("set earliest"); + store.store_trie_updates(b1, make_diff(10)).expect("store b1"); + store.store_trie_updates(b2, make_diff(20)).expect("store b2"); + store.store_trie_updates(b3, make_diff(30)).expect("store b3"); // Unwind to block 1 - store.unwind_history(b5).await.expect("first unwind"); + store.unwind_history(b5).expect("first unwind"); // Verify state is still correct let tx = store.env.tx().expect("tx"); diff --git a/crates/optimism/trie/src/in_memory.rs b/crates/optimism/trie/src/in_memory.rs index ea1b1892f91..8a2b650460d 100644 --- a/crates/optimism/trie/src/in_memory.rs +++ b/crates/optimism/trie/src/in_memory.rs @@ -485,7 +485,7 @@ impl 
OpProofsStore for InMemoryProofsStorage { type StorageCursor<'tx> = InMemoryStorageCursor; type AccountHashedCursor<'tx> = InMemoryAccountCursor; - async fn store_account_branches( + fn store_account_branches( &self, updates: Vec<(Nibbles, Option)>, ) -> OpProofsStorageResult<()> { @@ -498,7 +498,7 @@ impl OpProofsStore for InMemoryProofsStorage { Ok(()) } - async fn store_storage_branches( + fn store_storage_branches( &self, hashed_address: B256, items: Vec<(Nibbles, Option)>, @@ -512,7 +512,7 @@ impl OpProofsStore for InMemoryProofsStorage { Ok(()) } - async fn store_hashed_accounts( + fn store_hashed_accounts( &self, accounts: Vec<(B256, Option)>, ) -> OpProofsStorageResult<()> { @@ -525,7 +525,7 @@ impl OpProofsStore for InMemoryProofsStorage { Ok(()) } - async fn store_hashed_storages( + fn store_hashed_storages( &self, hashed_address: B256, storages: Vec<(B256, U256)>, @@ -539,12 +539,12 @@ impl OpProofsStore for InMemoryProofsStorage { Ok(()) } - async fn get_earliest_block_number(&self) -> OpProofsStorageResult> { + fn get_earliest_block_number(&self) -> OpProofsStorageResult> { let inner = self.inner.read(); Ok(inner.earliest_block) } - async fn get_latest_block_number(&self) -> OpProofsStorageResult> { + fn get_latest_block_number(&self) -> OpProofsStorageResult> { let inner = self.inner.read(); // Find the latest block number from trie_updates let latest_block = inner.trie_updates.keys().max().copied(); @@ -588,7 +588,7 @@ impl OpProofsStore for InMemoryProofsStorage { Ok(InMemoryAccountCursor::new(&inner, max_block_number)) } - async fn store_trie_updates( + fn store_trie_updates( &self, block_ref: BlockWithParent, block_state_diff: BlockStateDiff, @@ -598,7 +598,7 @@ impl OpProofsStore for InMemoryProofsStorage { Ok(inner.store_trie_updates(block_ref.block.number, block_state_diff)) } - async fn fetch_trie_updates(&self, block_number: u64) -> OpProofsStorageResult { + fn fetch_trie_updates(&self, block_number: u64) -> OpProofsStorageResult { let inner 
= self.inner.read(); let trie_updates = inner.trie_updates.get(&block_number).cloned().unwrap_or_default(); @@ -607,7 +607,7 @@ impl OpProofsStore for InMemoryProofsStorage { Ok(BlockStateDiff { sorted_trie_updates: trie_updates, sorted_post_state: post_state }) } - async fn prune_earliest_state( + fn prune_earliest_state( &self, new_earliest_block_ref: BlockWithParent, ) -> OpProofsStorageResult { @@ -694,10 +694,7 @@ impl OpProofsStore for InMemoryProofsStorage { Ok(write_counts) } - async fn unwind_history( - &self, - unwind_upto_block: BlockWithParent, - ) -> OpProofsStorageResult<()> { + fn unwind_history(&self, unwind_upto_block: BlockWithParent) -> OpProofsStorageResult<()> { let mut inner = self.inner.write(); let unwind_upto_block_number = unwind_upto_block.block.number - 1; @@ -712,7 +709,7 @@ impl OpProofsStore for InMemoryProofsStorage { Ok(()) } - async fn replace_updates( + fn replace_updates( &self, latest_common_block: BlockNumHash, blocks_to_add: Vec<(BlockWithParent, BlockStateDiff)>, @@ -735,7 +732,7 @@ impl OpProofsStore for InMemoryProofsStorage { Ok(()) } - async fn set_earliest_block_number( + fn set_earliest_block_number( &self, block_number: u64, hash: B256, @@ -754,21 +751,21 @@ mod tests { use alloy_primitives::U256; use reth_primitives_traits::Account; - #[tokio::test] - async fn test_in_memory_storage_basic_operations() -> Result<(), OpProofsStorageError> { + #[test] + fn test_in_memory_storage_basic_operations() -> Result<(), OpProofsStorageError> { let storage = InMemoryProofsStorage::new(); // Test setting earliest block let block_hash = B256::random(); - storage.set_earliest_block_number(1, block_hash).await?; - let earliest = storage.get_earliest_block_number().await?; + storage.set_earliest_block_number(1, block_hash)?; + let earliest = storage.get_earliest_block_number()?; assert_eq!(earliest, Some((1, block_hash))); // Test storing and retrieving accounts let account = Account { nonce: 1, balance: U256::from(100), bytecode_hash: 
None }; let hashed_address = B256::random(); - storage.store_hashed_accounts(vec![(hashed_address, Some(account))]).await?; + storage.store_hashed_accounts(vec![(hashed_address, Some(account))])?; let _cursor = storage.account_hashed_cursor(10)?; // Note: cursor testing would require more complex setup with proper seek/next operations @@ -776,8 +773,8 @@ mod tests { Ok(()) } - #[tokio::test] - async fn test_trie_updates_storage() -> Result<(), OpProofsStorageError> { + #[test] + fn test_trie_updates_storage() -> Result<(), OpProofsStorageError> { let storage = InMemoryProofsStorage::new(); let sorted_trie_updates = TrieUpdatesSorted::default(); @@ -789,9 +786,9 @@ mod tests { const BLOCK: BlockWithParent = BlockWithParent::new(B256::ZERO, NumHash::new(5, B256::ZERO)); - storage.store_trie_updates(BLOCK, block_state_diff).await?; + storage.store_trie_updates(BLOCK, block_state_diff)?; - let retrieved_diff = storage.fetch_trie_updates(BLOCK.block.number).await?; + let retrieved_diff = storage.fetch_trie_updates(BLOCK.block.number)?; assert_eq!(retrieved_diff.sorted_trie_updates, sorted_trie_updates); assert_eq!(retrieved_diff.sorted_post_state, sorted_post_state); diff --git a/crates/optimism/trie/src/initialize.rs b/crates/optimism/trie/src/initialize.rs index 8b7121c5cc7..4339acc05a4 100644 --- a/crates/optimism/trie/src/initialize.rs +++ b/crates/optimism/trie/src/initialize.rs @@ -136,7 +136,7 @@ impl { /// Initialize a table from a source iterator to a storage function. Handles batching and /// logging. 
- async fn initialize< + fn initialize< I: Iterator> + InitTable, Key: CompletionEstimatable + Clone + 'static, Value: Clone + 'static, @@ -196,14 +196,14 @@ impl if entries.len() >= storage_threshold { info!("Storing {} entries, total entries: {}", name, total_entries); - I::store_entries(storage, entries).await?; + I::store_entries(storage, entries)?; entries = Vec::new(); } } if !entries.is_empty() { info!("Storing final {} entries", name); - I::store_entries(storage, entries).await?; + I::store_entries(storage, entries)?; } info!("{} initialization complete: {} entries", name, total_entries); @@ -211,7 +211,7 @@ impl } /// Initialize hashed accounts data - async fn initialize_hashed_accounts( + fn initialize_hashed_accounts( &self, start_key: Option, ) -> Result<(), OpProofsStorageError> { @@ -230,14 +230,13 @@ impl source, INITIALIZE_STORAGE_THRESHOLD, INITIALIZE_LOG_THRESHOLD, - ) - .await?; + )?; Ok(()) } /// Initialize hashed storage data - async fn initialize_hashed_storages( + fn initialize_hashed_storages( &self, start_key: Option, ) -> Result<(), OpProofsStorageError> { @@ -256,14 +255,13 @@ impl source, INITIALIZE_STORAGE_THRESHOLD, INITIALIZE_LOG_THRESHOLD, - ) - .await?; + )?; Ok(()) } /// Initialize accounts trie data - async fn initialize_accounts_trie( + fn initialize_accounts_trie( &self, start_key: Option, ) -> Result<(), OpProofsStorageError> { @@ -282,14 +280,13 @@ impl source, INITIALIZE_STORAGE_THRESHOLD, INITIALIZE_LOG_THRESHOLD, - ) - .await?; + )?; Ok(()) } /// Initialize storage trie data - async fn initialize_storages_trie( + fn initialize_storages_trie( &self, start_key: Option, ) -> Result<(), OpProofsStorageError> { @@ -311,21 +308,17 @@ impl source, INITIALIZE_STORAGE_THRESHOLD, INITIALIZE_LOG_THRESHOLD, - ) - .await?; + )?; Ok(()) } /// Run complete initialization of all preimage data - async fn initialize_trie( - &self, - anchor: InitialStateAnchor, - ) -> Result<(), OpProofsStorageError> { - 
self.initialize_hashed_accounts(anchor.latest_hashed_account_key).await?; - self.initialize_hashed_storages(anchor.latest_hashed_storage_key).await?; - self.initialize_storages_trie(anchor.latest_storage_trie_key).await?; - self.initialize_accounts_trie(anchor.latest_account_trie_key).await?; + fn initialize_trie(&self, anchor: InitialStateAnchor) -> Result<(), OpProofsStorageError> { + self.initialize_hashed_accounts(anchor.latest_hashed_account_key)?; + self.initialize_hashed_storages(anchor.latest_hashed_storage_key)?; + self.initialize_storages_trie(anchor.latest_storage_trie_key)?; + self.initialize_accounts_trie(anchor.latest_account_trie_key)?; Ok(()) } @@ -345,23 +338,21 @@ impl } /// Run the initialization job. - pub async fn run(&self, best_number: u64, best_hash: B256) -> Result<(), OpProofsStorageError> { - let anchor = self.storage.initial_state_anchor().await?; + pub fn run(&self, best_number: u64, best_hash: B256) -> Result<(), OpProofsStorageError> { + let anchor = self.storage.initial_state_anchor()?; match anchor.status { InitialStateStatus::Completed => return Ok(()), InitialStateStatus::NotStarted => { - self.storage - .set_initial_state_anchor(BlockNumHash::new(best_number, best_hash)) - .await?; + self.storage.set_initial_state_anchor(BlockNumHash::new(best_number, best_hash))?; } InitialStateStatus::InProgress => { self.validate_anchor_block(&anchor, best_number, best_hash)?; } } - self.initialize_trie(anchor).await?; - self.storage.commit_initial_state().await?; + self.initialize_trie(anchor)?; + self.storage.commit_initial_state()?; Ok(()) } @@ -378,7 +369,7 @@ trait InitTable { fn store_entries( store: &impl OpProofsStore, entries: impl IntoIterator, - ) -> impl Future>; + ) -> Result<(), OpProofsStorageError>; } impl InitTable for HashedAccountsInit { @@ -386,15 +377,13 @@ impl InitTable for HashedAccountsInit { type Value = Account; /// Save mapping of hashed addresses to accounts to storage. 
- async fn store_entries( + fn store_entries( store: &impl OpProofsStore, entries: impl IntoIterator, ) -> Result<(), OpProofsStorageError> { - store - .store_hashed_accounts( - entries.into_iter().map(|(address, account)| (address, Some(account))).collect(), - ) - .await?; + store.store_hashed_accounts( + entries.into_iter().map(|(address, account)| (address, Some(account))).collect(), + )?; Ok(()) } } @@ -404,7 +393,7 @@ impl InitTable for HashedStoragesInit { type Value = StorageEntry; /// Save mapping of hashed addresses to storage entries to storage. - async fn store_entries( + fn store_entries( store: &impl OpProofsStore, entries: impl IntoIterator, ) -> Result<(), OpProofsStorageError> { @@ -418,7 +407,7 @@ impl InitTable for HashedStoragesInit { } // Store each address's storage entries for (address, storages) in by_address { - store.store_hashed_storages(address, storages).await?; + store.store_hashed_storages(address, storages)?; } Ok(()) @@ -430,15 +419,13 @@ impl InitTable for AccountsTrieInit { type Value = BranchNodeCompact; /// Save mapping of account trie paths to branch nodes to storage. - async fn store_entries( + fn store_entries( store: &impl OpProofsStore, entries: impl IntoIterator, ) -> Result<(), OpProofsStorageError> { - store - .store_account_branches( - entries.into_iter().map(|(path, branch)| (path.0, Some(branch))).collect(), - ) - .await?; + store.store_account_branches( + entries.into_iter().map(|(path, branch)| (path.0, Some(branch))).collect(), + )?; Ok(()) } @@ -449,7 +436,7 @@ impl InitTable for StoragesTrieInit { type Value = StorageTrieEntry; /// Save mapping of hashed addresses to storage trie entries to storage. 
- async fn store_entries( + fn store_entries( store: &impl OpProofsStore, entries: impl IntoIterator, ) -> Result<(), OpProofsStorageError> { @@ -466,7 +453,7 @@ impl InitTable for StoragesTrieInit { } // Store each address's storage trie branches for (address, branches) in by_address { - store.store_storage_branches(address, branches).await?; + store.store_storage_branches(address, branches)?; } Ok(()) @@ -511,8 +498,8 @@ mod tests { } } - #[tokio::test] - async fn test_initialize_hashed_accounts() { + #[test] + fn test_initialize_hashed_accounts() { let db = create_test_rw_db(); let dir = TempDir::new().unwrap(); let storage = Arc::new(MdbxProofsStorage::new(dir.path()).expect("env")); @@ -548,7 +535,7 @@ mod tests { // Run initialization let tx = db.tx().unwrap(); let job = InitializationJob::new(storage.clone(), tx); - job.initialize_hashed_accounts(None).await.unwrap(); + job.initialize_hashed_accounts(None).unwrap(); // Verify data was stored (will be in sorted order) let mut account_cursor = storage.account_hashed_cursor(100).unwrap(); @@ -562,8 +549,8 @@ mod tests { assert_eq!(count, 3); } - #[tokio::test] - async fn test_initialize_hashed_storage() { + #[test] + fn test_initialize_hashed_storage() { let db = create_test_rw_db(); let dir = TempDir::new().unwrap(); let storage = Arc::new(MdbxProofsStorage::new(dir.path()).expect("env")); @@ -599,7 +586,7 @@ mod tests { // Run initialization let tx = db.tx().unwrap(); let job = InitializationJob::new(storage.clone(), tx); - job.initialize_hashed_storages(None).await.unwrap(); + job.initialize_hashed_storages(None).unwrap(); // Verify data was stored for addr1 let mut storage_cursor = storage.storage_hashed_cursor(addr1, 100).unwrap(); @@ -621,8 +608,8 @@ mod tests { assert_eq!(found[0], (storage_entries[2].1.key, storage_entries[2].1.value)); } - #[tokio::test] - async fn test_initialize_accounts_trie() { + #[test] + fn test_initialize_accounts_trie() { let db = create_test_rw_db(); let dir = 
TempDir::new().unwrap(); let storage = Arc::new(MdbxProofsStorage::new(dir.path()).expect("env")); @@ -635,7 +622,7 @@ mod tests { let nodes = vec![ (StoredNibbles(Nibbles::from_nibbles_unchecked(vec![1])), branch.clone()), (StoredNibbles(Nibbles::from_nibbles_unchecked(vec![2])), branch.clone()), - (StoredNibbles(Nibbles::from_nibbles_unchecked(vec![3])), branch.clone()), + (StoredNibbles(Nibbles::from_nibbles_unchecked(vec![3])), branch), ]; for (path, node) in &nodes { @@ -647,7 +634,7 @@ mod tests { // Run initialization let tx = db.tx().unwrap(); let job = InitializationJob::new(storage.clone(), tx); - job.initialize_accounts_trie(None).await.unwrap(); + job.initialize_accounts_trie(None).unwrap(); // Verify data was stored let mut trie_cursor = storage.account_trie_cursor(100).unwrap(); @@ -659,8 +646,8 @@ mod tests { assert_eq!(count, 3); } - #[tokio::test] - async fn test_initialize_storages_trie() { + #[test] + fn test_initialize_storages_trie() { let db = create_test_rw_db(); let dir = TempDir::new().unwrap(); let storage = Arc::new(MdbxProofsStorage::new(dir.path()).expect("env")); @@ -692,7 +679,7 @@ mod tests { addr2, StorageTrieEntry { nibbles: StoredNibblesSubKey(Nibbles::from_nibbles_unchecked(vec![3])), - node: branch.clone(), + node: branch, }, ), ]; @@ -706,7 +693,7 @@ mod tests { // Run initialization let tx = db.tx().unwrap(); let job = InitializationJob::new(storage.clone(), tx); - job.initialize_storages_trie(None).await.unwrap(); + job.initialize_storages_trie(None).unwrap(); // Verify data was stored for addr1 let mut trie_cursor = storage.storage_trie_cursor(addr1, 100).unwrap(); @@ -728,8 +715,8 @@ mod tests { assert_eq!(found[0], nodes[2].1.nibbles.0); } - #[tokio::test] - async fn test_full_initialize_run() { + #[test] + fn test_full_initialize_run() { let db = create_test_rw_db(); let dir = TempDir::new().unwrap(); let storage = Arc::new(MdbxProofsStorage::new(dir.path()).expect("env")); @@ -787,16 +774,13 @@ mod tests { let best_hash 
= B256::repeat_byte(0x42); // Should be None initially - assert_eq!(storage.initial_state_anchor().await.unwrap().block, None); - assert_eq!(storage.get_earliest_block_number().await.unwrap(), None); + assert_eq!(storage.initial_state_anchor().unwrap().block, None); + assert_eq!(storage.get_earliest_block_number().unwrap(), None); - job.run(best_number, best_hash).await.unwrap(); + job.run(best_number, best_hash).unwrap(); // Should be set after initialization - assert_eq!( - storage.get_earliest_block_number().await.unwrap(), - Some((best_number, best_hash)) - ); + assert_eq!(storage.get_earliest_block_number().unwrap(), Some((best_number, best_hash))); // Verify data was initialized let mut account_cursor = storage.account_hashed_cursor(100).unwrap(); @@ -812,8 +796,8 @@ mod tests { assert!(storage_trie_cursor.next().unwrap().is_some()); } - #[tokio::test] - async fn test_initialize_run_skips_if_already_done() { + #[test] + fn test_initialize_run_skips_if_already_done() { let db = create_test_rw_db(); let dir = TempDir::new().unwrap(); let storage = Arc::new(MdbxProofsStorage::new(dir.path()).expect("env")); @@ -821,19 +805,18 @@ mod tests { // set and commit initial state anchor storage .set_initial_state_anchor(BlockNumHash::new(50, B256::repeat_byte(0x01))) - .await .expect("set anchor"); - storage.commit_initial_state().await.expect("commit anchor"); + storage.commit_initial_state().expect("commit anchor"); let tx = db.tx().unwrap(); let job = InitializationJob::new(storage.clone(), tx); // Run initialization - should skip - job.run(100, B256::repeat_byte(0x42)).await.unwrap(); + job.run(100, B256::repeat_byte(0x42)).unwrap(); // Should still have the old anchor let anchor_block = - storage.initial_state_anchor().await.expect("get anchor").block.expect("block"); + storage.initial_state_anchor().expect("get anchor").block.expect("block"); assert_eq!( Some((anchor_block.number, anchor_block.hash)), Some((50, B256::repeat_byte(0x01))) @@ -841,21 +824,18 @@ mod 
tests { // Should still have the old earliest block assert_eq!( - storage.get_earliest_block_number().await.unwrap(), + storage.get_earliest_block_number().unwrap(), Some((50, B256::repeat_byte(0x01))) ); } - #[tokio::test] - async fn test_initialize_resumes_hashed_accounts_with_no_dups() { + #[test] + fn test_initialize_resumes_hashed_accounts_with_no_dups() { let db = create_test_rw_db(); let dir = TempDir::new().unwrap(); let store = Arc::new(MdbxProofsStorage::new(dir.path()).expect("env")); - store - .set_initial_state_anchor(BlockNumHash::new(0, B256::default())) - .await - .expect("set anchor"); + store.set_initial_state_anchor(BlockNumHash::new(0, B256::default())).expect("set anchor"); // Phase 1 in source: k1, k2 let k1 = k(1); @@ -874,12 +854,12 @@ mod tests { { let tx = db.tx().unwrap(); let job = InitializationJob::new(store.clone(), tx); - job.initialize_hashed_accounts(None).await.unwrap(); + job.initialize_hashed_accounts(None).unwrap(); } // Resume point must be k2 (max) assert_eq!( - store.initial_state_anchor().await.expect("get anchor").latest_hashed_account_key, + store.initial_state_anchor().expect("get anchor").latest_hashed_account_key, Some(k2) ); @@ -900,12 +880,12 @@ mod tests { { let tx = db.tx().unwrap(); let job = InitializationJob::new(store.clone(), tx); - job.initialize_hashed_accounts(Some(k2)).await.unwrap(); + job.initialize_hashed_accounts(Some(k2)).unwrap(); } // Now resume point must be k4 assert_eq!( - store.initial_state_anchor().await.expect("get anchor").latest_hashed_account_key, + store.initial_state_anchor().expect("get anchor").latest_hashed_account_key, Some(k4) ); @@ -929,16 +909,13 @@ mod tests { } } - #[tokio::test] - async fn test_initialize_resumes_hashed_storages_with_no_dups() { + #[test] + fn test_initialize_resumes_hashed_storages_with_no_dups() { let db = create_test_rw_db(); let dir = TempDir::new().unwrap(); let store = Arc::new(MdbxProofsStorage::new(dir.path()).expect("env")); - store - 
.set_initial_state_anchor(BlockNumHash::new(0, B256::default())) - .await - .expect("set anchor"); + store.set_initial_state_anchor(BlockNumHash::new(0, B256::default())).expect("set anchor"); let a1 = k(0x10); let a2 = k(0x20); @@ -964,13 +941,12 @@ mod tests { { let tx = db.tx().unwrap(); let job = InitializationJob::new(store.clone(), tx); - job.initialize_hashed_storages(None).await.unwrap(); + job.initialize_hashed_storages(None).unwrap(); } // Latest key must be (a2, s21) because a2 > a1 let last1 = store .initial_state_anchor() - .await .expect("get anchor") .latest_hashed_storage_key .expect("ok"); @@ -989,13 +965,12 @@ mod tests { { let tx = db.tx().unwrap(); let job = InitializationJob::new(store.clone(), tx); - job.initialize_hashed_storages(Some(HashedStorageKey::new(a2, s21))).await.unwrap(); + job.initialize_hashed_storages(Some(HashedStorageKey::new(a2, s21))).unwrap(); } // Latest key now must be (a2, s22) let last2 = store .initial_state_anchor() - .await .expect("get anchor") .latest_hashed_storage_key .expect("ok"); @@ -1025,16 +1000,13 @@ mod tests { } } - #[tokio::test] - async fn test_initialize_resumes_accounts_trie_with_no_dups() { + #[test] + fn test_initialize_resumes_accounts_trie_with_no_dups() { let db = create_test_rw_db(); let dir = TempDir::new().unwrap(); let store = Arc::new(MdbxProofsStorage::new(dir.path()).expect("env")); - store - .set_initial_state_anchor(BlockNumHash::new(0, B256::default())) - .await - .expect("set anchor"); + store.set_initial_state_anchor(BlockNumHash::new(0, B256::default())).expect("set anchor"); let p1 = StoredNibbles(Nibbles::from_nibbles_unchecked(vec![1])); let p2 = StoredNibbles(Nibbles::from_nibbles_unchecked(vec![2])); @@ -1054,11 +1026,11 @@ mod tests { { let tx = db.tx().unwrap(); let job = InitializationJob::new(store.clone(), tx); - job.initialize_accounts_trie(None).await.unwrap(); + job.initialize_accounts_trie(None).unwrap(); } assert_eq!( - store.initial_state_anchor().await.expect("get 
anchor").latest_account_trie_key, + store.initial_state_anchor().expect("get anchor").latest_account_trie_key, Some(p2.clone()) ); @@ -1075,11 +1047,11 @@ mod tests { { let tx = db.tx().unwrap(); let job = InitializationJob::new(store.clone(), tx); - job.initialize_accounts_trie(Some(p2.clone())).await.unwrap(); + job.initialize_accounts_trie(Some(p2.clone())).unwrap(); } assert_eq!( - store.initial_state_anchor().await.expect("get anchor").latest_account_trie_key, + store.initial_state_anchor().expect("get anchor").latest_account_trie_key, Some(p4.clone()) ); @@ -1096,16 +1068,13 @@ mod tests { assert_eq!(got[3], p4.0); } - #[tokio::test] - async fn test_initialize_resumes_storages_trie_with_no_dups() { + #[test] + fn test_initialize_resumes_storages_trie_with_no_dups() { let db = create_test_rw_db(); let dir = TempDir::new().unwrap(); let store = Arc::new(MdbxProofsStorage::new(dir.path()).expect("env")); - store - .set_initial_state_anchor(BlockNumHash::new(0, B256::default())) - .await - .expect("set anchor"); + store.set_initial_state_anchor(BlockNumHash::new(0, B256::default())).expect("set anchor"); let a1 = k(0x10); let a2 = k(0x20); @@ -1135,16 +1104,12 @@ mod tests { { let tx = db.tx().unwrap(); let job = InitializationJob::new(store.clone(), tx); - job.initialize_storages_trie(None).await.unwrap(); + job.initialize_storages_trie(None).unwrap(); } // Latest must be (a2, n2) because a2 > a1 - let last1 = store - .initial_state_anchor() - .await - .expect("get anchor") - .latest_storage_trie_key - .expect("ok"); + let last1 = + store.initial_state_anchor().expect("get anchor").latest_storage_trie_key.expect("ok"); assert_eq!(last1.hashed_address, a2); assert_eq!(last1.path.0, n2.0); @@ -1165,17 +1130,12 @@ mod tests { let tx = db.tx().unwrap(); let job = InitializationJob::new(store.clone(), tx); job.initialize_storages_trie(Some(StorageTrieKey::new(a2, StoredNibbles::from(n2.0)))) - .await .unwrap(); } // Latest must now be (a2,n3) - let last2 = store - 
.initial_state_anchor() - .await - .expect("get anchor") - .latest_storage_trie_key - .expect("ok"); + let last2 = + store.initial_state_anchor().expect("get anchor").latest_storage_trie_key.expect("ok"); assert_eq!(last2.hashed_address, a2); assert_eq!(last2.path.0, n3.0); diff --git a/crates/optimism/trie/src/live.rs b/crates/optimism/trie/src/live.rs index 7f404c9fdf6..0a4bb1b526f 100644 --- a/crates/optimism/trie/src/live.rs +++ b/crates/optimism/trie/src/live.rs @@ -36,7 +36,7 @@ where Store: 'tx + OpProofsStore + Clone + 'static, { /// Execute a block and store the updates in the storage. - pub async fn execute_and_store_block_updates( + pub fn execute_and_store_block_updates( &self, block: &RecoveredBlock>, ) -> Result<(), OpProofsStorageError> { @@ -44,10 +44,9 @@ where let start = Instant::now(); // ensure that we have the state of the parent block - let (Some((earliest, _)), Some((latest, _))) = ( - self.storage.get_earliest_block_number().await?, - self.storage.get_latest_block_number().await?, - ) else { + let (Some((earliest, _)), Some((latest, _))) = + (self.storage.get_earliest_block_number()?, self.storage.get_latest_block_number()?) + else { return Err(OpProofsStorageError::NoBlocksFound); }; @@ -97,16 +96,13 @@ where }); } - let update_result = self - .storage - .store_trie_updates( - block_ref, - BlockStateDiff { - sorted_trie_updates: trie_updates.into_sorted(), - sorted_post_state: hashed_state.into_sorted(), - }, - ) - .await?; + let update_result = self.storage.store_trie_updates( + block_ref, + BlockStateDiff { + sorted_trie_updates: trie_updates.into_sorted(), + sorted_post_state: hashed_state.into_sorted(), + }, + )?; operation_durations.total_duration_seconds = start.elapsed(); operation_durations.write_duration_seconds = operation_durations.total_duration_seconds - @@ -131,7 +127,7 @@ where } /// Store trie updates for a given block. 
- pub async fn store_block_updates( + pub fn store_block_updates( &self, block: BlockWithParent, sorted_trie_updates: TrieUpdatesSorted, @@ -142,8 +138,7 @@ where let storage_result = self .storage - .store_trie_updates(block, BlockStateDiff { sorted_trie_updates, sorted_post_state }) - .await?; + .store_trie_updates(block, BlockStateDiff { sorted_trie_updates, sorted_post_state })?; let write_duration = start.elapsed(); operation_durations.total_duration_seconds = write_duration; @@ -176,7 +171,7 @@ where /// /// * `new_blocks` - A vector of references to `RecoveredBlock` instances representing the new /// blocks to be added to the trie storage. - pub async fn unwind_and_store_block_updates( + pub fn unwind_and_store_block_updates( &self, block_updates: Vec<(BlockWithParent, Arc, Arc)>, ) -> Result<(), OpProofsStorageError> { @@ -202,7 +197,7 @@ where )); } - self.storage.replace_updates(latest_common_block, block_trie_updates).await?; + self.storage.replace_updates(latest_common_block, block_trie_updates)?; let write_duration = start.elapsed(); operation_durations.total_duration_seconds = write_duration; operation_durations.write_duration_seconds = write_duration; @@ -224,7 +219,7 @@ where /// Remove account, storage and trie updates from historical storage for all blocks from /// the specified block (inclusive). 
- pub async fn unwind_history(&self, to: BlockWithParent) -> Result<(), OpProofsStorageError> { - self.storage.unwind_history(to).await + pub fn unwind_history(&self, to: BlockWithParent) -> Result<(), OpProofsStorageError> { + self.storage.unwind_history(to) } } diff --git a/crates/optimism/trie/src/metrics.rs b/crates/optimism/trie/src/metrics.rs index cb9e4a9264e..03e9470a0cd 100644 --- a/crates/optimism/trie/src/metrics.rs +++ b/crates/optimism/trie/src/metrics.rs @@ -380,13 +380,13 @@ where Self: 'tx; #[inline] - async fn store_account_branches( + fn store_account_branches( &self, account_nodes: Vec<(Nibbles, Option)>, ) -> OpProofsStorageResult<()> { let count = account_nodes.len(); let start = Instant::now(); - let result = self.storage.store_account_branches(account_nodes).await; + let result = self.storage.store_account_branches(account_nodes); let duration = start.elapsed(); // Record per-item duration @@ -402,14 +402,14 @@ where } #[inline] - async fn store_storage_branches( + fn store_storage_branches( &self, hashed_address: B256, storage_nodes: Vec<(Nibbles, Option)>, ) -> OpProofsStorageResult<()> { let count = storage_nodes.len(); let start = Instant::now(); - let result = self.storage.store_storage_branches(hashed_address, storage_nodes).await; + let result = self.storage.store_storage_branches(hashed_address, storage_nodes); let duration = start.elapsed(); // Record per-item duration @@ -425,13 +425,13 @@ where } #[inline] - async fn store_hashed_accounts( + fn store_hashed_accounts( &self, accounts: Vec<(B256, Option)>, ) -> OpProofsStorageResult<()> { let count = accounts.len(); let start = Instant::now(); - let result = self.storage.store_hashed_accounts(accounts).await; + let result = self.storage.store_hashed_accounts(accounts); let duration = start.elapsed(); // Record per-item duration @@ -447,14 +447,14 @@ where } #[inline] - async fn store_hashed_storages( + fn store_hashed_storages( &self, hashed_address: B256, storages: Vec<(B256, 
U256)>, ) -> OpProofsStorageResult<()> { let count = storages.len(); let start = Instant::now(); - let result = self.storage.store_hashed_storages(hashed_address, storages).await; + let result = self.storage.store_hashed_storages(hashed_address, storages); let duration = start.elapsed(); // Record per-item duration @@ -470,13 +470,13 @@ where } #[inline] - async fn get_earliest_block_number(&self) -> OpProofsStorageResult> { - self.storage.get_earliest_block_number().await + fn get_earliest_block_number(&self) -> OpProofsStorageResult> { + self.storage.get_earliest_block_number() } #[inline] - async fn get_latest_block_number(&self) -> OpProofsStorageResult> { - self.storage.get_latest_block_number().await + fn get_latest_block_number(&self) -> OpProofsStorageResult> { + self.storage.get_latest_block_number() } #[inline] @@ -519,52 +519,52 @@ where // metrics are handled by the live trie collector #[inline] - async fn store_trie_updates( + fn store_trie_updates( &self, block_ref: BlockWithParent, block_state_diff: BlockStateDiff, ) -> OpProofsStorageResult { - let result = self.storage.store_trie_updates(block_ref, block_state_diff).await?; + let result = self.storage.store_trie_updates(block_ref, block_state_diff)?; self.metrics.block_metrics.latest_number.set(block_ref.block.number as f64); Ok(result) } // no metrics for these #[inline] - async fn fetch_trie_updates(&self, block_number: u64) -> OpProofsStorageResult { - self.storage.fetch_trie_updates(block_number).await + fn fetch_trie_updates(&self, block_number: u64) -> OpProofsStorageResult { + self.storage.fetch_trie_updates(block_number) } #[inline] - async fn prune_earliest_state( + fn prune_earliest_state( &self, new_earliest_block_ref: BlockWithParent, ) -> OpProofsStorageResult { self.metrics.block_metrics.earliest_number.set(new_earliest_block_ref.block.number as f64); - self.storage.prune_earliest_state(new_earliest_block_ref).await + self.storage.prune_earliest_state(new_earliest_block_ref) } 
#[inline] - async fn unwind_history(&self, to: BlockWithParent) -> OpProofsStorageResult<()> { - self.storage.unwind_history(to).await + fn unwind_history(&self, to: BlockWithParent) -> OpProofsStorageResult<()> { + self.storage.unwind_history(to) } #[inline] - async fn replace_updates( + fn replace_updates( &self, latest_common_block: BlockNumHash, blocks_to_add: Vec<(BlockWithParent, BlockStateDiff)>, ) -> OpProofsStorageResult<()> { - self.storage.replace_updates(latest_common_block, blocks_to_add).await + self.storage.replace_updates(latest_common_block, blocks_to_add) } #[inline] - async fn set_earliest_block_number( + fn set_earliest_block_number( &self, block_number: u64, hash: B256, ) -> OpProofsStorageResult<()> { self.metrics.block_metrics.earliest_number.set(block_number as f64); - self.storage.set_earliest_block_number(block_number, hash).await + self.storage.set_earliest_block_number(block_number, hash) } } @@ -573,18 +573,18 @@ where S: OpProofsInitialStateStore, { #[inline] - async fn initial_state_anchor(&self) -> OpProofsStorageResult { - self.storage.initial_state_anchor().await + fn initial_state_anchor(&self) -> OpProofsStorageResult { + self.storage.initial_state_anchor() } #[inline] - async fn set_initial_state_anchor(&self, anchor: BlockNumHash) -> OpProofsStorageResult<()> { - self.storage.set_initial_state_anchor(anchor).await + fn set_initial_state_anchor(&self, anchor: BlockNumHash) -> OpProofsStorageResult<()> { + self.storage.set_initial_state_anchor(anchor) } #[inline] - async fn commit_initial_state(&self) -> OpProofsStorageResult { - let block = self.storage.commit_initial_state().await?; + fn commit_initial_state(&self) -> OpProofsStorageResult { + let block = self.storage.commit_initial_state()?; self.metrics.block_metrics.earliest_number.set(block.number as f64); Ok(block) } diff --git a/crates/optimism/trie/src/prune/pruner.rs b/crates/optimism/trie/src/prune/pruner.rs index c8d5c0298da..a1666c55277 100644 --- 
a/crates/optimism/trie/src/prune/pruner.rs +++ b/crates/optimism/trie/src/prune/pruner.rs @@ -51,14 +51,14 @@ where P: OpProofsStore, H: BlockHashReader, { - async fn run_inner(&self) -> OpProofStoragePrunerResult { - let latest_block_opt = self.provider.get_latest_block_number().await?; + fn run_inner(&self) -> OpProofStoragePrunerResult { + let latest_block_opt = self.provider.get_latest_block_number()?; if latest_block_opt.is_none() { trace!(target: "trie::pruner", "No latest blocks in the proof storage"); return Ok(PrunerOutput::default()) } - let earliest_block_opt = self.provider.get_earliest_block_number().await?; + let earliest_block_opt = self.provider.get_earliest_block_number()?; if earliest_block_opt.is_none() { trace!(target: "trie::pruner", "No earliest blocks in the proof storage"); return Ok(PrunerOutput::default()) @@ -96,7 +96,7 @@ where let batch_end_block = cmp::min(current_earliest_block + self.prune_batch_size, target_earliest_block); - let batch_output = self.prune_batch(current_earliest_block, batch_end_block).await?; + let batch_output = self.prune_batch(current_earliest_block, batch_end_block)?; prune_output.extend_ref(batch_output); @@ -108,11 +108,7 @@ where } /// Prunes a single batch of blocks. 
- async fn prune_batch( - &self, - start_block: u64, - end_block: u64, - ) -> Result { + fn prune_batch(&self, start_block: u64, end_block: u64) -> Result { let batch_start_time = Instant::now(); // Fetch block hashes for the new earliest block of this batch @@ -151,7 +147,7 @@ where }; // Commit this batch - let write_counts = self.provider.prune_earliest_state(block_with_parent).await?; + let write_counts = self.provider.prune_earliest_state(block_with_parent)?; let duration = batch_start_time.elapsed(); let batch_output = PrunerOutput { duration, start_block, end_block, write_counts }; @@ -169,8 +165,8 @@ where } /// Run the pruner - pub async fn run(&self) { - let res = self.run_inner().await; + pub fn run(&self) { + let res = self.run_inner(); if let Err(e) = res { error!(target: "trie::pruner", err=%e, "Pruner failed"); return; @@ -234,7 +230,7 @@ mod tests { let store: OpProofsStorage> = OpProofsStorage::from(Arc::new(MdbxProofsStorage::new(dir.path()).expect("env"))); - store.set_earliest_block_number(0, B256::ZERO).await.expect("set earliest"); + store.set_earliest_block_number(0, B256::ZERO).expect("set earliest"); // --- entities --- // accounts @@ -289,7 +285,7 @@ mod tests { hs.storage.insert(s2, U256::from(200)); d_post_state.storages.insert(stor_addr, hs); - d_trie_updates.account_nodes.insert(p1, node_p1.clone()); + d_trie_updates.account_nodes.insert(p1, node_p1); let e = d_trie_updates.storage_tries.entry(stor_addr).or_default(); e.storage_nodes.insert(st1, BranchNodeCompact::default()); @@ -297,7 +293,7 @@ mod tests { sorted_post_state: d_post_state.into_sorted(), sorted_trie_updates: d_trie_updates.into_sorted(), }; - store.store_trie_updates(b1, d).await.expect("b1"); + store.store_trie_updates(b1, d).expect("b1"); parent = b256(1); } @@ -330,7 +326,7 @@ mod tests { sorted_post_state: d_post_state.into_sorted(), sorted_trie_updates: d_trie_updates.into_sorted(), }; - store.store_trie_updates(b2, d).await.expect("b2"); + 
store.store_trie_updates(b2, d).expect("b2"); parent = b256(2); } @@ -356,7 +352,7 @@ mod tests { sorted_post_state: d_post_state.into_sorted(), sorted_trie_updates: d_trie_updates.into_sorted(), }; - store.store_trie_updates(b3, d).await.expect("b3"); + store.store_trie_updates(b3, d).expect("b3"); parent = b256(3); } @@ -383,7 +379,7 @@ mod tests { sorted_post_state: d_post_state.into_sorted(), sorted_trie_updates: d_trie_updates.into_sorted(), }; - store.store_trie_updates(b4, d).await.expect("b4"); + store.store_trie_updates(b4, d).expect("b4"); parent = b256(4); } @@ -406,13 +402,13 @@ mod tests { sorted_post_state: d_post_state.into_sorted(), sorted_trie_updates: TrieUpdatesSorted::default(), }; - store.store_trie_updates(b5, d).await.expect("b5"); + store.store_trie_updates(b5, d).expect("b5"); } // sanity: earliest=0, latest=5 { - let e = store.get_earliest_block_number().await.expect("earliest").expect("some"); - let l = store.get_latest_block_number().await.expect("latest").expect("some"); + let e = store.get_earliest_block_number().expect("earliest").expect("some"); + let l = store.get_latest_block_number().expect("latest").expect("some"); assert_eq!(e.0, 0); assert_eq!(l.0, 5); } @@ -431,14 +427,14 @@ mod tests { .returning(move |_| Ok(Some(b256(3)))); let pruner = OpProofStoragePruner::new(store.clone(), block_hash_reader, 1, 1000); - let out = pruner.run_inner().await.expect("pruner ok"); + let out = pruner.run_inner().expect("pruner ok"); assert_eq!(out.start_block, 0); assert_eq!(out.end_block, 4, "pruned up to 4 (inclusive); new earliest is 4"); // proof window moved: earliest=4, latest=5 { - let e = store.get_earliest_block_number().await.expect("earliest").expect("some"); - let l = store.get_latest_block_number().await.expect("latest").expect("some"); + let e = store.get_earliest_block_number().expect("earliest").expect("some"); + let l = store.get_latest_block_number().expect("latest").expect("some"); assert_eq!(e.0, 4); assert_eq!(e.1, 
b256(4)); assert_eq!(l.0, 5); @@ -483,12 +479,12 @@ mod tests { (s3, U256::from(300)), // block 2 ]; let updated_trie_accounts = vec![ - (p2, node_p2.clone()), // block 2 - (p3, node_p3.clone()), // block 4 + (p2, node_p2), // block 2 + (p3, node_p3), // block 4 ]; let updated_trie_storage = vec![ - (st2, node_st2.clone()), // block 2 - (st3, node_st3.clone()), // block 4 + (st2, node_st2), // block 2 + (st3, node_st3), // block 4 ]; for (key, val) in updated_hashed_accounts { @@ -522,15 +518,15 @@ mod tests { let store: OpProofsStorage> = OpProofsStorage::from(Arc::new(MdbxProofsStorage::new(dir.path()).expect("env"))); - let earliest = store.get_earliest_block_number().await.unwrap(); - let latest = store.get_latest_block_number().await.unwrap(); + let earliest = store.get_earliest_block_number().unwrap(); + let latest = store.get_latest_block_number().unwrap(); println!("{:?} {:?}", earliest, latest); assert!(earliest.is_none()); assert!(latest.is_none()); let block_hash_reader = MockBlockHashReader::new(); let pruner = OpProofStoragePruner::new(store, block_hash_reader, 10, 1000); - let out = pruner.run_inner().await.expect("ok"); + let out = pruner.run_inner().expect("ok"); assert_eq!(out, PrunerOutput::default(), "should early-return default output"); } @@ -546,17 +542,16 @@ mod tests { // Write a single block to set *latest* only. 
store .store_trie_updates(block(3, B256::ZERO), BlockStateDiff::default()) - .await .expect("store b1"); - let earliest = store.get_earliest_block_number().await.unwrap(); - let latest = store.get_latest_block_number().await.unwrap(); + let earliest = store.get_earliest_block_number().unwrap(); + let latest = store.get_latest_block_number().unwrap(); assert!(earliest.is_none(), "earliest must remain None"); assert_eq!(latest.unwrap().0, 3); let block_hash_reader = MockBlockHashReader::new(); let pruner = OpProofStoragePruner::new(store, block_hash_reader, 1, 1000); - let out = pruner.run_inner().await.expect("ok"); + let out = pruner.run_inner().expect("ok"); assert_eq!(out, PrunerOutput::default(), "should early-return default output"); } @@ -572,22 +567,22 @@ mod tests { // Set earliest=4 explicitly let earliest_num = 4u64; let h4 = b256(4); - store.set_earliest_block_number(earliest_num, h4).await.expect("set earliest"); + store.set_earliest_block_number(earliest_num, h4).expect("set earliest"); // Set latest=5 by storing block 5 let b5 = block(5, h4); - store.store_trie_updates(b5, BlockStateDiff::default()).await.expect("store b5"); + store.store_trie_updates(b5, BlockStateDiff::default()).expect("store b5"); // Sanity: earliest=4, latest=5 => interval=1 - let e = store.get_earliest_block_number().await.unwrap().unwrap(); - let l = store.get_latest_block_number().await.unwrap().unwrap(); + let e = store.get_earliest_block_number().unwrap().unwrap(); + let l = store.get_latest_block_number().unwrap().unwrap(); assert_eq!(e.0, 4); assert_eq!(l.0, 5); // Require min_block_interval=2 (or greater) so interval < min let block_hash_reader = MockBlockHashReader::new(); let pruner = OpProofStoragePruner::new(store, block_hash_reader, 2, 1000); - let out = pruner.run_inner().await.expect("ok"); + let out = pruner.run_inner().expect("ok"); assert_eq!(out, PrunerOutput::default(), "no pruning should occur"); } } diff --git a/crates/optimism/trie/src/prune/task.rs 
b/crates/optimism/trie/src/prune/task.rs index 920dc9d8cf6..3251ff0ad93 100644 --- a/crates/optimism/trie/src/prune/task.rs +++ b/crates/optimism/trie/src/prune/task.rs @@ -54,7 +54,7 @@ where break; } _ = interval.tick() => { - self.pruner.run().await + self.pruner.run() } } } diff --git a/crates/optimism/trie/tests/lib.rs b/crates/optimism/trie/tests/lib.rs index 38ecc84c974..73f39d6d032 100644 --- a/crates/optimism/trie/tests/lib.rs +++ b/crates/optimism/trie/tests/lib.rs @@ -79,21 +79,20 @@ fn create_mdbx_proofs_storage() -> MdbxProofsStorage { /// Test basic storage and retrieval of earliest block number #[test_case(InMemoryProofsStorage::new(); "InMemory")] #[test_case(create_mdbx_proofs_storage(); "Mdbx")] -#[tokio::test] #[serial] -async fn test_earliest_block_operations( +fn test_earliest_block_operations( storage: S, ) -> Result<(), OpProofsStorageError> { // Initially should be None - let earliest = storage.get_earliest_block_number().await?; + let earliest = storage.get_earliest_block_number()?; assert!(earliest.is_none()); // Set earliest block let block_hash = B256::repeat_byte(0x42); - storage.set_earliest_block_number(100, block_hash).await?; + storage.set_earliest_block_number(100, block_hash)?; // Should retrieve the same values - let earliest = storage.get_earliest_block_number().await?; + let earliest = storage.get_earliest_block_number()?; assert_eq!(earliest, Some((100, block_hash))); Ok(()) @@ -102,11 +101,8 @@ async fn test_earliest_block_operations( /// Test storing and retrieving trie updates #[test_case(InMemoryProofsStorage::new(); "InMemory")] #[test_case(create_mdbx_proofs_storage(); "Mdbx")] -#[tokio::test] #[serial] -async fn test_trie_updates_operations( - storage: S, -) -> Result<(), OpProofsStorageError> { +fn test_trie_updates_operations(storage: S) -> Result<(), OpProofsStorageError> { let block_ref = BlockWithParent::new(B256::ZERO, NumHash::new(50, B256::repeat_byte(0x96))); let sorted_trie_updates = 
TrieUpdatesSorted::default(); let sorted_post_state = HashedPostStateSorted::default(); @@ -116,10 +112,10 @@ async fn test_trie_updates_operations( }; // Store trie updates - storage.store_trie_updates(block_ref, block_state_diff).await?; + storage.store_trie_updates(block_ref, block_state_diff)?; // Retrieve and verify - let retrieved_diff = storage.fetch_trie_updates(block_ref.block.number).await?; + let retrieved_diff = storage.fetch_trie_updates(block_ref.block.number)?; assert_eq!(retrieved_diff.sorted_trie_updates, sorted_trie_updates); assert_eq!(retrieved_diff.sorted_post_state, sorted_post_state); @@ -133,9 +129,8 @@ async fn test_trie_updates_operations( /// Test cursor operations on empty trie #[test_case(InMemoryProofsStorage::new(); "InMemory")] #[test_case(create_mdbx_proofs_storage(); "Mdbx")] -#[tokio::test] #[serial] -async fn test_cursor_empty_trie(storage: S) -> Result<(), OpProofsStorageError> { +fn test_cursor_empty_trie(storage: S) -> Result<(), OpProofsStorageError> { let mut cursor = storage.account_trie_cursor(100)?; // All operations should return None on empty trie @@ -150,16 +145,13 @@ async fn test_cursor_empty_trie(storage: S) -> Result<(), OpPr /// Test cursor operations with single entry #[test_case(InMemoryProofsStorage::new(); "InMemory")] #[test_case(create_mdbx_proofs_storage(); "Mdbx")] -#[tokio::test] #[serial] -async fn test_cursor_single_entry( - storage: S, -) -> Result<(), OpProofsStorageError> { +fn test_cursor_single_entry(storage: S) -> Result<(), OpProofsStorageError> { let path = nibbles_from(vec![1, 2, 3]); let branch = create_test_branch(); // Store single entry - storage.store_account_branches(vec![(path, Some(branch.clone()))]).await?; + storage.store_account_branches(vec![(path, Some(branch))])?; let mut cursor = storage.account_trie_cursor(100)?; @@ -179,11 +171,8 @@ async fn test_cursor_single_entry( /// Test cursor operations with multiple entries #[test_case(InMemoryProofsStorage::new(); "InMemory")] 
#[test_case(create_mdbx_proofs_storage(); "Mdbx")] -#[tokio::test] #[serial] -async fn test_cursor_multiple_entries( - storage: S, -) -> Result<(), OpProofsStorageError> { +fn test_cursor_multiple_entries(storage: S) -> Result<(), OpProofsStorageError> { let paths = vec![ nibbles_from(vec![1]), nibbles_from(vec![1, 2]), @@ -194,7 +183,7 @@ async fn test_cursor_multiple_entries( // Store multiple entries for path in &paths { - storage.store_account_branches(vec![(*path, Some(branch.clone()))]).await?; + storage.store_account_branches(vec![(*path, Some(branch.clone()))])?; } let mut cursor = storage.account_trie_cursor(100)?; @@ -221,15 +210,12 @@ async fn test_cursor_multiple_entries( /// Test `seek_exact` with existing path #[test_case(InMemoryProofsStorage::new(); "InMemory")] #[test_case(create_mdbx_proofs_storage(); "Mdbx")] -#[tokio::test] #[serial] -async fn test_seek_exact_existing_path( - storage: S, -) -> Result<(), OpProofsStorageError> { +fn test_seek_exact_existing_path(storage: S) -> Result<(), OpProofsStorageError> { let path = nibbles_from(vec![1, 2, 3]); let branch = create_test_branch(); - storage.store_account_branches(vec![(path, Some(branch.clone()))]).await?; + storage.store_account_branches(vec![(path, Some(branch))])?; let mut cursor = storage.account_trie_cursor(100)?; let result = cursor.seek_exact(path)?.unwrap(); @@ -241,15 +227,14 @@ async fn test_seek_exact_existing_path( /// Test `seek_exact` with non-existing path #[test_case(InMemoryProofsStorage::new(); "InMemory")] #[test_case(create_mdbx_proofs_storage(); "Mdbx")] -#[tokio::test] #[serial] -async fn test_seek_exact_non_existing_path( +fn test_seek_exact_non_existing_path( storage: S, ) -> Result<(), OpProofsStorageError> { let path = nibbles_from(vec![1, 2, 3]); let branch = create_test_branch(); - storage.store_account_branches(vec![(path, Some(branch.clone()))]).await?; + storage.store_account_branches(vec![(path, Some(branch))])?; let mut cursor = 
storage.account_trie_cursor(100)?; let non_existing = nibbles_from(vec![4, 5, 6]); @@ -261,15 +246,12 @@ async fn test_seek_exact_non_existing_path( /// Test `seek_exact` with empty path #[test_case(InMemoryProofsStorage::new(); "InMemory")] #[test_case(create_mdbx_proofs_storage(); "Mdbx")] -#[tokio::test] #[serial] -async fn test_seek_exact_empty_path( - storage: S, -) -> Result<(), OpProofsStorageError> { +fn test_seek_exact_empty_path(storage: S) -> Result<(), OpProofsStorageError> { let path = nibbles_from(vec![]); let branch = create_test_branch(); - storage.store_account_branches(vec![(path, Some(branch.clone()))]).await?; + storage.store_account_branches(vec![(path, Some(branch))])?; let mut cursor = storage.account_trie_cursor(100)?; let result = cursor.seek_exact(Nibbles::default())?.unwrap(); @@ -281,15 +263,12 @@ async fn test_seek_exact_empty_path( /// Test seek to existing path #[test_case(InMemoryProofsStorage::new(); "InMemory")] #[test_case(create_mdbx_proofs_storage(); "Mdbx")] -#[tokio::test] #[serial] -async fn test_seek_to_existing_path( - storage: S, -) -> Result<(), OpProofsStorageError> { +fn test_seek_to_existing_path(storage: S) -> Result<(), OpProofsStorageError> { let path = nibbles_from(vec![1, 2, 3]); let branch = create_test_branch(); - storage.store_account_branches(vec![(path, Some(branch.clone()))]).await?; + storage.store_account_branches(vec![(path, Some(branch))])?; let mut cursor = storage.account_trie_cursor(100)?; let result = cursor.seek(path)?.unwrap(); @@ -301,17 +280,16 @@ async fn test_seek_to_existing_path( /// Test seek between existing nodes #[test_case(InMemoryProofsStorage::new(); "InMemory")] #[test_case(create_mdbx_proofs_storage(); "Mdbx")] -#[tokio::test] #[serial] -async fn test_seek_between_existing_nodes( +fn test_seek_between_existing_nodes( storage: S, ) -> Result<(), OpProofsStorageError> { let path1 = nibbles_from(vec![1]); let path2 = nibbles_from(vec![3]); let branch = create_test_branch(); - 
storage.store_account_branches(vec![(path1, Some(branch.clone()))]).await?; - storage.store_account_branches(vec![(path2, Some(branch.clone()))]).await?; + storage.store_account_branches(vec![(path1, Some(branch.clone()))])?; + storage.store_account_branches(vec![(path2, Some(branch))])?; let mut cursor = storage.account_trie_cursor(100)?; // Seek to path between 1 and 3, should return path 3 @@ -325,15 +303,12 @@ async fn test_seek_between_existing_nodes( /// Test seek after all nodes #[test_case(InMemoryProofsStorage::new(); "InMemory")] #[test_case(create_mdbx_proofs_storage(); "Mdbx")] -#[tokio::test] #[serial] -async fn test_seek_after_all_nodes( - storage: S, -) -> Result<(), OpProofsStorageError> { +fn test_seek_after_all_nodes(storage: S) -> Result<(), OpProofsStorageError> { let path = nibbles_from(vec![1]); let branch = create_test_branch(); - storage.store_account_branches(vec![(path, Some(branch.clone()))]).await?; + storage.store_account_branches(vec![(path, Some(branch))])?; let mut cursor = storage.account_trie_cursor(100)?; // Seek to path after all nodes @@ -346,15 +321,12 @@ async fn test_seek_after_all_nodes( /// Test seek before all nodes #[test_case(InMemoryProofsStorage::new(); "InMemory")] #[test_case(create_mdbx_proofs_storage(); "Mdbx")] -#[tokio::test] #[serial] -async fn test_seek_before_all_nodes( - storage: S, -) -> Result<(), OpProofsStorageError> { +fn test_seek_before_all_nodes(storage: S) -> Result<(), OpProofsStorageError> { let path = nibbles_from(vec![5]); let branch = create_test_branch(); - storage.store_account_branches(vec![(path, Some(branch.clone()))]).await?; + storage.store_account_branches(vec![(path, Some(branch))])?; let mut cursor = storage.account_trie_cursor(100)?; // Seek to path before all nodes, should return first node @@ -372,15 +344,12 @@ async fn test_seek_before_all_nodes( /// Test next without prior seek #[test_case(InMemoryProofsStorage::new(); "InMemory")] #[test_case(create_mdbx_proofs_storage(); 
"Mdbx")] -#[tokio::test] #[serial] -async fn test_next_without_prior_seek( - storage: S, -) -> Result<(), OpProofsStorageError> { +fn test_next_without_prior_seek(storage: S) -> Result<(), OpProofsStorageError> { let path = nibbles_from(vec![1, 2]); let branch = create_test_branch(); - storage.store_account_branches(vec![(path, Some(branch.clone()))]).await?; + storage.store_account_branches(vec![(path, Some(branch))])?; let mut cursor = storage.account_trie_cursor(100)?; // next() without prior seek should start from beginning @@ -393,15 +362,14 @@ async fn test_next_without_prior_seek( /// Test next after seek #[test_case(InMemoryProofsStorage::new(); "InMemory")] #[test_case(create_mdbx_proofs_storage(); "Mdbx")] -#[tokio::test] #[serial] -async fn test_next_after_seek(storage: S) -> Result<(), OpProofsStorageError> { +fn test_next_after_seek(storage: S) -> Result<(), OpProofsStorageError> { let path1 = nibbles_from(vec![1]); let path2 = nibbles_from(vec![2]); let branch = create_test_branch(); - storage.store_account_branches(vec![(path1, Some(branch.clone()))]).await?; - storage.store_account_branches(vec![(path2, Some(branch.clone()))]).await?; + storage.store_account_branches(vec![(path1, Some(branch.clone()))])?; + storage.store_account_branches(vec![(path2, Some(branch))])?; let mut cursor = storage.account_trie_cursor(100)?; cursor.seek(path1)?; @@ -416,15 +384,12 @@ async fn test_next_after_seek(storage: S) -> Result<(), OpProo /// Test next at end of trie #[test_case(InMemoryProofsStorage::new(); "InMemory")] #[test_case(create_mdbx_proofs_storage(); "Mdbx")] -#[tokio::test] #[serial] -async fn test_next_at_end_of_trie( - storage: S, -) -> Result<(), OpProofsStorageError> { +fn test_next_at_end_of_trie(storage: S) -> Result<(), OpProofsStorageError> { let path = nibbles_from(vec![1]); let branch = create_test_branch(); - storage.store_account_branches(vec![(path, Some(branch.clone()))]).await?; + storage.store_account_branches(vec![(path, 
Some(branch))])?; let mut cursor = storage.account_trie_cursor(100)?; cursor.seek(path)?; @@ -438,16 +403,15 @@ async fn test_next_at_end_of_trie( /// Test multiple consecutive next calls #[test_case(InMemoryProofsStorage::new(); "InMemory")] #[test_case(create_mdbx_proofs_storage(); "Mdbx")] -#[tokio::test] #[serial] -async fn test_multiple_consecutive_next( +fn test_multiple_consecutive_next( storage: S, ) -> Result<(), OpProofsStorageError> { let paths = vec![nibbles_from(vec![1]), nibbles_from(vec![2]), nibbles_from(vec![3])]; let branch = create_test_branch(); for path in &paths { - storage.store_account_branches(vec![(*path, Some(branch.clone()))]).await?; + storage.store_account_branches(vec![(*path, Some(branch.clone()))])?; } let mut cursor = storage.account_trie_cursor(100)?; @@ -467,17 +431,14 @@ async fn test_multiple_consecutive_next( /// Test current after operations #[test_case(InMemoryProofsStorage::new(); "InMemory")] #[test_case(create_mdbx_proofs_storage(); "Mdbx")] -#[tokio::test] #[serial] -async fn test_current_after_operations( - storage: S, -) -> Result<(), OpProofsStorageError> { +fn test_current_after_operations(storage: S) -> Result<(), OpProofsStorageError> { let path1 = nibbles_from(vec![1]); let path2 = nibbles_from(vec![2]); let branch = create_test_branch(); - storage.store_account_branches(vec![(path1, Some(branch.clone()))]).await?; - storage.store_account_branches(vec![(path2, Some(branch.clone()))]).await?; + storage.store_account_branches(vec![(path1, Some(branch.clone()))])?; + storage.store_account_branches(vec![(path2, Some(branch))])?; let mut cursor = storage.account_trie_cursor(100)?; @@ -498,9 +459,8 @@ async fn test_current_after_operations( /// Test current with no prior operations #[test_case(InMemoryProofsStorage::new(); "InMemory")] #[test_case(create_mdbx_proofs_storage(); "Mdbx")] -#[tokio::test] #[serial] -async fn test_current_no_prior_operations( +fn test_current_no_prior_operations( storage: S, ) -> Result<(), 
OpProofsStorageError> { let mut cursor = storage.account_trie_cursor(100)?; @@ -518,9 +478,8 @@ async fn test_current_no_prior_operations( /// Test same path with different blocks #[test_case(InMemoryProofsStorage::new(); "InMemory")] #[test_case(create_mdbx_proofs_storage(); "Mdbx")] -#[tokio::test] #[serial] -async fn test_same_path_different_blocks( +fn test_same_path_different_blocks( storage: S, ) -> Result<(), OpProofsStorageError> { let path = nibbles_from(vec![1, 2]); @@ -528,8 +487,8 @@ async fn test_same_path_different_blocks( let branch2 = create_test_branch_variant(); // Store same path at different blocks - storage.store_account_branches(vec![(path, Some(branch1.clone()))]).await?; - storage.store_account_branches(vec![(path, Some(branch2.clone()))]).await?; + storage.store_account_branches(vec![(path, Some(branch1))])?; + storage.store_account_branches(vec![(path, Some(branch2))])?; // Cursor with max_block_number=75 should see only block 50 data let mut cursor75 = storage.account_trie_cursor(75)?; @@ -547,17 +506,14 @@ async fn test_same_path_different_blocks( /// Test deleted branch nodes #[test_case(InMemoryProofsStorage::new(); "InMemory")] #[test_case(create_mdbx_proofs_storage(); "Mdbx")] -#[tokio::test] #[serial] -async fn test_deleted_branch_nodes( - storage: S, -) -> Result<(), OpProofsStorageError> { +fn test_deleted_branch_nodes(storage: S) -> Result<(), OpProofsStorageError> { let path = nibbles_from(vec![1, 2]); let branch = create_test_branch(); let block_ref = BlockWithParent::new(B256::ZERO, NumHash::new(100, B256::repeat_byte(0x96))); // Store branch node, then delete it (store None) - storage.store_account_branches(vec![(path, Some(branch.clone()))]).await?; + storage.store_account_branches(vec![(path, Some(branch))])?; // Cursor before deletion should see the node let mut cursor75 = storage.account_trie_cursor(75)?; @@ -569,7 +525,7 @@ async fn test_deleted_branch_nodes( sorted_trie_updates: 
block_state_diff_trie_updates.into_sorted(), sorted_post_state: HashedPostStateSorted::default(), }; - storage.store_trie_updates(block_ref, block_state_diff).await?; + storage.store_trie_updates(block_ref, block_state_diff)?; // Cursor after deletion should not see the node let mut cursor150 = storage.account_trie_cursor(150)?; @@ -585,19 +541,16 @@ async fn test_deleted_branch_nodes( /// Test account-specific cursor #[test_case(InMemoryProofsStorage::new(); "InMemory")] #[test_case(create_mdbx_proofs_storage(); "Mdbx")] -#[tokio::test] #[serial] -async fn test_account_specific_cursor( - storage: S, -) -> Result<(), OpProofsStorageError> { +fn test_account_specific_cursor(storage: S) -> Result<(), OpProofsStorageError> { let path = nibbles_from(vec![1, 2]); let addr1 = B256::repeat_byte(0x01); let addr2 = B256::repeat_byte(0x02); let branch = create_test_branch(); // Store same path for different accounts (using storage branches) - storage.store_storage_branches(addr1, vec![(path, Some(branch.clone()))]).await?; - storage.store_storage_branches(addr2, vec![(path, Some(branch.clone()))]).await?; + storage.store_storage_branches(addr1, vec![(path, Some(branch.clone()))])?; + storage.store_storage_branches(addr2, vec![(path, Some(branch))])?; // Cursor for addr1 should only see addr1 data let mut cursor1 = storage.storage_trie_cursor(addr1, 100)?; @@ -623,16 +576,15 @@ async fn test_account_specific_cursor( /// Test state trie cursor #[test_case(InMemoryProofsStorage::new(); "InMemory")] #[test_case(create_mdbx_proofs_storage(); "Mdbx")] -#[tokio::test] #[serial] -async fn test_state_trie_cursor(storage: S) -> Result<(), OpProofsStorageError> { +fn test_state_trie_cursor(storage: S) -> Result<(), OpProofsStorageError> { let path = nibbles_from(vec![1, 2]); let addr = B256::repeat_byte(0x01); let branch = create_test_branch(); // Store data for account trie and state trie - storage.store_storage_branches(addr, vec![(path, Some(branch.clone()))]).await?; - 
storage.store_account_branches(vec![(path, Some(branch.clone()))]).await?; + storage.store_storage_branches(addr, vec![(path, Some(branch.clone()))])?; + storage.store_account_branches(vec![(path, Some(branch))])?; // State trie cursor (None address) should only see state trie data let mut state_cursor = storage.account_trie_cursor(100)?; @@ -654,19 +606,16 @@ async fn test_state_trie_cursor(storage: S) -> Result<(), OpPr /// Test mixed account and state data #[test_case(InMemoryProofsStorage::new(); "InMemory")] #[test_case(create_mdbx_proofs_storage(); "Mdbx")] -#[tokio::test] #[serial] -async fn test_mixed_account_state_data( - storage: S, -) -> Result<(), OpProofsStorageError> { +fn test_mixed_account_state_data(storage: S) -> Result<(), OpProofsStorageError> { let path1 = nibbles_from(vec![1]); let path2 = nibbles_from(vec![2]); let addr = B256::repeat_byte(0x01); let branch = create_test_branch(); // Store mixed account and state trie data - storage.store_storage_branches(addr, vec![(path1, Some(branch.clone()))]).await?; - storage.store_account_branches(vec![(path2, Some(branch.clone()))]).await?; + storage.store_storage_branches(addr, vec![(path1, Some(branch.clone()))])?; + storage.store_account_branches(vec![(path2, Some(branch))])?; // Account cursor should only see account data let mut account_cursor = storage.storage_trie_cursor(addr, 100)?; @@ -696,11 +645,8 @@ async fn test_mixed_account_state_data( /// Test lexicographic ordering #[test_case(InMemoryProofsStorage::new(); "InMemory")] #[test_case(create_mdbx_proofs_storage(); "Mdbx")] -#[tokio::test] #[serial] -async fn test_lexicographic_ordering( - storage: S, -) -> Result<(), OpProofsStorageError> { +fn test_lexicographic_ordering(storage: S) -> Result<(), OpProofsStorageError> { let paths = vec![ nibbles_from(vec![3, 1]), nibbles_from(vec![1, 2]), @@ -711,7 +657,7 @@ async fn test_lexicographic_ordering( // Store paths in random order for path in &paths { - 
storage.store_account_branches(vec![(*path, Some(branch.clone()))]).await?; + storage.store_account_branches(vec![(*path, Some(branch.clone()))])?; } let mut cursor = storage.account_trie_cursor(100)?; @@ -736,11 +682,8 @@ async fn test_lexicographic_ordering( /// Test path prefix scenarios #[test_case(InMemoryProofsStorage::new(); "InMemory")] #[test_case(create_mdbx_proofs_storage(); "Mdbx")] -#[tokio::test] #[serial] -async fn test_path_prefix_scenarios( - storage: S, -) -> Result<(), OpProofsStorageError> { +fn test_path_prefix_scenarios(storage: S) -> Result<(), OpProofsStorageError> { let paths = vec![ nibbles_from(vec![1]), // Prefix of next nibbles_from(vec![1, 2]), // Extends first @@ -749,7 +692,7 @@ async fn test_path_prefix_scenarios( let branch = create_test_branch(); for path in &paths { - storage.store_account_branches(vec![(*path, Some(branch.clone()))]).await?; + storage.store_account_branches(vec![(*path, Some(branch.clone()))])?; } let mut cursor = storage.account_trie_cursor(100)?; @@ -771,9 +714,8 @@ async fn test_path_prefix_scenarios( /// Test complex nibble combinations #[test_case(InMemoryProofsStorage::new(); "InMemory")] #[test_case(create_mdbx_proofs_storage(); "Mdbx")] -#[tokio::test] #[serial] -async fn test_complex_nibble_combinations( +fn test_complex_nibble_combinations( storage: S, ) -> Result<(), OpProofsStorageError> { // Test various nibble patterns including edge values @@ -787,7 +729,7 @@ async fn test_complex_nibble_combinations( let branch = create_test_branch(); for path in &paths { - storage.store_account_branches(vec![(*path, Some(branch.clone()))]).await?; + storage.store_account_branches(vec![(*path, Some(branch.clone()))])?; } let mut cursor = storage.account_trie_cursor(100)?; @@ -814,16 +756,15 @@ async fn test_complex_nibble_combinations( /// Test store and retrieve single account #[test_case(InMemoryProofsStorage::new(); "InMemory")] #[test_case(create_mdbx_proofs_storage(); "Mdbx")] -#[tokio::test] #[serial] 
-async fn test_store_and_retrieve_single_account( +fn test_store_and_retrieve_single_account( storage: S, ) -> Result<(), OpProofsStorageError> { let account_key = B256::repeat_byte(0x01); let account = create_test_account(); // Store account - storage.store_hashed_accounts(vec![(account_key, Some(account))]).await?; + storage.store_hashed_accounts(vec![(account_key, Some(account))])?; // Retrieve via cursor let mut cursor = storage.account_hashed_cursor(100)?; @@ -840,9 +781,8 @@ async fn test_store_and_retrieve_single_account( /// Test account cursor navigation #[test_case(InMemoryProofsStorage::new(); "InMemory")] #[test_case(create_mdbx_proofs_storage(); "Mdbx")] -#[tokio::test] #[serial] -async fn test_account_cursor_navigation( +fn test_account_cursor_navigation( storage: S, ) -> Result<(), OpProofsStorageError> { let accounts = [ @@ -853,7 +793,7 @@ async fn test_account_cursor_navigation( // Store accounts let accounts_to_store: Vec<_> = accounts.iter().map(|(k, v)| (*k, Some(*v))).collect(); - storage.store_hashed_accounts(accounts_to_store).await?; + storage.store_hashed_accounts(accounts_to_store)?; let mut cursor = storage.account_hashed_cursor(100)?; @@ -879,17 +819,14 @@ async fn test_account_cursor_navigation( /// Test account block versioning #[test_case(InMemoryProofsStorage::new(); "InMemory")] #[test_case(create_mdbx_proofs_storage(); "Mdbx")] -#[tokio::test] #[serial] -async fn test_account_block_versioning( - storage: S, -) -> Result<(), OpProofsStorageError> { +fn test_account_block_versioning(storage: S) -> Result<(), OpProofsStorageError> { let account_key = B256::repeat_byte(0x01); let account_v1 = create_test_account_with_values(1, 100, 0xBB); let account_v2 = create_test_account_with_values(2, 200, 0xDD); // Store account at different blocks - storage.store_hashed_accounts(vec![(account_key, Some(account_v1))]).await?; + storage.store_hashed_accounts(vec![(account_key, Some(account_v1))])?; // Cursor with max_block_number=75 should see v1 
let mut cursor75 = storage.account_hashed_cursor(75)?; @@ -897,7 +834,7 @@ async fn test_account_block_versioning( assert_eq!(result75.1.nonce, account_v1.nonce); assert_eq!(result75.1.balance, account_v1.balance); - storage.store_hashed_accounts(vec![(account_key, Some(account_v2))]).await?; + storage.store_hashed_accounts(vec![(account_key, Some(account_v2))])?; // After update, Cursor with max_block_number=150 should see v2 let mut cursor150 = storage.account_hashed_cursor(150)?; @@ -911,8 +848,7 @@ async fn test_account_block_versioning( /// Test store and retrieve storage #[test_case(InMemoryProofsStorage::new(); "InMemory")] #[test_case(create_mdbx_proofs_storage(); "Mdbx")] -#[tokio::test] -async fn test_store_and_retrieve_storage( +fn test_store_and_retrieve_storage( storage: S, ) -> Result<(), OpProofsStorageError> { let hashed_address = B256::repeat_byte(0x01); @@ -923,7 +860,7 @@ async fn test_store_and_retrieve_storage( ]; // Store storage slots - storage.store_hashed_storages(hashed_address, storage_slots.clone()).await?; + storage.store_hashed_storages(hashed_address, storage_slots.clone())?; // Retrieve via cursor let mut cursor = storage.storage_hashed_cursor(hashed_address, 100)?; @@ -941,9 +878,8 @@ async fn test_store_and_retrieve_storage( /// Test storage cursor navigation #[test_case(InMemoryProofsStorage::new(); "InMemory")] #[test_case(create_mdbx_proofs_storage(); "Mdbx")] -#[tokio::test] #[serial] -async fn test_storage_cursor_navigation( +fn test_storage_cursor_navigation( storage: S, ) -> Result<(), OpProofsStorageError> { let hashed_address = B256::repeat_byte(0x01); @@ -953,7 +889,7 @@ async fn test_storage_cursor_navigation( (B256::repeat_byte(0x50), U256::from(500)), ]; - storage.store_hashed_storages(hashed_address, storage_slots.clone()).await?; + storage.store_hashed_storages(hashed_address, storage_slots.clone())?; let mut cursor = storage.storage_hashed_cursor(hashed_address, 100)?; @@ -974,9 +910,8 @@ async fn
test_storage_cursor_navigation( /// Test storage account isolation #[test_case(InMemoryProofsStorage::new(); "InMemory")] #[test_case(create_mdbx_proofs_storage(); "Mdbx")] -#[tokio::test] #[serial] -async fn test_storage_account_isolation( +fn test_storage_account_isolation( storage: S, ) -> Result<(), OpProofsStorageError> { let address1 = B256::repeat_byte(0x01); @@ -984,8 +919,8 @@ async fn test_storage_account_isolation( let storage_key = B256::repeat_byte(0x10); // Store same storage key for different accounts - storage.store_hashed_storages(address1, vec![(storage_key, U256::from(100))]).await?; - storage.store_hashed_storages(address2, vec![(storage_key, U256::from(200))]).await?; + storage.store_hashed_storages(address1, vec![(storage_key, U256::from(100))])?; + storage.store_hashed_storages(address2, vec![(storage_key, U256::from(200))])?; // Verify each account sees only its own storage let mut cursor1 = storage.storage_hashed_cursor(address1, 100)?; @@ -1010,23 +945,20 @@ async fn test_storage_account_isolation( /// Test storage block versioning #[test_case(InMemoryProofsStorage::new(); "InMemory")] #[test_case(create_mdbx_proofs_storage(); "Mdbx")] -#[tokio::test] #[serial] -async fn test_storage_block_versioning( - storage: S, -) -> Result<(), OpProofsStorageError> { +fn test_storage_block_versioning(storage: S) -> Result<(), OpProofsStorageError> { let hashed_address = B256::repeat_byte(0x01); let storage_key = B256::repeat_byte(0x10); // Store storage at different blocks - storage.store_hashed_storages(hashed_address, vec![(storage_key, U256::from(100))]).await?; + storage.store_hashed_storages(hashed_address, vec![(storage_key, U256::from(100))])?; // Cursor with max_block_number=75 should see old value let mut cursor75 = storage.storage_hashed_cursor(hashed_address, 75)?; let result75 = cursor75.seek(storage_key)?.unwrap(); assert_eq!(result75.1, U256::from(100)); - storage.store_hashed_storages(hashed_address, vec![(storage_key, 
U256::from(200))]).await?; + storage.store_hashed_storages(hashed_address, vec![(storage_key, U256::from(200))])?; // Cursor with max_block_number=150 should see new value let mut cursor150 = storage.storage_hashed_cursor(hashed_address, 150)?; let result150 = cursor150.seek(storage_key)?.unwrap(); @@ -1038,16 +970,15 @@ async fn test_storage_block_versioning( /// Test storage zero value deletion #[test_case(InMemoryProofsStorage::new(); "InMemory")] #[test_case(create_mdbx_proofs_storage(); "Mdbx")] -#[tokio::test] #[serial] -async fn test_storage_zero_value_deletion( +fn test_storage_zero_value_deletion( storage: S, ) -> Result<(), OpProofsStorageError> { let hashed_address = B256::repeat_byte(0x01); let storage_key = B256::repeat_byte(0x10); // Store non-zero value - storage.store_hashed_storages(hashed_address, vec![(storage_key, U256::from(100))]).await?; + storage.store_hashed_storages(hashed_address, vec![(storage_key, U256::from(100))])?; // Cursor before deletion should see the value let mut cursor75 = storage.storage_hashed_cursor(hashed_address, 75)?; @@ -1066,7 +997,7 @@ async fn test_storage_zero_value_deletion( sorted_trie_updates: TrieUpdatesSorted::default(), sorted_post_state: block_state_diff_post_state.into_sorted(), }; - storage.store_trie_updates(block_ref, block_state_diff).await?; + storage.store_trie_updates(block_ref, block_state_diff)?; // Cursor after deletion should NOT see the entry (zero values are skipped) let mut cursor150 = storage.storage_hashed_cursor(hashed_address, 150)?; @@ -1079,9 +1010,8 @@ async fn test_storage_zero_value_deletion( /// Test that zero values are skipped during iteration #[test_case(InMemoryProofsStorage::new(); "InMemory")] #[test_case(create_mdbx_proofs_storage(); "Mdbx")] -#[tokio::test] #[serial] -async fn test_storage_cursor_skips_zero_values( +fn test_storage_cursor_skips_zero_values( storage: S, ) -> Result<(), OpProofsStorageError> { let hashed_address = B256::repeat_byte(0x01); @@ -1096,7 +1026,7 @@ 
async fn test_storage_cursor_skips_zero_values( ]; // Store all slots - storage.store_hashed_storages(hashed_address, storage_slots.clone()).await?; + storage.store_hashed_storages(hashed_address, storage_slots)?; // Create cursor and iterate through all entries let mut cursor = storage.storage_hashed_cursor(hashed_address, 100)?; @@ -1129,9 +1059,8 @@ async fn test_storage_cursor_skips_zero_values( /// Test empty cursors #[test_case(InMemoryProofsStorage::new(); "InMemory")] #[test_case(create_mdbx_proofs_storage(); "Mdbx")] -#[tokio::test] #[serial] -async fn test_empty_cursors(storage: S) -> Result<(), OpProofsStorageError> { +fn test_empty_cursors(storage: S) -> Result<(), OpProofsStorageError> { // Test empty account cursor let mut account_cursor = storage.account_hashed_cursor(100)?; assert!(account_cursor.seek(B256::repeat_byte(0x01))?.is_none()); @@ -1148,15 +1077,14 @@ async fn test_empty_cursors(storage: S) -> Result<(), OpProofs /// Test cursor boundary conditions #[test_case(InMemoryProofsStorage::new(); "InMemory")] #[test_case(create_mdbx_proofs_storage(); "Mdbx")] -#[tokio::test] #[serial] -async fn test_cursor_boundary_conditions( +fn test_cursor_boundary_conditions( storage: S, ) -> Result<(), OpProofsStorageError> { let account_key = B256::repeat_byte(0x80); // Middle value let account = create_test_account(); - storage.store_hashed_accounts(vec![(account_key, Some(account))]).await?; + storage.store_hashed_accounts(vec![(account_key, Some(account))])?; let mut cursor = storage.account_hashed_cursor(100)?; @@ -1178,11 +1106,8 @@ async fn test_cursor_boundary_conditions( /// Test large batch operations #[test_case(InMemoryProofsStorage::new(); "InMemory")] #[test_case(create_mdbx_proofs_storage(); "Mdbx")] -#[tokio::test] #[serial] -async fn test_large_batch_operations( - storage: S, -) -> Result<(), OpProofsStorageError> { +fn test_large_batch_operations(storage: S) -> Result<(), OpProofsStorageError> { // Create large batch of accounts let mut 
accounts = Vec::new(); for i in 0..100 { @@ -1192,7 +1117,7 @@ async fn test_large_batch_operations( } // Store in batch - storage.store_hashed_accounts(accounts.clone()).await?; + storage.store_hashed_accounts(accounts.clone())?; // Verify all accounts can be retrieved let mut cursor = storage.account_hashed_cursor(100)?; @@ -1217,9 +1142,8 @@ async fn test_large_batch_operations( /// it should iterate all existing values for that address and create deletion entries for them. #[test_case(InMemoryProofsStorage::new(); "InMemory")] #[test_case(create_mdbx_proofs_storage(); "Mdbx")] -#[tokio::test] #[serial] -async fn test_store_trie_updates_with_wiped_storage( +fn test_store_trie_updates_with_wiped_storage( storage: S, ) -> Result<(), OpProofsStorageError> { use reth_trie::HashedStorage; @@ -1236,7 +1160,7 @@ async fn test_store_trie_updates_with_wiped_storage( (B256::repeat_byte(0x40), U256::from(400)), ]; - storage.store_hashed_storages(hashed_address, storage_slots.clone()).await?; + storage.store_hashed_storages(hashed_address, storage_slots.clone())?; // Verify all values are present at block 75 let mut cursor75 = storage.storage_hashed_cursor(hashed_address, 75)?; @@ -1261,7 +1185,7 @@ async fn test_store_trie_updates_with_wiped_storage( }; // Store the wiped state - storage.store_trie_updates(block_ref, block_state_diff).await?; + storage.store_trie_updates(block_ref, block_state_diff)?; // After wiping, cursor at block 150 should see NO storage values let mut cursor150 = storage.storage_hashed_cursor(hashed_address, 150)?; @@ -1309,9 +1233,8 @@ async fn test_store_trie_updates_with_wiped_storage( /// through the cursor APIs. 
#[test_case(InMemoryProofsStorage::new(); "InMemory")] #[test_case(create_mdbx_proofs_storage(); "Mdbx")] -#[tokio::test] #[serial] -async fn test_store_trie_updates_comprehensive( +fn test_store_trie_updates_comprehensive( storage: S, ) -> Result<(), OpProofsStorageError> { use reth_trie::{updates::StorageTrieUpdates, HashedStorage}; @@ -1327,8 +1250,8 @@ async fn test_store_trie_updates_comprehensive( let account_branch1 = create_test_branch(); let account_branch2 = create_test_branch_variant(); - trie_updates.account_nodes.insert(account_path1, account_branch1.clone()); - trie_updates.account_nodes.insert(account_path2, account_branch2.clone()); + trie_updates.account_nodes.insert(account_path1, account_branch1); + trie_updates.account_nodes.insert(account_path2, account_branch2); // Add removed account nodes let removed_account_path = nibbles_from(vec![7, 8, 9]); @@ -1342,7 +1265,7 @@ async fn test_store_trie_updates_comprehensive( let mut storage_trie = StorageTrieUpdates::default(); storage_trie.storage_nodes.insert(storage_path1, storage_branch.clone()); - storage_trie.storage_nodes.insert(storage_path2, storage_branch.clone()); + storage_trie.storage_nodes.insert(storage_path2, storage_branch); // Add removed storage node let removed_storage_path = nibbles_from(vec![3, 3]); @@ -1380,7 +1303,7 @@ async fn test_store_trie_updates_comprehensive( }; // Store the updates - storage.store_trie_updates(block_ref, block_state_diff).await?; + storage.store_trie_updates(block_ref, block_state_diff)?; // ========== Verify Account Branch Nodes ========== let mut account_trie_cursor = storage.account_trie_cursor(block_ref.block.number + 10)?; @@ -1452,7 +1375,7 @@ async fn test_store_trie_updates_comprehensive( ); // ========== Verify fetch_trie_updates can retrieve the data ========== - let fetched_diff = storage.fetch_trie_updates(block_ref.block.number).await?; + let fetched_diff = storage.fetch_trie_updates(block_ref.block.number)?; // Check that trie updates are 
stored assert_eq!( @@ -1484,9 +1407,8 @@ async fn test_store_trie_updates_comprehensive( /// (`hashed_accounts`, `hashed_storages`, `account_branches`, `storage_branches`). #[test_case(InMemoryProofsStorage::new(); "InMemory")] #[test_case(create_mdbx_proofs_storage(); "Mdbx")] -#[tokio::test] #[serial] -async fn test_replace_updates_applies_all_updates( +fn test_replace_updates_applies_all_updates( storage: S, ) -> Result<(), OpProofsStorageError> { use reth_trie::{updates::StorageTrieUpdates, HashedStorage}; @@ -1515,7 +1437,7 @@ async fn test_replace_updates_applies_all_updates( sorted_trie_updates: initial_trie_updates_50.into_sorted(), sorted_post_state: initial_post_state_50.into_sorted(), }; - storage.store_trie_updates(block_ref_50, initial_diff_50).await?; + storage.store_trie_updates(block_ref_50, initial_diff_50)?; // Store data at block 100 (common block) let mut initial_trie_updates_100 = TrieUpdates::default(); @@ -1535,12 +1457,12 @@ async fn test_replace_updates_applies_all_updates( let block_ref_100 = BlockWithParent::new(block_ref_50.block.hash, NumHash::new(100, B256::repeat_byte(0x97))); - storage.store_trie_updates(block_ref_100, initial_diff_100).await?; + storage.store_trie_updates(block_ref_100, initial_diff_100)?; // Store data at block 101 (will be replaced) let mut initial_trie_updates_101 = TrieUpdates::default(); let old_branch_path = nibbles_from(vec![7, 8, 9]); - initial_trie_updates_101.account_nodes.insert(old_branch_path, initial_branch.clone()); + initial_trie_updates_101.account_nodes.insert(old_branch_path, initial_branch); let mut initial_post_state_101 = HashedPostState::default(); let old_account_addr = B256::repeat_byte(0x30); @@ -1553,7 +1475,7 @@ async fn test_replace_updates_applies_all_updates( }; let block_ref_101 = BlockWithParent::new(block_ref_100.block.hash, NumHash::new(101, B256::repeat_byte(0x98))); - storage.store_trie_updates(block_ref_101, initial_diff_101).await?; + storage.store_trie_updates(block_ref_101, 
initial_diff_101)?; let block_ref_102 = BlockWithParent::new(block_ref_101.block.hash, NumHash::new(102, B256::repeat_byte(0x99))); @@ -1625,7 +1547,7 @@ async fn test_replace_updates_applies_all_updates( let mut trie_updates_102 = TrieUpdates::default(); let block_102_branch_path = nibbles_from(vec![15, 14, 13]); - trie_updates_102.account_nodes.insert(block_102_branch_path, new_branch.clone()); + trie_updates_102.account_nodes.insert(block_102_branch_path, new_branch); let mut post_state_102 = HashedPostState::default(); post_state_102.accounts.insert(block_102_account_addr, Some(block_102_account)); @@ -1639,9 +1561,7 @@ async fn test_replace_updates_applies_all_updates( )); // Execute replace_updates - storage - .replace_updates(BlockNumHash::new(100, block_ref_100.block.hash), blocks_to_add) - .await?; + storage.replace_updates(BlockNumHash::new(100, block_ref_100.block.hash), blocks_to_add)?; // ========== Verify that data up to block 100 still exists ========== let mut cursor_50 = storage.account_trie_cursor(75)?; assert!( @@ -1721,7 +1641,7 @@ async fn test_replace_updates_applies_all_updates( assert_eq!(account_result_102.as_ref().unwrap().1.nonce, block_102_account.nonce); // Verify fetch_trie_updates returns the new data - let fetched_101 = storage.fetch_trie_updates(101).await?; + let fetched_101 = storage.fetch_trie_updates(101)?; assert_eq!( fetched_101.sorted_trie_updates.account_nodes_ref().len(), 1, @@ -1754,9 +1674,8 @@ async fn test_replace_updates_applies_all_updates( /// it is properly stored as a deletion and subsequent queries return None for that path. 
#[test_case(InMemoryProofsStorage::new(); "InMemory")] #[test_case(create_mdbx_proofs_storage(); "Mdbx")] -#[tokio::test] #[serial] -async fn test_pure_deletions_stored_correctly( +fn test_pure_deletions_stored_correctly( storage: S, ) -> Result<(), OpProofsStorageError> { use reth_trie::updates::StorageTrieUpdates; @@ -1776,7 +1695,7 @@ async fn test_pure_deletions_stored_correctly( let mut storage_trie = StorageTrieUpdates::default(); storage_trie.storage_nodes.insert(storage_path1, initial_branch.clone()); - storage_trie.storage_nodes.insert(storage_path2, initial_branch.clone()); + storage_trie.storage_nodes.insert(storage_path2, initial_branch); initial_trie_updates.insert_storage_updates(storage_address, storage_trie); let initial_diff = BlockStateDiff { @@ -1786,7 +1705,7 @@ async fn test_pure_deletions_stored_correctly( let block_ref_50 = BlockWithParent::new(B256::ZERO, NumHash::new(50, B256::repeat_byte(0x96))); - storage.store_trie_updates(block_ref_50, initial_diff).await?; + storage.store_trie_updates(block_ref_50, initial_diff)?; // Verify initial state exists at block 75 let mut cursor_75 = storage.account_trie_cursor(75)?; @@ -1828,7 +1747,7 @@ async fn test_pure_deletions_stored_correctly( let block_ref_100 = BlockWithParent::new(B256::repeat_byte(0x96), NumHash::new(100, B256::repeat_byte(0x97))); - storage.store_trie_updates(block_ref_100, deletion_diff).await?; + storage.store_trie_updates(block_ref_100, deletion_diff)?; // ========== Verify that deleted nodes return None at block 150 ========== @@ -1889,9 +1808,8 @@ async fn test_pure_deletions_stored_correctly( /// when processing trie updates that both remove and update the same node. 
#[test_case(InMemoryProofsStorage::new(); "InMemory")] #[test_case(create_mdbx_proofs_storage(); "Mdbx")] -#[tokio::test] #[serial] -async fn test_updates_take_precedence_over_removals( +fn test_updates_take_precedence_over_removals( storage: S, ) -> Result<(), OpProofsStorageError> { use reth_trie::updates::StorageTrieUpdates; @@ -1917,7 +1835,7 @@ async fn test_updates_take_precedence_over_removals( let block_ref_50 = BlockWithParent::new(B256::ZERO, NumHash::new(50, B256::repeat_byte(0x96))); - storage.store_trie_updates(block_ref_50, initial_diff).await?; + storage.store_trie_updates(block_ref_50, initial_diff)?; // Verify initial state exists at block 75 let mut cursor_75 = storage.account_trie_cursor(75)?; @@ -1959,7 +1877,7 @@ async fn test_updates_take_precedence_over_removals( let block_ref_100 = BlockWithParent::new(B256::repeat_byte(0x96), NumHash::new(100, B256::repeat_byte(0x97))); - storage.store_trie_updates(block_ref_100, conflicting_diff).await?; + storage.store_trie_updates(block_ref_100, conflicting_diff)?; // ========== Verify that updates took precedence at block 150 ========== diff --git a/crates/optimism/trie/tests/live.rs b/crates/optimism/trie/tests/live.rs index c4df14b84a1..b44b27f17c1 100644 --- a/crates/optimism/trie/tests/live.rs +++ b/crates/optimism/trie/tests/live.rs @@ -202,7 +202,7 @@ where } /// Runs a test scenario with the given configuration -async fn run_test_scenario( +fn run_test_scenario( scenario: TestScenario, provider_factory: ProviderFactory, chain_spec: Arc, @@ -242,7 +242,7 @@ where let provider = provider_factory.db_ref(); let tx = provider.tx()?; let initialization_job = InitializationJob::new(storage.clone(), tx); - initialization_job.run(last_block_number, last_block_hash).await?; + initialization_job.run(last_block_number, last_block_hash)?; } // Execute blocks after initialization using live collector @@ -268,7 +268,7 @@ where LiveTrieCollector::new(evm_config.clone(), blockchain_db, &storage); // Use the live 
collector to execute and store trie updates - live_trie_collector.execute_and_store_block_updates(&block).await?; + live_trie_collector.execute_and_store_block_updates(&block)?; // Commit the block to the database so subsequent blocks can build on it commit_block_to_database(&block, &execution_output, &provider_factory)?; @@ -283,8 +283,8 @@ where /// (1) Creates a chain with some state /// (2) Stores the genesis state into storage via initialization /// (3) Executes a block and calculates the state root using the stored state -#[tokio::test] -async fn test_execute_and_store_block_updates() { +#[test] +fn test_execute_and_store_block_updates() { let dir = TempDir::new().unwrap(); let storage = Arc::new(MdbxProofsStorage::new(dir.path()).expect("env")).into(); @@ -312,11 +312,11 @@ async fn test_execute_and_store_block_updates() { vec![BlockSpec::new(vec![TxSpec::transfer(recipient, U256::from(1))])], ); - run_test_scenario(scenario, provider_factory, chain_spec, key_pair, storage).await.unwrap(); + run_test_scenario(scenario, provider_factory, chain_spec, key_pair, storage).unwrap(); } -#[tokio::test] -async fn test_execute_and_store_block_updates_missing_parent_block() { +#[test] +fn test_execute_and_store_block_updates_missing_parent_block() { let dir = TempDir::new().unwrap(); let storage: OpProofsStorage> = Arc::new(MdbxProofsStorage::new(dir.path()).expect("env")).into(); @@ -340,7 +340,6 @@ async fn test_execute_and_store_block_updates_missing_parent_block() { key_pair, storage.clone(), ) - .await .unwrap(); // Create a block whose parent block number is missing. 
@@ -357,18 +356,18 @@ async fn test_execute_and_store_block_updates_missing_parent_block() { &mut nonce_counter, ); - let blockchain_db = BlockchainProvider::new(provider_factory.clone()).unwrap(); + let blockchain_db = BlockchainProvider::new(provider_factory).unwrap(); let collector = LiveTrieCollector::new(EthEvmConfig::ethereum(chain_spec.clone()), blockchain_db, &storage); // EXPECT: MissingParentBlock - let err = collector.execute_and_store_block_updates(&incorrect_block).await.unwrap_err(); + let err = collector.execute_and_store_block_updates(&incorrect_block).unwrap_err(); assert!(matches!(err, OpProofsStorageError::MissingParentBlock { .. })); } -#[tokio::test] -async fn test_execute_and_store_block_updates_state_root_mismatch() { +#[test] +fn test_execute_and_store_block_updates_state_root_mismatch() { let dir = TempDir::new().unwrap(); let storage: OpProofsStorage> = Arc::new(MdbxProofsStorage::new(dir.path()).expect("env")).into(); @@ -395,7 +394,6 @@ async fn test_execute_and_store_block_updates_state_root_mismatch() { key_pair, storage.clone(), ) - .await .unwrap(); // Generate a second block normally @@ -424,14 +422,14 @@ async fn test_execute_and_store_block_updates_state_root_mismatch() { block.header_mut().state_root = B256::repeat_byte(0xAA); // EXPECT: StateRootMismatch - let err = collector.execute_and_store_block_updates(&block).await.unwrap_err(); + let err = collector.execute_and_store_block_updates(&block).unwrap_err(); assert!(matches!(err, OpProofsStorageError::StateRootMismatch { .. 
})); } /// Test with multiple blocks before and after initialization -#[tokio::test] -async fn test_multiple_blocks_before_and_after_initialization() { +#[test] +fn test_multiple_blocks_before_and_after_initialization() { let dir = TempDir::new().unwrap(); let storage = Arc::new(MdbxProofsStorage::new(dir.path()).expect("env")).into(); @@ -463,12 +461,12 @@ async fn test_multiple_blocks_before_and_after_initialization() { ], ); - run_test_scenario(scenario, provider_factory, chain_spec, key_pair, storage).await.unwrap(); + run_test_scenario(scenario, provider_factory, chain_spec, key_pair, storage).unwrap(); } /// Test with blocks containing multiple transactions -#[tokio::test] -async fn test_blocks_with_multiple_transactions() { +#[test] +fn test_blocks_with_multiple_transactions() { let dir = TempDir::new().unwrap(); let storage = Arc::new(MdbxProofsStorage::new(dir.path()).expect("env")).into(); @@ -494,5 +492,5 @@ async fn test_blocks_with_multiple_transactions() { ])], ); - run_test_scenario(scenario, provider_factory, chain_spec, key_pair, storage).await.unwrap(); + run_test_scenario(scenario, provider_factory, chain_spec, key_pair, storage).unwrap(); }