diff --git a/crates/optimism/cli/src/commands/op_proofs/init.rs b/crates/optimism/cli/src/commands/op_proofs/init.rs index a9aba66dc56..5938be24573 100644 --- a/crates/optimism/cli/src/commands/op_proofs/init.rs +++ b/crates/optimism/cli/src/commands/op_proofs/init.rs @@ -7,7 +7,9 @@ use reth_cli_commands::common::{AccessRights, CliNodeTypes, Environment, Environ use reth_node_core::version::version_metadata; use reth_optimism_chainspec::OpChainSpec; use reth_optimism_primitives::OpPrimitives; -use reth_optimism_trie::{db::MdbxProofsStorage, BackfillJob, OpProofsStorage, OpProofsStore}; +use reth_optimism_trie::{ + db::MdbxProofsStorage, InitializationJob, OpProofsStorage, OpProofsStore, +}; use reth_provider::{BlockNumReader, DBProvider, DatabaseProviderFactory}; use std::{path::PathBuf, sync::Arc}; use tracing::info; @@ -78,7 +80,7 @@ impl> InitCommand { provider_factory.database_provider_ro()?.disable_long_read_transaction_safety(); let db_tx = db_provider.into_tx(); - BackfillJob::new(storage.clone(), &db_tx).run(best_number, best_hash).await?; + InitializationJob::new(storage.clone(), &db_tx).run(best_number, best_hash).await?; } info!( diff --git a/crates/optimism/trie/src/error.rs b/crates/optimism/trie/src/error.rs index d7cc3ab78e1..9d0eb5112bf 100644 --- a/crates/optimism/trie/src/error.rs +++ b/crates/optimism/trie/src/error.rs @@ -90,12 +90,12 @@ pub enum OpProofsStorageError { /// Error occurred while interacting with the provider. #[error(transparent)] ProviderError(Arc), - /// Backfill detected inconsistent state between proofs storage and source DB. + /// Initialization detected inconsistent state between proofs storage and source DB. #[error( - "Backfill detected inconsistent state. Proofs storage does not match source DB. \ - Please clear proofs data and retry backfill." + "Initialization detected inconsistent state. Proofs storage does not match source DB. \ + Please clear proofs data and retry initialization." 
)] - BackfillInconsistentState, + InitializeStorageInconsistentState, } impl From for OpProofsStorageError { diff --git a/crates/optimism/trie/src/backfill.rs b/crates/optimism/trie/src/initialize.rs similarity index 86% rename from crates/optimism/trie/src/backfill.rs rename to crates/optimism/trie/src/initialize.rs index 62583915eb0..e28f8aa25d0 100644 --- a/crates/optimism/trie/src/backfill.rs +++ b/crates/optimism/trie/src/initialize.rs @@ -1,4 +1,5 @@ -//! Backfill job for proofs storage. Handles storing the existing state into the proofs storage. +//! Initialization job for proofs storage. Handles storing the existing state into the proofs +//! storage. use crate::{ api::{InitialStateAnchor, InitialStateStatus, OpProofsInitialStateStore}, @@ -20,15 +21,15 @@ use reth_trie_common::{ use std::{collections::HashMap, time::Instant}; use tracing::info; -/// Batch size threshold for storing entries during backfill -const BACKFILL_STORAGE_THRESHOLD: usize = 100000; +/// Batch size threshold for storing entries during initialization +const INITIALIZE_STORAGE_THRESHOLD: usize = 100000; -/// Threshold for logging progress during backfill -const BACKFILL_LOG_THRESHOLD: usize = 100000; +/// Threshold for logging progress during initialization +const INITIALIZE_LOG_THRESHOLD: usize = 100000; -/// Backfill job for external storage. +/// Initialization job for external storage. #[derive(Debug)] -pub struct BackfillJob<'a, Tx: DbTx, S: OpProofsStore + Send> { +pub struct InitializationJob<'a, Tx: DbTx, S: OpProofsStore + Send> { storage: S, tx: &'a Tx, } @@ -98,7 +99,7 @@ define_simple_cursor_iter!( ); define_dup_cursor_iter!(StoragesTrieIter, tables::StoragesTrie, B256, StorageTrieEntry); -/// Trait to estimate the progress of a backfill job based on the key. +/// Trait to estimate the progress of an initialization job based on the key. 
trait CompletionEstimatable { // Returns a progress estimate as a percentage (0.0 to 1.0) fn estimate_progress(&self) -> f64; @@ -129,8 +130,8 @@ impl CompletionEstimatable for StoredNibbles { } } -/// Backfill a table from a source iterator to a storage function. Handles batching and logging. -async fn backfill< +/// Initialize a table from a source iterator to a storage function. Handles batching and logging. +async fn initialize< S: Iterator>, F: Future> + Send, Key: CompletionEstimatable + Clone + 'static, @@ -146,7 +147,7 @@ async fn backfill< let mut total_entries: u64 = 0; - info!("Starting {} backfill", name); + info!("Starting {} initialization", name); let start_time = Instant::now(); let mut source = source.peekable(); @@ -199,14 +200,14 @@ async fn backfill< save_fn(entries).await?; } - info!("{} backfill complete: {} entries", name, total_entries); + info!("{} initialization complete: {} entries", name, total_entries); Ok(total_entries) } impl<'a, Tx: DbTx + Sync, S: OpProofsStore + OpProofsInitialStateStore + Send> - BackfillJob<'a, Tx, S> + InitializationJob<'a, Tx, S> { - /// Create a new backfill job. + /// Create a new initialization job. pub const fn new(storage: S, tx: &'a Tx) -> Self { Self { storage, tx } } @@ -281,8 +282,8 @@ impl<'a, Tx: DbTx + Sync, S: OpProofsStore + OpProofsInitialStateStore + Send> Ok(()) } - /// Backfill hashed accounts data - async fn backfill_hashed_accounts( + /// Initialize hashed accounts data + async fn initialize_hashed_accounts( &self, start_key: Option, ) -> Result<(), OpProofsStorageError> { @@ -292,15 +293,15 @@ impl<'a, Tx: DbTx + Sync, S: OpProofsStore + OpProofsInitialStateStore + Send> start_cursor .seek(latest)? 
.filter(|(k, _)| *k == latest) - .ok_or(OpProofsStorageError::BackfillInconsistentState)?; + .ok_or(OpProofsStorageError::InitializeStorageInconsistentState)?; } let source = HashedAccountsIter::new(start_cursor); - backfill( + initialize( "hashed accounts", source, - BACKFILL_STORAGE_THRESHOLD, - BACKFILL_LOG_THRESHOLD, + INITIALIZE_STORAGE_THRESHOLD, + INITIALIZE_LOG_THRESHOLD, |entries| self.save_hashed_accounts(entries), ) .await?; @@ -308,8 +309,8 @@ impl<'a, Tx: DbTx + Sync, S: OpProofsStore + OpProofsInitialStateStore + Send> Ok(()) } - /// Backfill hashed storage data - async fn backfill_hashed_storages( + /// Initialize hashed storage data + async fn initialize_hashed_storages( &self, start_key: Option, ) -> Result<(), OpProofsStorageError> { @@ -319,15 +320,15 @@ impl<'a, Tx: DbTx + Sync, S: OpProofsStore + OpProofsInitialStateStore + Send> start_cursor .seek_by_key_subkey(latest.hashed_address, latest.hashed_storage_key)? .filter(|v| v.key == latest.hashed_storage_key) - .ok_or(OpProofsStorageError::BackfillInconsistentState)?; + .ok_or(OpProofsStorageError::InitializeStorageInconsistentState)?; } let source = HashedStoragesIter::new(start_cursor); - backfill( + initialize( "hashed storage", source, - BACKFILL_STORAGE_THRESHOLD, - BACKFILL_LOG_THRESHOLD, + INITIALIZE_STORAGE_THRESHOLD, + INITIALIZE_LOG_THRESHOLD, |entries| self.save_hashed_storages(entries), ) .await?; @@ -335,8 +336,8 @@ impl<'a, Tx: DbTx + Sync, S: OpProofsStore + OpProofsInitialStateStore + Send> Ok(()) } - /// Backfill accounts trie data - async fn backfill_accounts_trie( + /// Initialize accounts trie data + async fn initialize_accounts_trie( &self, start_key: Option, ) -> Result<(), OpProofsStorageError> { @@ -346,15 +347,15 @@ impl<'a, Tx: DbTx + Sync, S: OpProofsStore + OpProofsInitialStateStore + Send> start_cursor .seek(latest_key.clone())? 
.filter(|(k, _)| *k == latest_key) - .ok_or(OpProofsStorageError::BackfillInconsistentState)?; + .ok_or(OpProofsStorageError::InitializeStorageInconsistentState)?; } let source = AccountsTrieIter::new(start_cursor); - backfill( + initialize( "accounts trie", source, - BACKFILL_STORAGE_THRESHOLD, - BACKFILL_LOG_THRESHOLD, + INITIALIZE_STORAGE_THRESHOLD, + INITIALIZE_LOG_THRESHOLD, |entries| self.save_account_branches(entries), ) .await?; @@ -362,8 +363,8 @@ impl<'a, Tx: DbTx + Sync, S: OpProofsStore + OpProofsInitialStateStore + Send> Ok(()) } - /// Backfill storage trie data - async fn backfill_storages_trie( + /// Initialize storage trie data + async fn initialize_storages_trie( &self, start_key: Option, ) -> Result<(), OpProofsStorageError> { @@ -376,15 +377,15 @@ impl<'a, Tx: DbTx + Sync, S: OpProofsStore + OpProofsInitialStateStore + Send> StoredNibblesSubKey::from(latest_key.path.0), )? .filter(|v| v.nibbles.0 == latest_key.path.0) - .ok_or(OpProofsStorageError::BackfillInconsistentState)?; + .ok_or(OpProofsStorageError::InitializeStorageInconsistentState)?; } let source = StoragesTrieIter::new(start_cursor); - backfill( + initialize( "storage trie", source, - BACKFILL_STORAGE_THRESHOLD, - BACKFILL_LOG_THRESHOLD, + INITIALIZE_STORAGE_THRESHOLD, + INITIALIZE_LOG_THRESHOLD, |entries| self.save_storage_branches(entries), ) .await?; @@ -392,12 +393,15 @@ impl<'a, Tx: DbTx + Sync, S: OpProofsStore + OpProofsInitialStateStore + Send> Ok(()) } - /// Run complete backfill of all preimage data - async fn backfill_trie(&self, anchor: InitialStateAnchor) -> Result<(), OpProofsStorageError> { - self.backfill_hashed_accounts(anchor.latest_hashed_account_key).await?; - self.backfill_hashed_storages(anchor.latest_hashed_storage_key).await?; - self.backfill_storages_trie(anchor.latest_storage_trie_key).await?; - self.backfill_accounts_trie(anchor.latest_account_trie_key).await?; + /// Run complete initialization of all preimage data + async fn initialize_trie( + &self, + 
anchor: InitialStateAnchor, + ) -> Result<(), OpProofsStorageError> { + self.initialize_hashed_accounts(anchor.latest_hashed_account_key).await?; + self.initialize_hashed_storages(anchor.latest_hashed_storage_key).await?; + self.initialize_storages_trie(anchor.latest_storage_trie_key).await?; + self.initialize_accounts_trie(anchor.latest_account_trie_key).await?; Ok(()) } @@ -407,16 +411,16 @@ impl<'a, Tx: DbTx + Sync, S: OpProofsStore + OpProofsInitialStateStore + Send> best_number: u64, best_hash: B256, ) -> Result<(), OpProofsStorageError> { - let block = anchor.block.ok_or(OpProofsStorageError::BackfillInconsistentState)?; + let block = anchor.block.ok_or(OpProofsStorageError::InitializeStorageInconsistentState)?; if block.number != best_number || block.hash != best_hash { - return Err(OpProofsStorageError::BackfillInconsistentState); + return Err(OpProofsStorageError::InitializeStorageInconsistentState); } Ok(()) } - /// Run the backfill job. + /// Run the initialization job. pub async fn run(&self, best_number: u64, best_hash: B256) -> Result<(), OpProofsStorageError> { let anchor = self.storage.initial_state_anchor().await?; @@ -432,7 +436,7 @@ impl<'a, Tx: DbTx + Sync, S: OpProofsStore + OpProofsInitialStateStore + Send> } } - self.backfill_trie(anchor).await?; + self.initialize_trie(anchor).await?; self.storage.commit_initial_state().await?; Ok(()) @@ -478,7 +482,7 @@ mod tests { } #[tokio::test] - async fn test_backfill_hashed_accounts() { + async fn test_initialize_hashed_accounts() { let db = create_test_rw_db(); let dir = TempDir::new().unwrap(); let storage = Arc::new(MdbxProofsStorage::new(dir.path()).expect("env")); @@ -511,10 +515,10 @@ mod tests { drop(cursor); tx.commit().unwrap(); - // Run backfill + // Run initialization let tx = db.tx().unwrap(); - let job = BackfillJob::new(storage.clone(), &tx); - job.backfill_hashed_accounts(None).await.unwrap(); + let job = InitializationJob::new(storage.clone(), &tx); + 
job.initialize_hashed_accounts(None).await.unwrap(); // Verify data was stored (will be in sorted order) let mut account_cursor = storage.account_hashed_cursor(100).unwrap(); @@ -529,7 +533,7 @@ mod tests { } #[tokio::test] - async fn test_backfill_hashed_storage() { + async fn test_initialize_hashed_storage() { let db = create_test_rw_db(); let dir = TempDir::new().unwrap(); let storage = Arc::new(MdbxProofsStorage::new(dir.path()).expect("env")); @@ -562,10 +566,10 @@ mod tests { drop(cursor); tx.commit().unwrap(); - // Run backfill + // Run initialization let tx = db.tx().unwrap(); - let job = BackfillJob::new(storage.clone(), &tx); - job.backfill_hashed_storages(None).await.unwrap(); + let job = InitializationJob::new(storage.clone(), &tx); + job.initialize_hashed_storages(None).await.unwrap(); // Verify data was stored for addr1 let mut storage_cursor = storage.storage_hashed_cursor(addr1, 100).unwrap(); @@ -588,7 +592,7 @@ mod tests { } #[tokio::test] - async fn test_backfill_accounts_trie() { + async fn test_initialize_accounts_trie() { let db = create_test_rw_db(); let dir = TempDir::new().unwrap(); let storage = Arc::new(MdbxProofsStorage::new(dir.path()).expect("env")); @@ -610,10 +614,10 @@ mod tests { drop(cursor); tx.commit().unwrap(); - // Run backfill + // Run initialization let tx = db.tx().unwrap(); - let job = BackfillJob::new(storage.clone(), &tx); - job.backfill_accounts_trie(None).await.unwrap(); + let job = InitializationJob::new(storage.clone(), &tx); + job.initialize_accounts_trie(None).await.unwrap(); // Verify data was stored let mut trie_cursor = storage.account_trie_cursor(100).unwrap(); @@ -626,7 +630,7 @@ mod tests { } #[tokio::test] - async fn test_backfill_storages_trie() { + async fn test_initialize_storages_trie() { let db = create_test_rw_db(); let dir = TempDir::new().unwrap(); let storage = Arc::new(MdbxProofsStorage::new(dir.path()).expect("env")); @@ -669,10 +673,10 @@ mod tests { drop(cursor); tx.commit().unwrap(); - // Run 
backfill + // Run initialization let tx = db.tx().unwrap(); - let job = BackfillJob::new(storage.clone(), &tx); - job.backfill_storages_trie(None).await.unwrap(); + let job = InitializationJob::new(storage.clone(), &tx); + job.initialize_storages_trie(None).await.unwrap(); // Verify data was stored for addr1 let mut trie_cursor = storage.storage_trie_cursor(addr1, 100).unwrap(); @@ -695,7 +699,7 @@ mod tests { } #[tokio::test] - async fn test_full_backfill_run() { + async fn test_full_initialize_run() { let db = create_test_rw_db(); let dir = TempDir::new().unwrap(); let storage = Arc::new(MdbxProofsStorage::new(dir.path()).expect("env")); @@ -746,9 +750,9 @@ mod tests { tx.commit().unwrap(); - // Run full backfill + // Run full initialization let tx = db.tx().unwrap(); - let job = BackfillJob::new(storage.clone(), &tx); + let job = InitializationJob::new(storage.clone(), &tx); let best_number = 100; let best_hash = B256::repeat_byte(0x42); @@ -758,13 +762,13 @@ mod tests { job.run(best_number, best_hash).await.unwrap(); - // Should be set after backfill + // Should be set after initialization assert_eq!( storage.get_earliest_block_number().await.unwrap(), Some((best_number, best_hash)) ); - // Verify data was backfilled + // Verify data was initialized let mut account_cursor = storage.account_hashed_cursor(100).unwrap(); assert!(account_cursor.next().unwrap().is_some()); @@ -779,7 +783,7 @@ mod tests { } #[tokio::test] - async fn test_backfill_run_skips_if_already_done() { + async fn test_initialize_run_skips_if_already_done() { let db = create_test_rw_db(); let dir = TempDir::new().unwrap(); let storage = Arc::new(MdbxProofsStorage::new(dir.path()).expect("env")); @@ -792,9 +796,9 @@ mod tests { storage.commit_initial_state().await.expect("commit anchor"); let tx = db.tx().unwrap(); - let job = BackfillJob::new(storage.clone(), &tx); + let job = InitializationJob::new(storage.clone(), &tx); - // Run backfill - should skip + // Run initialization - should skip 
job.run(100, B256::repeat_byte(0x42)).await.unwrap(); // Should still have the old anchor @@ -813,7 +817,7 @@ mod tests { } #[tokio::test] - async fn test_backfill_resumes_hashed_accounts_with_no_dups() { + async fn test_initialize_resumes_hashed_accounts_with_no_dups() { let db = create_test_rw_db(); let dir = TempDir::new().unwrap(); let store = Arc::new(MdbxProofsStorage::new(dir.path()).expect("env")); @@ -836,11 +840,11 @@ mod tests { tx.commit().unwrap(); } - // Backfill #1 + // Initialization #1 { let tx = db.tx().unwrap(); - let job = BackfillJob::new(store.clone(), &tx); - job.backfill_hashed_accounts(None).await.unwrap(); + let job = InitializationJob::new(store.clone(), &tx); + job.initialize_hashed_accounts(None).await.unwrap(); } // Resume point must be k2 (max) @@ -862,11 +866,11 @@ mod tests { tx.commit().unwrap(); } - // Backfill #2 (restart) + // Initialization #2 (restart) { let tx = db.tx().unwrap(); - let job = BackfillJob::new(store.clone(), &tx); - job.backfill_hashed_accounts(Some(k2)).await.unwrap(); + let job = InitializationJob::new(store.clone(), &tx); + job.initialize_hashed_accounts(Some(k2)).await.unwrap(); } // Now resume point must be k4 @@ -896,7 +900,7 @@ mod tests { } #[tokio::test] - async fn test_backfill_resumes_hashed_storages_with_no_dups() { + async fn test_initialize_resumes_hashed_storages_with_no_dups() { let db = create_test_rw_db(); let dir = TempDir::new().unwrap(); let store = Arc::new(MdbxProofsStorage::new(dir.path()).expect("env")); @@ -926,11 +930,11 @@ mod tests { tx.commit().unwrap(); } - // Backfill #1 + // Initialization #1 { let tx = db.tx().unwrap(); - let job = BackfillJob::new(store.clone(), &tx); - job.backfill_hashed_storages(None).await.unwrap(); + let job = InitializationJob::new(store.clone(), &tx); + job.initialize_hashed_storages(None).await.unwrap(); } // Latest key must be (a2, s21) because a2 > a1 @@ -951,11 +955,11 @@ mod tests { tx.commit().unwrap(); } - // Backfill #2 + // Initialization #2 { 
let tx = db.tx().unwrap(); - let job = BackfillJob::new(store.clone(), &tx); - job.backfill_hashed_storages(Some(HashedStorageKey::new(a2, s21))).await.unwrap(); + let job = InitializationJob::new(store.clone(), &tx); + job.initialize_hashed_storages(Some(HashedStorageKey::new(a2, s21))).await.unwrap(); } // Latest key now must be (a2, s22) @@ -992,7 +996,7 @@ mod tests { } #[tokio::test] - async fn test_backfill_resumes_accounts_trie_with_no_dups() { + async fn test_initialize_resumes_accounts_trie_with_no_dups() { let db = create_test_rw_db(); let dir = TempDir::new().unwrap(); let store = Arc::new(MdbxProofsStorage::new(dir.path()).expect("env")); @@ -1016,11 +1020,11 @@ mod tests { tx.commit().unwrap(); } - // Backfill #1 + // Initialization #1 { let tx = db.tx().unwrap(); - let job = BackfillJob::new(store.clone(), &tx); - job.backfill_accounts_trie(None).await.unwrap(); + let job = InitializationJob::new(store.clone(), &tx); + job.initialize_accounts_trie(None).await.unwrap(); } assert_eq!( @@ -1037,11 +1041,11 @@ mod tests { tx.commit().unwrap(); } - // Backfill #2 + // Initialization #2 { let tx = db.tx().unwrap(); - let job = BackfillJob::new(store.clone(), &tx); - job.backfill_accounts_trie(Some(p2.clone())).await.unwrap(); + let job = InitializationJob::new(store.clone(), &tx); + job.initialize_accounts_trie(Some(p2.clone())).await.unwrap(); } assert_eq!( @@ -1063,7 +1067,7 @@ mod tests { } #[tokio::test] - async fn test_backfill_resumes_storages_trie_with_no_dups() { + async fn test_initialize_resumes_storages_trie_with_no_dups() { let db = create_test_rw_db(); let dir = TempDir::new().unwrap(); let store = Arc::new(MdbxProofsStorage::new(dir.path()).expect("env")); @@ -1097,11 +1101,11 @@ mod tests { tx.commit().unwrap(); } - // Backfill #1 + // Initialization #1 { let tx = db.tx().unwrap(); - let job = BackfillJob::new(store.clone(), &tx); - job.backfill_storages_trie(None).await.unwrap(); + let job = InitializationJob::new(store.clone(), &tx); + 
job.initialize_storages_trie(None).await.unwrap(); } // Latest must be (a2, n2) because a2 > a1 @@ -1126,11 +1130,11 @@ mod tests { tx.commit().unwrap(); } - // Backfill #2 + // Initialization #2 { let tx = db.tx().unwrap(); - let job = BackfillJob::new(store.clone(), &tx); - job.backfill_storages_trie(Some(StorageTrieKey::new(a2, StoredNibbles::from(n2.0)))) + let job = InitializationJob::new(store.clone(), &tx); + job.initialize_storages_trie(Some(StorageTrieKey::new(a2, StoredNibbles::from(n2.0)))) .await .unwrap(); } diff --git a/crates/optimism/trie/src/lib.rs b/crates/optimism/trie/src/lib.rs index 0d0ca968eca..1de99b3ed1d 100644 --- a/crates/optimism/trie/src/lib.rs +++ b/crates/optimism/trie/src/lib.rs @@ -15,8 +15,8 @@ pub mod api; pub use api::{BlockStateDiff, OpProofsStore}; -pub mod backfill; -pub use backfill::BackfillJob; +pub mod initialize; +pub use initialize::InitializationJob; pub mod in_memory; pub use in_memory::{ diff --git a/crates/optimism/trie/tests/live.rs b/crates/optimism/trie/tests/live.rs index 88581915571..fe54e3cfd78 100644 --- a/crates/optimism/trie/tests/live.rs +++ b/crates/optimism/trie/tests/live.rs @@ -12,7 +12,7 @@ use reth_evm::{execute::Executor, ConfigureEvm}; use reth_evm_ethereum::EthEvmConfig; use reth_node_api::{NodePrimitives, NodeTypesWithDB}; use reth_optimism_trie::{ - backfill::BackfillJob, live::LiveTrieCollector, MdbxProofsStorage, OpProofsStorage, + initialize::InitializationJob, live::LiveTrieCollector, MdbxProofsStorage, OpProofsStorage, OpProofsStorageError, }; use reth_primitives_traits::{ @@ -58,10 +58,10 @@ struct BlockSpec { /// Configuration for a test scenario #[derive(Debug, Constructor)] struct TestScenario { - /// Blocks to execute before running the backfill job - blocks_before_backfill: Vec, - /// Blocks to execute after backfill using the live collector - blocks_after_backfill: Vec, + /// Blocks to execute before running the initialization job + blocks_before_initialization: Vec, + /// Blocks to 
execute after initialization using the live collector + blocks_after_initialization: Vec, } /// Helper to create a chain spec with a genesis account funded @@ -219,8 +219,8 @@ where let mut last_block_hash = genesis_hash; let mut last_block_number = 0u64; - // Execute blocks before backfill - for (idx, block_spec) in scenario.blocks_before_backfill.iter().enumerate() { + // Execute blocks before initialization + for (idx, block_spec) in scenario.blocks_before_initialization.iter().enumerate() { let block_number = idx as u64 + 1; let mut block = create_block_from_spec( block_spec, @@ -241,14 +241,14 @@ where { let provider = provider_factory.db_ref(); let tx = provider.tx()?; - let backfill_job = BackfillJob::new(storage.clone(), &tx); - backfill_job.run(last_block_number, last_block_hash).await?; + let initialization_job = InitializationJob::new(storage.clone(), &tx); + initialization_job.run(last_block_number, last_block_hash).await?; } - // Execute blocks after backfill using live collector + // Execute blocks after initialization using live collector let evm_config = EthEvmConfig::ethereum(chain_spec.clone()); - for (idx, block_spec) in scenario.blocks_after_backfill.iter().enumerate() { + for (idx, block_spec) in scenario.blocks_after_initialization.iter().enumerate() { let block_number = last_block_number + idx as u64 + 1; let mut block = create_block_from_spec( block_spec, @@ -281,7 +281,7 @@ where /// End-to-end test of a single live collector iteration. 
/// (1) Creates a chain with some state -/// (2) Stores the genesis state into storage via backfill +/// (2) Stores the genesis state into storage via initialization /// (3) Executes a block and calculates the state root using the stored state #[tokio::test] async fn test_execute_and_store_block_updates() { @@ -303,9 +303,9 @@ async fn test_execute_and_store_block_updates() { init_genesis(&provider_factory).unwrap(); // Define the test scenario: - // - No blocks before backfill - // - Backfill to genesis (block 0) - // - Execute one block with a single transaction after backfill + // - No blocks before initialization + // - Initialization to genesis (block 0) + // - Execute one block with a single transaction after initialization let recipient = Address::repeat_byte(0x42); let scenario = TestScenario::new( vec![], @@ -329,10 +329,10 @@ async fn test_execute_and_store_block_updates_missing_parent_block() { let provider_factory = create_test_provider_factory_with_chain_spec(chain_spec.clone()); init_genesis(&provider_factory).unwrap(); - // No blocks before backfill; backfill only inserts genesis. + // No blocks before initialization; initialization only inserts genesis. let scenario = TestScenario::new(vec![], vec![]); - // Run backfill (block 0 only) + // Run initialization (block 0 only) run_test_scenario( scenario, provider_factory.clone(), @@ -381,7 +381,7 @@ async fn test_execute_and_store_block_updates_state_root_mismatch() { let provider_factory = create_test_provider_factory_with_chain_spec(chain_spec.clone()); init_genesis(&provider_factory).unwrap(); - // Run normal scenario: no blocks before backfill, one block after. + // Run normal scenario: no blocks before initialization, one block after. let recipient = Address::repeat_byte(0x42); let scenario = TestScenario::new( vec![], @@ -429,9 +429,9 @@ async fn test_execute_and_store_block_updates_state_root_mismatch() { assert!(matches!(err, OpProofsStorageError::StateRootMismatch { .. 
})); } -/// Test with multiple blocks before and after backfill +/// Test with multiple blocks before and after initialization #[tokio::test] -async fn test_multiple_blocks_before_and_after_backfill() { +async fn test_multiple_blocks_before_and_after_initialization() { let dir = TempDir::new().unwrap(); let storage = Arc::new(MdbxProofsStorage::new(dir.path()).expect("env")).into(); @@ -444,8 +444,8 @@ async fn test_multiple_blocks_before_and_after_backfill() { init_genesis(&provider_factory).unwrap(); // Define the test scenario: - // - Execute 3 blocks before backfill (will be committed to db) - // - Backfill to block 3 + // - Execute 3 blocks before initialization (will be committed to db) + // - Initialization to block 3 // - Execute 2 more blocks using the live collector let recipient1 = Address::repeat_byte(0x42); let recipient2 = Address::repeat_byte(0x43);