diff --git a/substrate/frame/revive/Cargo.toml b/substrate/frame/revive/Cargo.toml index 9dd6833c98694..4bd56bc584613 100644 --- a/substrate/frame/revive/Cargo.toml +++ b/substrate/frame/revive/Cargo.toml @@ -42,6 +42,7 @@ revm = { workspace = true } rlp = { workspace = true } scale-info = { features = ["derive"], workspace = true } serde = { features = ["alloc", "derive"], workspace = true, default-features = false } +serde_json = { features = ["alloc"], workspace = true, default-features = false } # Polkadot SDK Dependencies bn = { workspace = true } diff --git a/substrate/frame/revive/rpc/.sqlx/query-30ae7176a84f6402b1a43037d3c9fb00315b5d17dfe1e8d261df08f71e54e70a.json b/substrate/frame/revive/rpc/.sqlx/query-30ae7176a84f6402b1a43037d3c9fb00315b5d17dfe1e8d261df08f71e54e70a.json new file mode 100644 index 0000000000000..1f324777abf14 --- /dev/null +++ b/substrate/frame/revive/rpc/.sqlx/query-30ae7176a84f6402b1a43037d3c9fb00315b5d17dfe1e8d261df08f71e54e70a.json @@ -0,0 +1,62 @@ +{ + "db_name": "SQLite", + "query": "\n\t\t\tSELECT log_index, address, transaction_hash, topic_0, topic_1, topic_2, topic_3, data\n\t\t\tFROM logs\n\t\t\tWHERE block_hash = $1 AND transaction_index = $2\n\t\t\tORDER BY log_index ASC\n\t\t\t", + "describe": { + "columns": [ + { + "name": "log_index", + "ordinal": 0, + "type_info": "Integer" + }, + { + "name": "address", + "ordinal": 1, + "type_info": "Blob" + }, + { + "name": "transaction_hash", + "ordinal": 2, + "type_info": "Blob" + }, + { + "name": "topic_0", + "ordinal": 3, + "type_info": "Blob" + }, + { + "name": "topic_1", + "ordinal": 4, + "type_info": "Blob" + }, + { + "name": "topic_2", + "ordinal": 5, + "type_info": "Blob" + }, + { + "name": "topic_3", + "ordinal": 6, + "type_info": "Blob" + }, + { + "name": "data", + "ordinal": 7, + "type_info": "Blob" + } + ], + "parameters": { + "Right": 2 + }, + "nullable": [ + false, + false, + false, + true, + true, + true, + true, + true + ] + }, + "hash": 
"30ae7176a84f6402b1a43037d3c9fb00315b5d17dfe1e8d261df08f71e54e70a" +} diff --git a/substrate/frame/revive/rpc/.sqlx/query-47b830cef6768ed5b119c74037482baef86a7c3d3469873a205805ef342ba031.json b/substrate/frame/revive/rpc/.sqlx/query-47b830cef6768ed5b119c74037482baef86a7c3d3469873a205805ef342ba031.json new file mode 100644 index 0000000000000..bc7541d5297e2 --- /dev/null +++ b/substrate/frame/revive/rpc/.sqlx/query-47b830cef6768ed5b119c74037482baef86a7c3d3469873a205805ef342ba031.json @@ -0,0 +1,20 @@ +{ + "db_name": "SQLite", + "query": "\n\t\t\tSELECT substrate_block_hash\n\t\t\tFROM eth_to_substrate_blocks\n\t\t\tWHERE ethereum_block_hash = $1\n\t\t\t", + "describe": { + "columns": [ + { + "name": "substrate_block_hash", + "ordinal": 0, + "type_info": "Blob" + } + ], + "parameters": { + "Right": 1 + }, + "nullable": [ + false + ] + }, + "hash": "47b830cef6768ed5b119c74037482baef86a7c3d3469873a205805ef342ba031" +} diff --git a/substrate/frame/revive/rpc/.sqlx/query-633be9386b88059d022cf532499da803f842b5c8630d5e9c4c91ae4bc39c168a.json b/substrate/frame/revive/rpc/.sqlx/query-633be9386b88059d022cf532499da803f842b5c8630d5e9c4c91ae4bc39c168a.json new file mode 100644 index 0000000000000..b87ef45bf0de7 --- /dev/null +++ b/substrate/frame/revive/rpc/.sqlx/query-633be9386b88059d022cf532499da803f842b5c8630d5e9c4c91ae4bc39c168a.json @@ -0,0 +1,12 @@ +{ + "db_name": "SQLite", + "query": "\n\t\t\tINSERT OR REPLACE INTO eth_to_substrate_blocks (ethereum_block_hash, substrate_block_hash, block_number, gas_limit, block_author)\n\t\t\tVALUES ($1, $2, $3, $4, $5)\n\t\t\t", + "describe": { + "columns": [], + "parameters": { + "Right": 5 + }, + "nullable": [] + }, + "hash": "633be9386b88059d022cf532499da803f842b5c8630d5e9c4c91ae4bc39c168a" +} diff --git a/substrate/frame/revive/rpc/.sqlx/query-668e8f9a808d1dc82c138f75d60a16aaed5151b5a22ae4ea57c12371ddc56190.json b/substrate/frame/revive/rpc/.sqlx/query-668e8f9a808d1dc82c138f75d60a16aaed5151b5a22ae4ea57c12371ddc56190.json new file 
mode 100644 index 0000000000000..5ae1423c1d96f --- /dev/null +++ b/substrate/frame/revive/rpc/.sqlx/query-668e8f9a808d1dc82c138f75d60a16aaed5151b5a22ae4ea57c12371ddc56190.json @@ -0,0 +1,32 @@ +{ + "db_name": "SQLite", + "query": "\n\t\t\tSELECT ethereum_block_hash, gas_limit, block_author\n\t\t\tFROM eth_to_substrate_blocks\n\t\t\tWHERE substrate_block_hash = $1\n\t\t\t", + "describe": { + "columns": [ + { + "name": "ethereum_block_hash", + "ordinal": 0, + "type_info": "Blob" + }, + { + "name": "gas_limit", + "ordinal": 1, + "type_info": "Blob" + }, + { + "name": "block_author", + "ordinal": 2, + "type_info": "Blob" + } + ], + "parameters": { + "Right": 1 + }, + "nullable": [ + false, + false, + false + ] + }, + "hash": "668e8f9a808d1dc82c138f75d60a16aaed5151b5a22ae4ea57c12371ddc56190" +} diff --git a/substrate/frame/revive/rpc/.sqlx/query-76dd0f2460cfc0ffa93dda7a42893cbf05b3451cb8e4c4cb6cf86ec70930a11e.json b/substrate/frame/revive/rpc/.sqlx/query-76dd0f2460cfc0ffa93dda7a42893cbf05b3451cb8e4c4cb6cf86ec70930a11e.json deleted file mode 100644 index e8c40966e8395..0000000000000 --- a/substrate/frame/revive/rpc/.sqlx/query-76dd0f2460cfc0ffa93dda7a42893cbf05b3451cb8e4c4cb6cf86ec70930a11e.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "db_name": "SQLite", - "query": "\n\t\t\t\t\tINSERT OR REPLACE INTO logs(\n\t\t\t\t\t\tblock_hash,\n\t\t\t\t\t\ttransaction_index,\n\t\t\t\t\t\tlog_index,\n\t\t\t\t\t\taddress,\n\t\t\t\t\t\tblock_number,\n\t\t\t\t\t\ttransaction_hash,\n\t\t\t\t\t\ttopic_0, topic_1, topic_2, topic_3,\n\t\t\t\t\t\tdata)\n\t\t\t\t\tVALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11)\n\t\t\t\t\t", - "describe": { - "columns": [], - "parameters": { - "Right": 11 - }, - "nullable": [] - }, - "hash": "76dd0f2460cfc0ffa93dda7a42893cbf05b3451cb8e4c4cb6cf86ec70930a11e" -} diff --git a/substrate/frame/revive/rpc/.sqlx/query-c009009858fd2f048c98ba5009867296260bb075618d7e9dfaf6cba1e59fefb8.json 
b/substrate/frame/revive/rpc/.sqlx/query-c009009858fd2f048c98ba5009867296260bb075618d7e9dfaf6cba1e59fefb8.json new file mode 100644 index 0000000000000..ab3d18135f601 --- /dev/null +++ b/substrate/frame/revive/rpc/.sqlx/query-c009009858fd2f048c98ba5009867296260bb075618d7e9dfaf6cba1e59fefb8.json @@ -0,0 +1,12 @@ +{ + "db_name": "SQLite", + "query": "\n\t\t\t\t\t\tINSERT OR REPLACE INTO logs(\n\t\t\t\t\t\t\tblock_hash,\n\t\t\t\t\t\t\ttransaction_index,\n\t\t\t\t\t\t\tlog_index,\n\t\t\t\t\t\t\taddress,\n\t\t\t\t\t\t\tblock_number,\n\t\t\t\t\t\t\ttransaction_hash,\n\t\t\t\t\t\t\ttopic_0, topic_1, topic_2, topic_3,\n\t\t\t\t\t\t\tdata)\n\t\t\t\t\t\tVALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11)\n\t\t\t\t\t\t", + "describe": { + "columns": [], + "parameters": { + "Right": 11 + }, + "nullable": [] + }, + "hash": "c009009858fd2f048c98ba5009867296260bb075618d7e9dfaf6cba1e59fefb8" +} diff --git a/substrate/frame/revive/rpc/.sqlx/query-cddbc4c024622fb7ceaeaa7e1f7d39ac9a81aa8ccaa583dd7160c287e1b9dc2c.json b/substrate/frame/revive/rpc/.sqlx/query-cddbc4c024622fb7ceaeaa7e1f7d39ac9a81aa8ccaa583dd7160c287e1b9dc2c.json new file mode 100644 index 0000000000000..24f0e53698503 --- /dev/null +++ b/substrate/frame/revive/rpc/.sqlx/query-cddbc4c024622fb7ceaeaa7e1f7d39ac9a81aa8ccaa583dd7160c287e1b9dc2c.json @@ -0,0 +1,32 @@ +{ + "db_name": "SQLite", + "query": "\n\t\t\tSELECT status, gas_used, gas_price\n\t\t\tFROM transaction_hashes\n\t\t\tWHERE transaction_hash = $1\n\t\t\t", + "describe": { + "columns": [ + { + "name": "status", + "ordinal": 0, + "type_info": "Integer" + }, + { + "name": "gas_used", + "ordinal": 1, + "type_info": "Blob" + }, + { + "name": "gas_price", + "ordinal": 2, + "type_info": "Blob" + } + ], + "parameters": { + "Right": 1 + }, + "nullable": [ + false, + false, + false + ] + }, + "hash": "cddbc4c024622fb7ceaeaa7e1f7d39ac9a81aa8ccaa583dd7160c287e1b9dc2c" +} diff --git 
a/substrate/frame/revive/rpc/.sqlx/query-de639fe0ac0c3cfdfbbfa07e81a61dae3a284ad4f7979dacb0ccff32cdc39051.json b/substrate/frame/revive/rpc/.sqlx/query-de639fe0ac0c3cfdfbbfa07e81a61dae3a284ad4f7979dacb0ccff32cdc39051.json deleted file mode 100644 index 6df5453213956..0000000000000 --- a/substrate/frame/revive/rpc/.sqlx/query-de639fe0ac0c3cfdfbbfa07e81a61dae3a284ad4f7979dacb0ccff32cdc39051.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "db_name": "SQLite", - "query": "\n\t\t\t\tINSERT OR REPLACE INTO transaction_hashes (transaction_hash, block_hash, transaction_index)\n\t\t\t\tVALUES ($1, $2, $3)\n\t\t\t\t", - "describe": { - "columns": [], - "parameters": { - "Right": 3 - }, - "nullable": [] - }, - "hash": "de639fe0ac0c3cfdfbbfa07e81a61dae3a284ad4f7979dacb0ccff32cdc39051" -} diff --git a/substrate/frame/revive/rpc/.sqlx/query-f82a6993e314fb2fb57f20552e2b1252e0dcfb05df1e76cdc08337d1b072571d.json b/substrate/frame/revive/rpc/.sqlx/query-f82a6993e314fb2fb57f20552e2b1252e0dcfb05df1e76cdc08337d1b072571d.json new file mode 100644 index 0000000000000..b8e5474309505 --- /dev/null +++ b/substrate/frame/revive/rpc/.sqlx/query-f82a6993e314fb2fb57f20552e2b1252e0dcfb05df1e76cdc08337d1b072571d.json @@ -0,0 +1,12 @@ +{ + "db_name": "SQLite", + "query": "\n\t\t\t\t\tINSERT OR REPLACE INTO transaction_hashes (transaction_hash, block_hash, transaction_index, status, gas_used, gas_price)\n\t\t\t\t\tVALUES ($1, $2, $3, $4, $5, $6)\n\t\t\t\t\t", + "describe": { + "columns": [], + "parameters": { + "Right": 6 + }, + "nullable": [] + }, + "hash": "f82a6993e314fb2fb57f20552e2b1252e0dcfb05df1e76cdc08337d1b072571d" +} diff --git a/substrate/frame/revive/rpc/migrations/0001_create_transaction_hashes.sql b/substrate/frame/revive/rpc/migrations/0001_create_transaction_hashes.sql index 8fd6b353faa82..3e5a22bcb0725 100644 --- a/substrate/frame/revive/rpc/migrations/0001_create_transaction_hashes.sql +++ b/substrate/frame/revive/rpc/migrations/0001_create_transaction_hashes.sql @@ -14,7 +14,10 @@ 
CREATE TABLE IF NOT EXISTS transaction_hashes ( transaction_hash BLOB NOT NULL PRIMARY KEY, transaction_index INTEGER NOT NULL, - block_hash BLOB NOT NULL + block_hash BLOB NOT NULL, + status INTEGER NOT NULL, + gas_used BLOB NOT NULL, + gas_price BLOB NOT NULL ); CREATE INDEX IF NOT EXISTS idx_block_hash ON transaction_hashes ( diff --git a/substrate/frame/revive/rpc/migrations/0003_create_eth_substrate_block_mapping.sql b/substrate/frame/revive/rpc/migrations/0003_create_eth_substrate_block_mapping.sql new file mode 100644 index 0000000000000..28721ab9100f6 --- /dev/null +++ b/substrate/frame/revive/rpc/migrations/0003_create_eth_substrate_block_mapping.sql @@ -0,0 +1,15 @@ +CREATE TABLE IF NOT EXISTS eth_to_substrate_blocks ( + ethereum_block_hash BLOB NOT NULL PRIMARY KEY, + substrate_block_hash BLOB NOT NULL, + block_number INTEGER NOT NULL, + gas_limit BLOB NOT NULL, + block_author BLOB NOT NULL +); + +CREATE INDEX IF NOT EXISTS idx_substrate_block_hash ON eth_to_substrate_blocks ( + substrate_block_hash +); + +CREATE INDEX IF NOT EXISTS idx_block_number ON eth_to_substrate_blocks ( + block_number +); diff --git a/substrate/frame/revive/rpc/src/client.rs b/substrate/frame/revive/rpc/src/client.rs index 6a967da875394..92e561148b86d 100644 --- a/substrate/frame/revive/rpc/src/client.rs +++ b/substrate/frame/revive/rpc/src/client.rs @@ -26,14 +26,14 @@ use storage_api::StorageApi; use crate::{ subxt_client::{self, revive::calls::types::EthTransact, SrcChainConfig}, BlockInfoProvider, BlockTag, FeeHistoryProvider, ReceiptProvider, SubxtBlockInfoProvider, - TracerType, TransactionInfo, LOG_TARGET, + TracerType, TransactionInfo, }; use jsonrpsee::types::{error::CALL_EXECUTION_FAILED_CODE, ErrorObjectOwned}; use pallet_revive::{ evm::{ decode_revert_reason, Block, BlockNumberOrTag, BlockNumberOrTagOrHash, FeeHistoryResult, Filter, GenericTransaction, HashesOrTransactionInfos, Log, ReceiptInfo, SyncingProgress, - SyncingStatus, Trace, TransactionSigned, 
TransactionTrace, H256, U256, + SyncingStatus, Trace, TransactionSigned, TransactionTrace, H256, }, EthTransactError, }; @@ -128,6 +128,7 @@ pub enum ClientError { #[error("Ethereum block not found")] EthereumBlockNotFound, } +const LOG_TARGET: &str = "eth-rpc::client"; const REVERT_CODE: i32 = 3; impl From for ErrorObjectOwned { @@ -189,7 +190,7 @@ async fn extract_block_timestamp(block: &SubstrateBlock) -> Option { .find_first::() .ok()??; - Some(ext.value.now / 1000) + Some(ext.value.now) } /// Connect to a node at the given URL, and return the underlying API, RPC client, and legacy RPC @@ -223,7 +224,7 @@ impl Client { let (chain_id, max_block_weight) = tokio::try_join!(chain_id(&api), max_block_weight(&api))?; - Ok(Self { + let client = Self { api, rpc_client, rpc, @@ -232,7 +233,69 @@ impl Client { fee_history_provider: FeeHistoryProvider::default(), chain_id, max_block_weight, - }) + }; + + // Initialize genesis block (block 0) if not already present + client.ensure_genesis_block().await?; + + Ok(client) + } + + /// Ensure the genesis block (block 0) is reconstructed and stored. + /// + /// This method checks if block 0 exists in storage. If not, it reconstructs + /// an empty EVM genesis block and stores the mapping between its EVM hash + /// and the Substrate hash. + async fn ensure_genesis_block(&self) -> Result<(), ClientError> { + use pallet_revive::evm::block_hash::{EthereumBlockBuilder, InMemoryStorage}; + + // Try to get genesis block + let genesis_block = match self.block_by_number(0).await? 
{ + Some(block) => block, + None => { + log::warn!(target: LOG_TARGET, "Genesis block (0) not found, skipping initialization"); + return Ok(()); + }, + }; + + let substrate_hash = genesis_block.hash(); + + // Check if genesis block mapping already exists + if self.receipt_provider.get_ethereum_hash(&substrate_hash).await.is_some() { + log::debug!(target: LOG_TARGET, "Genesis block mapping already exists"); + return Ok(()); + } + + log::info!(target: LOG_TARGET, "🏗️ Reconstructing genesis block (block 0)"); + + let runtime_api = self.runtime_api(substrate_hash); + let gas_limit = runtime_api.block_gas_limit().await.unwrap_or_default(); + let block_author = runtime_api.block_author().await.ok().unwrap_or_default(); + let timestamp = extract_block_timestamp(&genesis_block).await.unwrap_or_default(); + + // Build genesis block with no transactions + let mut builder = EthereumBlockBuilder::new(InMemoryStorage::new()); + let (genesis_evm_block, _gas_info) = builder.build( + 0u64.into(), // block number 0 + H256::zero(), // parent hash is zero for genesis + timestamp.into(), // timestamp from substrate block + block_author, // block author + gas_limit, // gas limit + ); + + let ethereum_hash = genesis_evm_block.hash; + + // Store the mapping with metadata + self.receipt_provider + .insert_block_mapping(ðereum_hash, &substrate_hash, 0, &gas_limit, &block_author) + .await?; + + log::info!( + target: LOG_TARGET, + "✅ Genesis block reconstructed: EVM hash {ethereum_hash:?} -> Substrate hash {substrate_hash:?}" + ); + + Ok(()) } /// Subscribe to past blocks executing the callback for each block in `range`. @@ -472,6 +535,28 @@ impl Client { self.receipt_provider.receipts_count_per_block(block_hash).await } + /// Get an EVM transaction receipt by Ethereum hash with automatic resolution. 
+ pub async fn receipt_by_ethereum_hash_and_index( + &self, + ethereum_hash: &H256, + transaction_index: usize, + ) -> Option { + if let Some(substrate_hash) = self.resolve_substrate_hash(ethereum_hash).await { + return self.receipt_by_hash_and_index(&substrate_hash, transaction_index).await; + } + // Fallback: treat as Substrate hash + self.receipt_by_hash_and_index(ethereum_hash, transaction_index).await + } + + /// Get receipts count per block using Ethereum block hash with automatic resolution. + pub async fn receipts_count_per_ethereum_block(&self, ethereum_hash: &H256) -> Option { + if let Some(substrate_hash) = self.resolve_substrate_hash(ethereum_hash).await { + return self.receipts_count_per_block(&substrate_hash).await; + } + // Fallback: treat as Substrate hash + self.receipts_count_per_block(ethereum_hash).await + } + /// Get the system health. pub async fn system_health(&self) -> Result { let health = self.rpc.system_health().await?; @@ -522,6 +607,33 @@ impl Client { self.block_provider.block_by_hash(hash).await } + /// Resolve Ethereum block hash to Substrate block hash, then get the block. + /// This method provides the abstraction layer needed by the RPC APIs. + pub async fn resolve_substrate_hash(&self, ethereum_hash: &H256) -> Option { + self.receipt_provider.get_substrate_hash(ethereum_hash).await + } + + /// Resolve Substrate block hash to Ethereum block hash, then get the block. + /// This method provides the abstraction layer needed by the RPC APIs. + pub async fn resolve_ethereum_hash(&self, substrate_hash: &H256) -> Option { + self.receipt_provider.get_ethereum_hash(substrate_hash).await + } + + /// Get a block by Ethereum hash with automatic resolution to Substrate hash. + /// Falls back to treating the hash as a Substrate hash if no mapping exists. 
+ pub async fn block_by_ethereum_hash( + &self, + ethereum_hash: &H256, + ) -> Result>, ClientError> { + // First try to resolve the Ethereum hash to a Substrate hash + if let Some(substrate_hash) = self.resolve_substrate_hash(ethereum_hash).await { + return self.block_by_hash(&substrate_hash).await; + } + + // Fallback: treat the provided hash as a Substrate hash (backward compatibility) + self.block_by_hash(ethereum_hash).await + } + /// Get a block by number pub async fn block_by_number( &self, @@ -620,9 +732,12 @@ impl Client { block: Arc, hydrated_transactions: bool, ) -> Block { + log::trace!(target: LOG_TARGET, "Get EVM block for hash {:?}", block.hash()); + let storage_api = self.storage_api(block.hash()); let ethereum_block = storage_api.get_ethereum_block().await.inspect_err(|err| { - log::error!(target: LOG_TARGET, "Failed to get Ethereum block for hash {:?}: {err:?}", block.hash()); + log::warn!(target: LOG_TARGET, "Failed to get EVM block from storage for hash {:?}: {err:?}", block.hash()); + log::warn!(target: LOG_TARGET, "Will try to reconstruct the block from db"); }); // This could potentially fail under two circumstances: @@ -630,6 +745,8 @@ impl Client { // - the node we are targeting has an outdated revive pallet (or ETH block functionality is // disabled) if let Ok(mut eth_block) = ethereum_block { + log::trace!(target: LOG_TARGET, "Ethereum block from storage hash {:?}", eth_block.hash); + // This means we can live with the hashes returned by the Revive pallet. if !hydrated_transactions { return eth_block; @@ -663,6 +780,9 @@ impl Client { } /// Get the EVM block for the given block and receipts. + /// + /// This method properly reconstructs an Ethereum block using the same logic as on-chain + /// block building, ensuring correct parent_hash linkage in the EVM block chain. 
pub async fn evm_block_from_receipts( &self, block: &SubstrateBlock, @@ -670,49 +790,83 @@ impl Client { signed_txs: Vec, hydrated_transactions: bool, ) -> Block { - let runtime_api = self.runtime_api(block.hash()); - let gas_limit = runtime_api.block_gas_limit().await.unwrap_or_default(); + use pallet_revive::evm::block_hash::{ + AccumulateReceipt, EthereumBlockBuilder, InMemoryStorage, + }; + + log::trace!(target: LOG_TARGET, "Reconstructing EVM block for substrate block {:?}", block.hash()); - let header = block.header(); let timestamp = extract_block_timestamp(block).await.unwrap_or_default(); - let block_author = runtime_api.block_author().await.ok().unwrap_or_default(); - // TODO: remove once subxt is updated - let parent_hash = header.parent_hash.0.into(); - let state_root = header.state_root.0.into(); - let extrinsics_root = header.extrinsics_root.0.into(); + let (expected_evm_block_hash, gas_limit, block_author) = + self.receipt_provider.get_block_mapping(&block.hash()).await + .unwrap_or_else(|| { + log::warn!(target: LOG_TARGET, "No mapping found for substrate block {:?}, restoring defaults", block.hash()); + Default::default() + }); + + // Build block using the proper EthereumBlockBuilder + let mut builder = EthereumBlockBuilder::new(InMemoryStorage::new()); + + // Process each transaction with its receipt + for (signed_tx, receipt) in signed_txs.iter().zip(receipts.iter()) { + let tx_encoded = signed_tx.signed_payload(); + + // Reconstruct logs from receipt + let mut accumulate_receipt = AccumulateReceipt::new(); + for log in &receipt.logs { + let data = log.data.as_ref().map(|d| d.0.as_slice()).unwrap_or(&[]); + accumulate_receipt.add_log(&log.address, data, &log.topics); + } - let gas_used = receipts.iter().fold(U256::zero(), |acc, receipt| acc + receipt.gas_used); - let transactions = if hydrated_transactions { - signed_txs - .into_iter() - .zip(receipts.iter()) - .map(|(signed_tx, receipt)| TransactionInfo::new(receipt, signed_tx)) - .collect::>() 
- .into() + // Process the transaction + builder.process_transaction( + tx_encoded, + receipt.status.unwrap_or_default() == 1.into(), + receipt.gas_used.as_u64().into(), + accumulate_receipt.encoding, + accumulate_receipt.bloom, + ); + } + + // Get parent EVM block hash (not Substrate hash!) + // This is crucial for maintaining the EVM block chain integrity + let parent_evm_hash = if block.number() > 1 { + let parent_substrate_hash = block.header().parent_hash; + // Try to resolve to EVM hash, fallback to substrate hash for backwards compatibility + self.resolve_ethereum_hash(&parent_substrate_hash) + .await + .unwrap_or(parent_substrate_hash) } else { - receipts - .iter() - .map(|receipt| receipt.transaction_hash) - .collect::>() - .into() + H256::zero() // Genesis block }; - Block { - hash: block.hash(), - parent_hash, - state_root, - miner: block_author, - transactions_root: extrinsics_root, - number: header.number.into(), - timestamp: timestamp.into(), - base_fee_per_gas: runtime_api.gas_price().await.ok().unwrap_or_default(), + // Build the Ethereum block with correct parent hash + let (mut evm_block, _gas_info) = builder.build( + block.header().number.into(), + parent_evm_hash, + timestamp.into(), + block_author, gas_limit, - gas_used, - receipts_root: extrinsics_root, - transactions, - ..Default::default() + ); + + // Sanity check + let evm_block_hash = evm_block.header_hash(); + if expected_evm_block_hash != evm_block_hash { + log::warn!(target: LOG_TARGET, "Reconstructed EVM block hash mismatch hash: {evm_block_hash:} != {expected_evm_block_hash:?}"); } + + // Optionally hydrate with full transaction info + if hydrated_transactions { + evm_block.transactions = signed_txs + .into_iter() + .zip(receipts.iter()) + .map(|(tx, receipt)| TransactionInfo::new(receipt, tx)) + .collect::>() + .into(); + } + + evm_block } /// Get the chain ID. 
diff --git a/substrate/frame/revive/rpc/src/client/storage_api.rs b/substrate/frame/revive/rpc/src/client/storage_api.rs index d39a3a963f480..bb7addaeff65f 100644 --- a/substrate/frame/revive/rpc/src/client/storage_api.rs +++ b/substrate/frame/revive/rpc/src/client/storage_api.rs @@ -23,11 +23,13 @@ use crate::{ }, ClientError, H160, }; -use sp_core::H256; +use sp_core::{H256, U256}; use subxt::{storage::Storage, OnlineClient}; use pallet_revive::evm::{block_hash::ReceiptGasInfo, Block}; +const LOG_TARGET: &str = "eth-rpc::storage_api"; + /// A wrapper around the Substrate Storage API. #[derive(Clone)] pub struct StorageApi(Storage>); @@ -66,34 +68,41 @@ impl StorageApi { /// Get the receipt data from storage. pub async fn get_receipt_data(&self) -> Result, ClientError> { - let query = subxt::dynamic::storage("Revive", "ReceiptInfoData", ()); + let query = subxt_client::storage().revive().receipt_info_data(); - let Some(info) = self.0.fetch(&query).await? else { + let Some(receipt_info_data) = self.0.fetch(&query).await? else { + log::warn!(target: LOG_TARGET, "Receipt data not found"); return Err(ClientError::ReceiptDataNotFound); }; - let bytes = info.into_encoded(); - codec::Decode::decode(&mut &bytes[..]).map_err(|err| err.into()) + log::trace!(target: LOG_TARGET, "Receipt data found"); + let receipt_info_data = receipt_info_data.into_iter().map(|entry| entry.0).collect(); + Ok(receipt_info_data) } /// Get the Ethereum block from storage. pub async fn get_ethereum_block(&self) -> Result { - let query = subxt::dynamic::storage("Revive", "EthereumBlock", ()); - - let Some(info) = self.0.fetch(&query).await? else { + let query = subxt_client::storage().revive().ethereum_block(); + let Some(block) = self.0.fetch(&query).await? 
else { + log::warn!(target: LOG_TARGET, "Ethereum block not found"); return Err(ClientError::EthereumBlockNotFound); }; - let bytes = info.into_encoded(); - codec::Decode::decode(&mut &bytes[..]).map_err(|err| err.into()) + log::trace!(target: LOG_TARGET, "Ethereum block found hash: {:?}", block.hash); + Ok(block.0) } pub async fn get_ethereum_block_hash(&self, number: u64) -> Result { - let key: subxt::dynamic::Value = number.into(); - let query = subxt::dynamic::storage("Revive", "BlockHash", vec![key]); + // Convert u64 to the wrapped U256 type that subxt expects + let number_u256 = subxt::utils::Static(U256::from(number)); - let Some(info) = self.0.fetch(&query).await? else { + let query = subxt_client::storage().revive().block_hash(number_u256); + + let Some(hash) = self.0.fetch(&query).await? else { + log::warn!(target: LOG_TARGET, "Ethereum block #{number} hash not found"); return Err(ClientError::EthereumBlockNotFound); }; - let bytes = info.into_encoded(); - codec::Decode::decode(&mut &bytes[..]).map_err(|err| err.into()) + + log::trace!(target: LOG_TARGET, "Ethereum block #{number} hash: {hash:?}"); + + Ok(hash) } } diff --git a/substrate/frame/revive/rpc/src/lib.rs b/substrate/frame/revive/rpc/src/lib.rs index 3cdb55f6f081a..6f66c3357ae2f 100644 --- a/substrate/frame/revive/rpc/src/lib.rs +++ b/substrate/frame/revive/rpc/src/lib.rs @@ -214,7 +214,7 @@ impl EthRpcServer for EthRpcServerImpl { block_hash: H256, hydrated_transactions: bool, ) -> RpcResult> { - let Some(block) = self.client.block_by_hash(&block_hash).await? else { + let Some(block) = self.client.block_by_ethereum_hash(&block_hash).await? 
else { return Ok(None); }; let block = self.client.evm_block(block, hydrated_transactions).await; @@ -275,7 +275,7 @@ impl EthRpcServer for EthRpcServerImpl { } else { self.client.latest_block().await.hash() }; - Ok(self.client.receipts_count_per_block(&block_hash).await.map(U256::from)) + Ok(self.client.receipts_count_per_ethereum_block(&block_hash).await.map(U256::from)) } async fn get_block_transaction_count_by_number( @@ -314,22 +314,15 @@ impl EthRpcServer for EthRpcServerImpl { block_hash: H256, transaction_index: U256, ) -> RpcResult> { - let Some(receipt) = self - .client - .receipt_by_hash_and_index( - &block_hash, - transaction_index.try_into().map_err(|_| EthRpcError::ConversionError)?, - ) - .await + let Some(substrate_block_hash) = self.client.resolve_substrate_hash(&block_hash).await else { return Ok(None); }; - - let Some(signed_tx) = self.client.signed_tx_by_hash(&receipt.transaction_hash).await else { - return Ok(None); - }; - - Ok(Some(TransactionInfo::new(&receipt, signed_tx))) + self.get_transaction_by_substrate_block_hash_and_index( + substrate_block_hash, + transaction_index, + ) + .await } async fn get_transaction_by_block_number_and_index( @@ -340,7 +333,7 @@ impl EthRpcServer for EthRpcServerImpl { let Some(block) = self.client.block_by_number_or_tag(&block).await? 
else { return Ok(None); }; - self.get_transaction_by_block_hash_and_index(block.hash(), transaction_index) + self.get_transaction_by_substrate_block_hash_and_index(block.hash(), transaction_index) .await } @@ -386,3 +379,27 @@ impl EthRpcServer for EthRpcServerImpl { Ok(result) } } + +impl EthRpcServerImpl { + async fn get_transaction_by_substrate_block_hash_and_index( + &self, + substrate_block_hash: H256, + transaction_index: U256, + ) -> RpcResult> { + let Some(receipt) = self + .client + .receipt_by_hash_and_index( + &substrate_block_hash, + transaction_index.try_into().map_err(|_| EthRpcError::ConversionError)?, + ) + .await + else { + return Ok(None) + }; + let Some(signed_tx) = self.client.signed_tx_by_hash(&receipt.transaction_hash).await else { + return Ok(None); + }; + + Ok(Some(TransactionInfo::new(&receipt, signed_tx))) + } +} diff --git a/substrate/frame/revive/rpc/src/receipt_extractor.rs b/substrate/frame/revive/rpc/src/receipt_extractor.rs index 0b1fd57e039be..c80a814e203f9 100644 --- a/substrate/frame/revive/rpc/src/receipt_extractor.rs +++ b/substrate/frame/revive/rpc/src/receipt_extractor.rs @@ -34,10 +34,76 @@ use pallet_revive::{ U256, }, }; -use sp_core::keccak_256; +use sp_core::{keccak_256, H160}; use std::{future::Future, pin::Pin, sync::Arc}; use subxt::{blocks::ExtrinsicDetails, OnlineClient}; +/// Helper function to decode a transaction and recover the sender address. 
+/// Returns (signed_tx, transaction_hash, from_address) +pub fn decode_and_recover_tx( + call: &EthTransact, +) -> Result<(TransactionSigned, H256, H160), ClientError> { + let transaction_hash = H256(keccak_256(&call.payload)); + let signed_tx = + TransactionSigned::decode(&call.payload).map_err(|_| ClientError::TxDecodingFailed)?; + let from = signed_tx.recover_eth_address().map_err(|_| { + log::error!(target: LOG_TARGET, "Failed to recover eth address from signed tx"); + ClientError::RecoverEthAddressFailed + })?; + Ok((signed_tx, transaction_hash, from)) +} + +/// Helper function to calculate contract address for CREATE transactions. +pub fn calculate_contract_address( + from: &H160, + to: Option, + nonce: Option, +) -> Result, ClientError> { + if to.is_none() { + let nonce_u64 = nonce + .unwrap_or_default() + .try_into() + .map_err(|_| ClientError::ConversionFailed)?; + Ok(Some(create1(from, nonce_u64))) + } else { + Ok(None) + } +} + +/// Helper to build a receipt from decoded transaction and collected data. +/// This encapsulates the common logic of creating GenericTransaction, calculating +/// contract address, and assembling the final ReceiptInfo. 
+pub fn build_receipt_from_tx( + signed_tx: &TransactionSigned, + transaction_hash: H256, + from: H160, + gas_price: U256, + gas_used: U256, + status: bool, + logs: Vec, + eth_block_hash: H256, + block_number: U256, + transaction_index: usize, +) -> Result { + let tx_info = GenericTransaction::from_signed(signed_tx.clone(), gas_price, Some(from)); + let contract_address = calculate_contract_address(&from, tx_info.to, tx_info.nonce)?; + + Ok(ReceiptInfo::new( + eth_block_hash, + block_number, + contract_address, + from, + logs, + tx_info.to, + gas_price, + gas_used, + status, + transaction_hash, + transaction_index.into(), + tx_info.r#type.unwrap_or_default(), + )) +} + type FetchGasPriceFn = Arc< dyn Fn(H256) -> Pin> + Send>> + Send + Sync, >; @@ -49,6 +115,14 @@ type FetchReceiptDataFn = Arc< type FetchEthBlockHashFn = Arc Pin> + Send>> + Send + Sync>; +type FetchBlockGasLimitFn = Arc< + dyn Fn(H256) -> Pin> + Send>> + Send + Sync, +>; + +type FetchBlockAuthorFn = Arc< + dyn Fn(H256) -> Pin> + Send>> + Send + Sync, +>; + /// Utility to extract receipts from extrinsics. #[derive(Clone)] pub struct ReceiptExtractor { @@ -61,6 +135,12 @@ pub struct ReceiptExtractor { /// Fetch the gas price from the chain. fetch_gas_price: FetchGasPriceFn, + /// Fetch the block gas limit from the chain. + fetch_block_gas_limit: FetchBlockGasLimitFn, + + /// Fetch the block author from the chain. + fetch_block_author: FetchBlockAuthorFn, + /// The native to eth decimal ratio, used to calculated gas from native fees. 
native_to_eth_ratio: u32, @@ -125,10 +205,40 @@ impl ReceiptExtractor { Box::pin(fut) as Pin> }); + let api_inner = api.clone(); + let fetch_block_gas_limit = Arc::new(move |block_hash| { + let api_inner = api_inner.clone(); + + let fut = async move { + let runtime_api = api_inner.runtime_api().at(block_hash); + let payload = subxt_client::apis().revive_api().block_gas_limit(); + let gas_limit = runtime_api.call(payload).await?; + Ok(*gas_limit) + }; + + Box::pin(fut) as Pin> + }); + + let api_inner = api.clone(); + let fetch_block_author = Arc::new(move |block_hash| { + let api_inner = api_inner.clone(); + + let fut = async move { + let runtime_api = api_inner.runtime_api().at(block_hash); + let payload = subxt_client::apis().revive_api().block_author(); + let author = runtime_api.call(payload).await?; + Ok(author) + }; + + Box::pin(fut) as Pin> + }); + Ok(Self { fetch_receipt_data, fetch_eth_block_hash, fetch_gas_price, + fetch_block_gas_limit, + fetch_block_author, native_to_eth_ratio, earliest_receipt_block, }) @@ -137,15 +247,26 @@ impl ReceiptExtractor { #[cfg(test)] pub fn new_mock() -> Self { let fetch_receipt_data = Arc::new(|_| Box::pin(std::future::ready(None)) as Pin>); - let fetch_eth_block_hash = - Arc::new(|_, _| Box::pin(std::future::ready(None)) as Pin>); + // This method is useful when testing eth - substrate mapping. 
+ let fetch_eth_block_hash = Arc::new(|block_hash: H256, block_number: u64| { + // Generate hash from substrate block hash and number + let bytes: Vec = [block_hash.as_bytes(), &block_number.to_be_bytes()].concat(); + let eth_block_hash = H256::from(keccak_256(&bytes)); + Box::pin(std::future::ready(Some(eth_block_hash))) as Pin> + }); let fetch_gas_price = Arc::new(|_| Box::pin(std::future::ready(Ok(U256::from(1000)))) as Pin>); + let fetch_block_gas_limit = + Arc::new(|_| Box::pin(std::future::ready(Ok(U256::from(30_000_000)))) as Pin>); + let fetch_block_author = + Arc::new(|_| Box::pin(std::future::ready(Ok(H160::zero()))) as Pin>); Self { fetch_receipt_data, fetch_eth_block_hash, fetch_gas_price, + fetch_block_gas_limit, + fetch_block_author, native_to_eth_ratio: 1_000_000, earliest_receipt_block: None, } @@ -154,14 +275,15 @@ impl ReceiptExtractor { /// Extract a [`TransactionSigned`] and a [`ReceiptInfo`] from an extrinsic. async fn extract_from_extrinsic( &self, - block_number: U256, - block_hash: H256, + substrate_block: &SubstrateBlock, + eth_block_hash: H256, ext: subxt::blocks::ExtrinsicDetails>, call: EthTransact, maybe_receipt: Option, + transaction_index: usize, ) -> Result<(TransactionSigned, ReceiptInfo), ClientError> { - let transaction_index = ext.index(); let events = ext.events().await?; + let block_number: U256 = substrate_block.number().into(); let success = events.has::().inspect_err(|err| { log::debug!( @@ -176,16 +298,10 @@ impl ReceiptExtractor { .inspect_err( |err| log::debug!(target: LOG_TARGET, "TransactionFeePaid not found in events for block {block_number}\n{err:?}") )?; - let transaction_hash = H256(keccak_256(&call.payload)); - let signed_tx = - TransactionSigned::decode(&call.payload).map_err(|_| ClientError::TxDecodingFailed)?; - let from = signed_tx.recover_eth_address().map_err(|_| { - log::error!(target: LOG_TARGET, "Failed to recover eth address from signed tx"); - ClientError::RecoverEthAddressFailed - })?; + let (signed_tx, 
transaction_hash, from) = decode_and_recover_tx(&call)?; - let base_gas_price = (self.fetch_gas_price)(block_hash).await?; + let base_gas_price = (self.fetch_gas_price)(substrate_block.hash()).await?; let tx_info = GenericTransaction::from_signed(signed_tx.clone(), base_gas_price, Some(from)); @@ -215,40 +331,26 @@ impl ReceiptExtractor { block_number, transaction_hash, transaction_index: transaction_index.into(), - block_hash, + block_hash: eth_block_hash, log_index: event_details.index().into(), ..Default::default() }) }) .collect(); - let contract_address = if tx_info.to.is_none() { - Some(create1( - &from, - tx_info - .nonce - .unwrap_or_default() - .try_into() - .map_err(|_| ClientError::ConversionFailed)?, - )) - } else { - None - }; - - let receipt = ReceiptInfo::new( - block_hash, - block_number, - contract_address, + let receipt = build_receipt_from_tx( + &signed_tx, + transaction_hash, from, - logs, - tx_info.to, gas_price, gas_used, success, - transaction_hash, - transaction_index.into(), - tx_info.r#type.unwrap_or_default(), - ); + logs, + eth_block_hash, + block_number, + transaction_index, + )?; + Ok((signed_tx, receipt)) } @@ -270,23 +372,17 @@ impl ReceiptExtractor { .await .unwrap_or(substrate_block_hash); - // TODO: Order of receipt and transaction info is important while building - // the state tries. Are we sorting them afterwards? 
+ // Process extrinsics in order while maintaining parallelism within buffer window stream::iter(ext_iter) - .map(|(ext, call, receipt)| async move { - self.extract_from_extrinsic( - substrate_block_number.into(), - eth_block_hash, - ext, - call, - receipt, - ) - .await - .inspect_err(|err| { - log::warn!(target: LOG_TARGET, "Error extracting extrinsic: {err:?}"); - }) + .enumerate() + .map(|(idx, (ext, call, receipt))| async move { + self.extract_from_extrinsic(block, eth_block_hash, ext, call, receipt, idx) + .await + .inspect_err(|err| { + log::warn!(target: LOG_TARGET, "Error extracting extrinsic: {err:?}"); + }) }) - .buffer_unordered(10) + .buffered(10) .collect::>>() .await .into_iter() @@ -294,7 +390,7 @@ impl ReceiptExtractor { } /// Return the ETH extrinsics of the block grouped with reconstruction receipt info. - async fn get_block_extrinsics( + pub async fn get_block_extrinsics( &self, block: &SubstrateBlock, ) -> Result< @@ -356,7 +452,7 @@ impl ReceiptExtractor { let (ext, eth_call, maybe_receipt) = ext_iter .into_iter() - .find(|(e, _, _)| e.index() as usize == transaction_index) + .nth(transaction_index) .ok_or(ClientError::EthExtrinsicNotFound)?; let substrate_block_number = block.number() as u64; @@ -367,12 +463,32 @@ impl ReceiptExtractor { .unwrap_or(substrate_block_hash); self.extract_from_extrinsic( - substrate_block_number.into(), + block, eth_block_hash, ext, eth_call, maybe_receipt, + transaction_index, ) .await } + + /// Get the Ethereum block hash for the given Substrate block. + pub async fn get_ethereum_block_hash( + &self, + block_hash: &H256, + block_number: u64, + ) -> Option { + (self.fetch_eth_block_hash)(*block_hash, block_number).await + } + + /// Get the block gas limit for the given block hash. + pub async fn block_gas_limit(&self, block_hash: H256) -> Result { + (self.fetch_block_gas_limit)(block_hash).await + } + + /// Get the block author for the given block hash. 
+ pub async fn block_author(&self, block_hash: H256) -> Result { + (self.fetch_block_author)(block_hash).await + } } diff --git a/substrate/frame/revive/rpc/src/receipt_provider.rs b/substrate/frame/revive/rpc/src/receipt_provider.rs index 01374d5763b26..84a763b2345c7 100644 --- a/substrate/frame/revive/rpc/src/receipt_provider.rs +++ b/substrate/frame/revive/rpc/src/receipt_provider.rs @@ -16,11 +16,12 @@ // limitations under the License. use crate::{ client::{SubstrateBlock, SubstrateBlockNumber}, + receipt_extractor::{build_receipt_from_tx, decode_and_recover_tx}, Address, AddressOrAddresses, BlockInfoProvider, BlockNumberOrTag, BlockTag, Bytes, ClientError, - FilterTopic, ReceiptExtractor, SubxtBlockInfoProvider, LOG_TARGET, + FilterTopic, ReceiptExtractor, SubxtBlockInfoProvider, }; use pallet_revive::evm::{Filter, Log, ReceiptInfo, TransactionSigned}; -use sp_core::{H256, U256}; +use sp_core::{H160, H256, U256}; use sqlx::{query, QueryBuilder, Row, Sqlite, SqlitePool}; use std::{ collections::{BTreeMap, HashMap}, @@ -28,6 +29,8 @@ use std::{ }; use tokio::sync::Mutex; +const LOG_TARGET: &str = "eth-rpc::receipt_provider"; + /// ReceiptProvider stores transaction receipts and logs in a SQLite database. #[derive(Clone)] pub struct ReceiptProvider { @@ -99,6 +102,165 @@ impl ReceiptProvider { Some((block_hash, transaction_index)) } + /// Fetch receipt metadata from the database for a specific transaction. 
+ /// Returns (status, gas_used, gas_price) + async fn fetch_receipt_metadata(&self, transaction_hash: &H256) -> Option<(bool, U256, U256)> { + let transaction_hash = transaction_hash.as_ref(); + let result = query!( + r#" + SELECT status, gas_used, gas_price + FROM transaction_hashes + WHERE transaction_hash = $1 + "#, + transaction_hash + ) + .fetch_optional(&self.pool) + .await + .ok()??; + + let status = result.status != 0; + let gas_used = U256::from_big_endian(&result.gas_used); + let gas_price = U256::from_big_endian(&result.gas_price); + + Some((status, gas_used, gas_price)) + } + + /// Insert a block mapping from Ethereum block hash to Substrate block hash. + pub async fn insert_block_mapping( + &self, + ethereum_block_hash: &H256, + substrate_block_hash: &H256, + block_number: u64, + gas_limit: &U256, + block_author: &H160, + ) -> Result<(), ClientError> { + let ethereum_hash = ethereum_block_hash.as_ref(); + let substrate_hash = substrate_block_hash.as_ref(); + let block_number = block_number as i64; + + let gas_limit = gas_limit.to_big_endian(); + let gas_limit_bytes = gas_limit.as_ref(); + let block_author_bytes = block_author.as_bytes(); + + query!( + r#" + INSERT OR REPLACE INTO eth_to_substrate_blocks (ethereum_block_hash, substrate_block_hash, block_number, gas_limit, block_author) + VALUES ($1, $2, $3, $4, $5) + "#, + ethereum_hash, + substrate_hash, + block_number, + gas_limit_bytes, + block_author_bytes + ) + .execute(&self.pool) + .await?; + + log::trace!(target: LOG_TARGET, "Insert block mapping ethereum block: {ethereum_block_hash:?} -> substrate block: {substrate_block_hash:?}"); + Ok(()) + } + + /// Get block metadata (ethereum hash, gas limit, block author) for the given Substrate block + /// hash. Returns a tuple of (ethereum_block_hash, gas_limit, block_author). 
+ pub async fn get_block_mapping( + &self, + substrate_block_hash: &H256, + ) -> Option<(H256, U256, sp_core::H160)> { + let substrate_hash = substrate_block_hash.as_ref(); + let result = query!( + r#" + SELECT ethereum_block_hash, gas_limit, block_author + FROM eth_to_substrate_blocks + WHERE substrate_block_hash = $1 + "#, + substrate_hash + ) + .fetch_optional(&self.pool) + .await + .ok()??; + + let ethereum_block_hash = H256::from_slice(&result.ethereum_block_hash[..]); + + // Deserialize gas_limit from big-endian bytes + let gas_limit = U256::from_big_endian(&result.gas_limit[..]); + + // Deserialize block_author from bytes + let block_author = sp_core::H160::from_slice(&result.block_author[..]); + + Some((ethereum_block_hash, gas_limit, block_author)) + } + + /// Get the Substrate block hash for the given Ethereum block hash. + pub async fn get_substrate_hash(&self, ethereum_block_hash: &H256) -> Option { + let ethereum_hash = ethereum_block_hash.as_ref(); + let result = query!( + r#" + SELECT substrate_block_hash + FROM eth_to_substrate_blocks + WHERE ethereum_block_hash = $1 + "#, + ethereum_hash + ) + .fetch_optional(&self.pool) + .await + .inspect_err(|e| { + log::error!(target: LOG_TARGET, "failed to get block mapping for ethereum block {ethereum_block_hash:?}, err: {e:?}"); + }) + .ok()? + .or_else(||{ + log::trace!(target: LOG_TARGET, "No block mapping found for ethereum block: {ethereum_block_hash:?}"); + None + })?; + + log::trace!(target: LOG_TARGET, "Get block mapping ethereum block: {:?} -> substrate block: {ethereum_block_hash:?}", H256::from_slice(&result.substrate_block_hash[..])); + + Some(H256::from_slice(&result.substrate_block_hash[..])) + } + + /// Get the Ethereum block hash for the given Substrate block hash. 
+ pub async fn get_ethereum_hash(&self, substrate_block_hash: &H256) -> Option { + let (ethereum_hash, _, _) = self.get_block_mapping(substrate_block_hash).await?; + log::trace!(target: LOG_TARGET, "Get block mapping substrate block: {substrate_block_hash:?} -> ethereum block: {ethereum_hash:?}"); + Some(ethereum_hash) + } + + /// Get the block gas limit for the given Substrate block hash. + pub async fn get_block_gas_limit(&self, substrate_block_hash: &H256) -> Option { + let (_, gas_limit, _) = self.get_block_mapping(substrate_block_hash).await?; + Some(gas_limit) + } + + /// Get the block author for the given Substrate block hash. + pub async fn get_block_author(&self, substrate_block_hash: &H256) -> Option { + let (_, _, block_author) = self.get_block_mapping(substrate_block_hash).await?; + Some(block_author) + } + + /// Remove block mappings for the given Ethereum block hashes. + pub async fn remove_block_mappings( + &self, + ethereum_block_hashes: &[H256], + ) -> Result<(), ClientError> { + if ethereum_block_hashes.is_empty() { + return Ok(()); + } + + log::trace!(target: LOG_TARGET, "Removing block mappings: {ethereum_block_hashes:?}"); + + let placeholders = vec!["?"; ethereum_block_hashes.len()].join(", "); + let sql = format!( + "DELETE FROM eth_to_substrate_blocks WHERE ethereum_block_hash in ({placeholders})" + ); + let mut query = sqlx::query(&sql); + + for ethereum_hash in ethereum_block_hashes { + query = query.bind(ethereum_hash.as_ref()); + } + + query.execute(&self.pool).await?; + Ok(()) + } + /// Deletes older records from the database. 
pub async fn remove(&self, block_hashes: &[H256]) -> Result<(), ClientError> { if block_hashes.is_empty() { @@ -113,14 +275,21 @@ impl ReceiptProvider { let sql = format!("DELETE FROM logs WHERE block_hash in ({placeholders})"); let mut delete_logs_query = sqlx::query(&sql); + let sql = format!( + "DELETE FROM eth_to_substrate_blocks WHERE substrate_block_hash in ({placeholders})" + ); + let mut delete_mappings_query = sqlx::query(&sql); + for block_hash in block_hashes { delete_tx_query = delete_tx_query.bind(block_hash.as_ref()); delete_logs_query = delete_logs_query.bind(block_hash.as_ref()); + delete_mappings_query = delete_mappings_query.bind(block_hash.as_ref()); } let delete_transaction_hashes = delete_tx_query.execute(&self.pool); let delete_logs = delete_logs_query.execute(&self.pool); - tokio::try_join!(delete_transaction_hashes, delete_logs)?; + let delete_mappings = delete_mappings_query.execute(&self.pool); + tokio::try_join!(delete_transaction_hashes, delete_logs, delete_mappings)?; Ok(()) } @@ -133,12 +302,133 @@ impl ReceiptProvider { } } + /// Reconstruct a single receipt from the database by transaction index. + /// This method can work even when events and storage are unavailable. 
+ async fn receipt_from_db_by_index( + &self, + block: &SubstrateBlock, + transaction_index: usize, + call: &crate::subxt_client::revive::calls::types::EthTransact, + ) -> Result<(TransactionSigned, ReceiptInfo), ClientError> { + let substrate_block_hash = block.hash(); + let substrate_block_number = block.number() as u64; + let block_number: U256 = substrate_block_number.into(); + + // Get Ethereum block hash from DB mapping + let eth_block_hash = self + .get_ethereum_hash(&substrate_block_hash) + .await + .unwrap_or(substrate_block_hash); + + // Decode transaction and recover sender + let (signed_tx, transaction_hash, from) = decode_and_recover_tx(call)?; + + // Query receipt metadata from DB + let (status, gas_used, gas_price) = + self.fetch_receipt_metadata(&transaction_hash).await.ok_or_else(|| { + log::warn!( + target: LOG_TARGET, + "Receipt metadata not found in DB for tx {transaction_hash:?}" + ); + ClientError::TxFeeNotFound + })?; + + // Query logs from DB + let block_hash_ref = substrate_block_hash.as_ref(); + let transaction_index_i32 = transaction_index as i32; + let logs_rows = query!( + r#" + SELECT log_index, address, transaction_hash, topic_0, topic_1, topic_2, topic_3, data + FROM logs + WHERE block_hash = $1 AND transaction_index = $2 + ORDER BY log_index ASC + "#, + block_hash_ref, + transaction_index_i32 + ) + .fetch_all(&self.pool) + .await?; + + let logs: Vec = logs_rows + .into_iter() + .map(|row| { + let topics = [row.topic_0, row.topic_1, row.topic_2, row.topic_3] + .iter() + .filter_map(|t| t.as_ref().map(|bytes| H256::from_slice(bytes))) + .collect(); + + Log { + address: H160::from_slice(&row.address), + block_hash: eth_block_hash, + block_number, + data: row.data.map(Bytes::from), + log_index: U256::from(row.log_index as u64), + topics, + transaction_hash, + transaction_index: transaction_index.into(), + removed: false, + } + }) + .collect(); + + // Build the receipt using the common helper + let receipt = build_receipt_from_tx( + 
&signed_tx, + transaction_hash, + from, + gas_price, + gas_used, + status, + logs, + eth_block_hash, + block_number, + transaction_index, + )?; + + Ok((signed_tx, receipt)) + } + + /// Reconstruct receipts from the database when on-chain state has been pruned. + /// This method can work even when events and storage are unavailable. + pub async fn receipts_from_db( + &self, + block: &SubstrateBlock, + ) -> Result, ClientError> { + // Get extrinsics from block (this works even when state is pruned) + let ext_iter = self.receipt_extractor.get_block_extrinsics(block).await?; + + let mut receipts = Vec::new(); + + for (transaction_index, (_, call, _)) in ext_iter.enumerate() { + let receipt = self.receipt_from_db_by_index(block, transaction_index, &call).await?; + receipts.push(receipt); + } + + Ok(receipts) + } + /// Fetch receipts from the given block. + /// This method first attempts to extract receipts from on-chain data (events/storage). + /// If that fails (e.g., due to pruned state), it falls back to reconstructing receipts from the + /// database. pub async fn receipts_from_block( &self, block: &SubstrateBlock, ) -> Result, ClientError> { - self.receipt_extractor.extract_from_block(block).await + // Try on-chain extraction first + match self.receipt_extractor.extract_from_block(block).await { + Ok(receipts) => Ok(receipts), + Err(err) => { + log::debug!( + target: LOG_TARGET, + "On-chain receipt extraction failed for block #{} ({:?}), falling back to DB: {err:?}", + block.number(), + block.hash() + ); + // Fall back to DB reconstruction when state is pruned + self.receipts_from_db(block).await + }, + } } /// Extract and insert receipts from the given block. @@ -151,37 +441,12 @@ impl ReceiptProvider { Ok(receipts) } - /// Insert receipts into the provider. 
- /// - /// Note: Can be merged into `insert_block_receipts` once is fixed and subxt let - /// us create Mock `SubstrateBlock` - async fn insert( - &self, - block: &impl BlockInfo, - receipts: &[(TransactionSigned, ReceiptInfo)], - ) -> Result<(), ClientError> { - if receipts.is_empty() { - return Ok(()); - } - - let block_hash = block.hash(); - let block_hash_ref = block_hash.as_ref(); - let block_number = block.number() as i64; - - let result = sqlx::query!( - r#"SELECT EXISTS(SELECT 1 FROM transaction_hashes WHERE block_hash = $1) AS "exists!: bool""#, - block_hash_ref - ) - .fetch_one(&self.pool) - .await?; - - if result.exists { - return Ok(()); - } - + /// Prune blocks older blocks. + async fn prune_blocks(&self, block: &impl BlockInfo) -> Result<(), ClientError> { // Keep track of the latest block hashes, so we can prune older blocks. if let Some(keep_latest_n_blocks) = self.keep_latest_n_blocks { let latest = block.number(); + let block_hash = block.hash(); let mut block_number_to_hash = self.block_number_to_hash.lock().await; let oldest_block = latest.saturating_sub(keep_latest_n_blocks as _); @@ -202,62 +467,124 @@ impl ReceiptProvider { log::trace!(target: LOG_TARGET, "Pruning old blocks: {to_remove:?}"); self.remove(&to_remove).await?; } + Ok(()) + } - for (_, receipt) in receipts { - let transaction_hash: &[u8] = receipt.transaction_hash.as_ref(); - let transaction_index = receipt.transaction_index.as_u32() as i32; - - query!( - r#" - INSERT OR REPLACE INTO transaction_hashes (transaction_hash, block_hash, transaction_index) - VALUES ($1, $2, $3) - "#, - transaction_hash, - block_hash_ref, - transaction_index - ) - .execute(&self.pool) - .await?; + /// Insert receipts into the provider. 
+ /// + /// Note: Can be merged into `insert_block_receipts` once is fixed and subxt let + /// us create Mock `SubstrateBlock` + async fn insert( + &self, + block: &impl BlockInfo, + receipts: &[(TransactionSigned, ReceiptInfo)], + ) -> Result<(), ClientError> { + let block_hash = block.hash(); + let block_hash_ref = block_hash.as_ref(); + let block_number = block.number() as i64; + + log::trace!(target: LOG_TARGET, "Insert receipts for substrate block #{block_number} {:?}", block_hash); + + let result = sqlx::query!( + r#"SELECT EXISTS(SELECT 1 FROM transaction_hashes WHERE block_hash = $1) AS "exists!: bool""#, + block_hash_ref + ) + .fetch_one(&self.pool) + .await?; - for log in &receipt.logs { - let log_index = log.log_index.as_u32() as i32; - let address: &[u8] = log.address.as_ref(); + self.prune_blocks(block).await?; - let topic_0 = log.topics.first().as_ref().map(|v| &v[..]); - let topic_1 = log.topics.get(1).as_ref().map(|v| &v[..]); - let topic_2 = log.topics.get(2).as_ref().map(|v| &v[..]); - let topic_3 = log.topics.get(3).as_ref().map(|v| &v[..]); - let data = log.data.as_ref().map(|v| &v.0[..]); + if !result.exists { + for (_, receipt) in receipts { + let transaction_hash: &[u8] = receipt.transaction_hash.as_ref(); + let transaction_index = receipt.transaction_index.as_u32() as i32; + let status = receipt.status.unwrap_or_default().as_u32() as i32; + + // Serialize gas_used and gas_price as big-endian bytes + let gas_used = receipt.gas_used.to_big_endian(); + let gas_used_bytes = gas_used.as_ref(); + + let gas_price = receipt.effective_gas_price.to_big_endian(); + let gas_price_bytes = gas_price.as_ref(); query!( r#" - INSERT OR REPLACE INTO logs( - block_hash, - transaction_index, - log_index, - address, - block_number, - transaction_hash, - topic_0, topic_1, topic_2, topic_3, - data) - VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11) + INSERT OR REPLACE INTO transaction_hashes (transaction_hash, block_hash, transaction_index, status, gas_used, 
gas_price) + VALUES ($1, $2, $3, $4, $5, $6) "#, + transaction_hash, block_hash_ref, transaction_index, - log_index, - address, - block_number, - transaction_hash, - topic_0, - topic_1, - topic_2, - topic_3, - data + status, + gas_used_bytes, + gas_price_bytes ) .execute(&self.pool) .await?; + + for log in &receipt.logs { + let log_index = log.log_index.as_u32() as i32; + let address: &[u8] = log.address.as_ref(); + + let topic_0 = log.topics.first().as_ref().map(|v| &v[..]); + let topic_1 = log.topics.get(1).as_ref().map(|v| &v[..]); + let topic_2 = log.topics.get(2).as_ref().map(|v| &v[..]); + let topic_3 = log.topics.get(3).as_ref().map(|v| &v[..]); + let data = log.data.as_ref().map(|v| &v.0[..]); + + query!( + r#" + INSERT OR REPLACE INTO logs( + block_hash, + transaction_index, + log_index, + address, + block_number, + transaction_hash, + topic_0, topic_1, topic_2, topic_3, + data) + VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11) + "#, + block_hash_ref, + transaction_index, + log_index, + address, + block_number, + transaction_hash, + topic_0, + topic_1, + topic_2, + topic_3, + data + ) + .execute(&self.pool) + .await?; + } } } + + // Insert block mapping from Ethereum to Substrate hash + if let Some(ethereum_hash) = self + .receipt_extractor + .get_ethereum_block_hash(&block_hash, block_number as u64) + .await + { + // Fetch gas_limit and block_author for this block + let gas_limit = + self.receipt_extractor.block_gas_limit(block_hash).await.unwrap_or_default(); // Default gas limit + let block_author = + self.receipt_extractor.block_author(block_hash).await.unwrap_or_default(); // Default author + + self.insert_block_mapping( + ðereum_hash, + &block_hash, + block_number as u64, + &gas_limit, + &block_author, + ) + .await?; + } + Ok(()) } @@ -305,7 +632,14 @@ impl ReceiptProvider { qb.push(" AND block_number <= ").push_bind(to_block.as_u64() as i64); }, (None, None, Some(hash)) => { - qb.push(" AND block_hash = ").push_bind(hash.0.to_vec()); + // Try 
to resolve Ethereum hash to Substrate hash first + let substrate_hash = if let Some(resolved) = self.get_substrate_hash(&hash).await { + resolved + } else { + // Fallback: treat as Substrate hash for backward compatibility + hash + }; + qb.push(" AND block_hash = ").push_bind(substrate_hash.0.to_vec()); }, (None, None, None) => { qb.push(" AND block_number = ").push_bind(latest_block.as_u64() as i64); @@ -435,58 +769,58 @@ impl ReceiptProvider { Some(rows.into_iter().collect()) } + /// Get the signed transaction and receipt for the given block hash and transaction index. + /// This method first attempts to extract the receipt from on-chain data (events/storage). + /// If that fails (e.g., due to pruned state), it falls back to reconstructing from the + /// database. + async fn signed_tx_and_receipt_by_block_hash_and_index( + &self, + block_hash: &H256, + transaction_index: usize, + ) -> Option<(TransactionSigned, ReceiptInfo)> { + let block = self.block_provider.block_by_hash(block_hash).await.ok()??; + + // Try on-chain extraction first + match self.receipt_extractor.extract_from_transaction(&block, transaction_index).await { + Ok((tx, receipt)) => Some((tx, receipt)), + Err(err) => { + log::debug!( + target: LOG_TARGET, + "On-chain receipt extraction failed for block {block_hash:?} tx index {transaction_index}, falling back to DB: {err:?}" + ); + + // Fall back to DB reconstruction + let ext_iter = self.receipt_extractor.get_block_extrinsics(&block).await.ok()?; + let (_, call, _) = ext_iter.into_iter().nth(transaction_index)?; + self.receipt_from_db_by_index(&block, transaction_index, &call).await.ok() + }, + } + } + /// Get the receipt for the given block hash and transaction index. 
pub async fn receipt_by_block_hash_and_index( &self, block_hash: &H256, transaction_index: usize, ) -> Option { - let block = self.block_provider.block_by_hash(block_hash).await.ok()??; let (_, receipt) = self - .receipt_extractor - .extract_from_transaction(&block, transaction_index) - .await - .ok()?; + .signed_tx_and_receipt_by_block_hash_and_index(block_hash, transaction_index) + .await?; Some(receipt) } /// Get the receipt for the given transaction hash. pub async fn receipt_by_hash(&self, transaction_hash: &H256) -> Option { let (block_hash, transaction_index) = self.fetch_row(transaction_hash).await?; - - let block = self.block_provider.block_by_hash(&block_hash).await.ok()??; - let (_, receipt) = self - .receipt_extractor - .extract_from_transaction(&block, transaction_index) - .await - .ok()?; - Some(receipt) + self.receipt_by_block_hash_and_index(&block_hash, transaction_index).await } /// Get the signed transaction for the given transaction hash. pub async fn signed_tx_by_hash(&self, transaction_hash: &H256) -> Option { - let transaction_hash = transaction_hash.as_ref(); - let result = query!( - r#" - SELECT block_hash, transaction_index - FROM transaction_hashes - WHERE transaction_hash = $1 - "#, - transaction_hash - ) - .fetch_optional(&self.pool) - .await - .ok()??; - - let block_hash = H256::from_slice(&result.block_hash[..]); - let transaction_index = result.transaction_index.try_into().ok()?; - - let block = self.block_provider.block_by_hash(&block_hash).await.ok()??; + let (block_hash, transaction_index) = self.fetch_row(transaction_hash).await?; let (signed_tx, _) = self - .receipt_extractor - .extract_from_transaction(&block, transaction_index) - .await - .ok()?; + .signed_tx_and_receipt_by_block_hash_and_index(&block_hash, transaction_index) + .await?; Some(signed_tx) } } @@ -573,6 +907,7 @@ mod tests { } assert_eq!(count(&provider.pool, "transaction_hashes", None).await, n); assert_eq!(count(&provider.pool, "logs", None).await, n); + 
assert_eq!(count(&provider.pool, "eth_to_substrate_blocks", None).await, n); assert_eq!(provider.block_number_to_hash.lock().await.len(), n); return Ok(()); @@ -601,6 +936,7 @@ mod tests { } assert_eq!(count(&provider.pool, "transaction_hashes", None).await, 1); assert_eq!(count(&provider.pool, "logs", None).await, 1); + assert_eq!(count(&provider.pool, "eth_to_substrate_blocks", None).await, 1); assert_eq!( provider.block_number_to_hash.lock().await.clone(), [(1, H256::from([2u8; 32]))].into(), @@ -781,4 +1117,241 @@ mod tests { assert_eq!(logs, vec![log1.clone(), log2.clone()]); Ok(()) } + + #[sqlx::test] + async fn test_block_mapping_insert_get(pool: SqlitePool) -> anyhow::Result<()> { + let provider = setup_sqlite_provider(pool).await; + let ethereum_hash = H256::from([1u8; 32]); + let substrate_hash = H256::from([2u8; 32]); + let block_number = 42u64; + let gas_limit = U256::from(30_000_000); + let block_author = H160::zero(); + + // Insert mapping + provider + .insert_block_mapping( + ðereum_hash, + &substrate_hash, + block_number, + &gas_limit, + &block_author, + ) + .await?; + + // Test forward lookup + let resolved = provider.get_substrate_hash(ðereum_hash).await; + assert_eq!(resolved, Some(substrate_hash)); + + // Test reverse lookup + let resolved = provider.get_ethereum_hash(&substrate_hash).await; + assert_eq!(resolved, Some(ethereum_hash)); + + Ok(()) + } + + #[sqlx::test] + async fn test_block_mapping_overwrite(pool: SqlitePool) -> anyhow::Result<()> { + let provider = setup_sqlite_provider(pool).await; + let ethereum_hash = H256::from([1u8; 32]); + let substrate_hash1 = H256::from([2u8; 32]); + let substrate_hash2 = H256::from([3u8; 32]); + let block_number = 42u64; + let gas_limit = U256::from(30_000_000); + let block_author = H160::zero(); + + // Insert first mapping + provider + .insert_block_mapping( + ðereum_hash, + &substrate_hash1, + block_number, + &gas_limit, + &block_author, + ) + .await?; + 
assert_eq!(provider.get_substrate_hash(ðereum_hash).await, Some(substrate_hash1)); + + // Insert second mapping (should overwrite) + provider + .insert_block_mapping( + ðereum_hash, + &substrate_hash2, + block_number, + &gas_limit, + &block_author, + ) + .await?; + assert_eq!(provider.get_substrate_hash(ðereum_hash).await, Some(substrate_hash2)); + + // Old mapping should be gone + assert_eq!(provider.get_ethereum_hash(&substrate_hash1).await, None); + + Ok(()) + } + + #[sqlx::test] + async fn test_block_mapping_remove(pool: SqlitePool) -> anyhow::Result<()> { + let provider = setup_sqlite_provider(pool).await; + let ethereum_hash1 = H256::from([1u8; 32]); + let ethereum_hash2 = H256::from([2u8; 32]); + let substrate_hash1 = H256::from([3u8; 32]); + let substrate_hash2 = H256::from([4u8; 32]); + let block_number = 42u64; + let gas_limit = U256::from(30_000_000); + let block_author = H160::zero(); + + // Insert mappings + provider + .insert_block_mapping( + ðereum_hash1, + &substrate_hash1, + block_number, + &gas_limit, + &block_author, + ) + .await?; + provider + .insert_block_mapping( + ðereum_hash2, + &substrate_hash2, + block_number, + &gas_limit, + &block_author, + ) + .await?; + + // Verify they exist + assert_eq!(provider.get_substrate_hash(ðereum_hash1).await, Some(substrate_hash1)); + assert_eq!(provider.get_substrate_hash(ðereum_hash2).await, Some(substrate_hash2)); + + // Remove one mapping + provider.remove_block_mappings(&[ethereum_hash1]).await?; + + // Verify removal + assert_eq!(provider.get_substrate_hash(ðereum_hash1).await, None); + assert_eq!(provider.get_substrate_hash(ðereum_hash2).await, Some(substrate_hash2)); + + Ok(()) + } + + #[sqlx::test] + async fn test_block_mapping_pruning_integration(pool: SqlitePool) -> anyhow::Result<()> { + let provider = setup_sqlite_provider(pool).await; + let ethereum_hash = H256::from([1u8; 32]); + let substrate_hash = H256::from([2u8; 32]); + let block_number = 42u64; + let gas_limit = U256::from(30_000_000); 
+ let block_author = H160::zero(); + + // Insert mapping + provider + .insert_block_mapping( + ðereum_hash, + &substrate_hash, + block_number, + &gas_limit, + &block_author, + ) + .await?; + assert_eq!(provider.get_substrate_hash(ðereum_hash).await, Some(substrate_hash)); + + // Remove substrate block (this should also remove the mapping) + provider.remove(&[substrate_hash]).await?; + + // Mapping should be gone + assert_eq!(provider.get_substrate_hash(ðereum_hash).await, None); + + Ok(()) + } + + #[sqlx::test] + async fn test_logs_with_ethereum_block_hash_mapping(pool: SqlitePool) -> anyhow::Result<()> { + let provider = setup_sqlite_provider(pool).await; + let ethereum_hash = H256::from([1u8; 32]); + let substrate_hash = H256::from([2u8; 32]); + let block_number = 1u64; + let gas_limit = U256::from(30_000_000); + let block_author = H160::zero(); + + // Create a log with substrate hash + let log = Log { + block_hash: substrate_hash, + block_number: block_number.into(), + address: H160::from([1u8; 20]), + topics: vec![H256::from([1u8; 32])], + transaction_hash: H256::from([3u8; 32]), + transaction_index: U256::from(0), + log_index: U256::from(0), + data: Some(vec![0u8; 32].into()), + ..Default::default() + }; + + // Insert the log + let block = MockBlockInfo { hash: substrate_hash, number: block_number as u32 }; + let receipts = vec![( + TransactionSigned::default(), + ReceiptInfo { + logs: vec![log.clone()], + transaction_hash: log.transaction_hash, + transaction_index: log.transaction_index, + ..Default::default() + }, + )]; + provider.insert(&block, &receipts).await?; + + // Insert block mapping + provider + .insert_block_mapping( + ðereum_hash, + &substrate_hash, + block_number, + &gas_limit, + &block_author, + ) + .await?; + + // Query logs using Ethereum block hash (should resolve to substrate hash) + let logs = provider + .logs(Some(Filter { block_hash: Some(ethereum_hash), ..Default::default() })) + .await?; + assert_eq!(logs, vec![log]); + + Ok(()) + } + + 
#[sqlx::test] + async fn test_mapping_count(pool: SqlitePool) -> anyhow::Result<()> { + let provider = setup_sqlite_provider(pool).await; + + // Initially no mappings + assert_eq!(count(&provider.pool, "eth_to_substrate_blocks", None).await, 0); + + // Insert some mappings + provider + .insert_block_mapping( + &H256::from([1u8; 32]), + &H256::from([2u8; 32]), + 1, + &U256::from(30_000_000), + &H160::zero(), + ) + .await?; + provider + .insert_block_mapping( + &H256::from([3u8; 32]), + &H256::from([4u8; 32]), + 2, + &U256::from(30_000_000), + &H160::zero(), + ) + .await?; + + assert_eq!(count(&provider.pool, "eth_to_substrate_blocks", None).await, 2); + + // Remove one + provider.remove_block_mappings(&[H256::from([1u8; 32])]).await?; + assert_eq!(count(&provider.pool, "eth_to_substrate_blocks", None).await, 1); + + Ok(()) + } } diff --git a/substrate/frame/revive/rpc/src/subxt_client.rs b/substrate/frame/revive/rpc/src/subxt_client.rs index f82635f206823..58a2277c35d2a 100644 --- a/substrate/frame/revive/rpc/src/subxt_client.rs +++ b/substrate/frame/revive/rpc/src/subxt_client.rs @@ -66,6 +66,14 @@ pub use subxt::config::PolkadotConfig as SrcChainConfig; substitute_type( path = "sp_weights::weight_v2::Weight", with = "::subxt::utils::Static<::sp_weights::Weight>" + ), + substitute_type( + path = "pallet_revive::evm::api::rpc_types_gen::Block", + with = "::subxt::utils::Static<::pallet_revive::evm::Block>" + ), + substitute_type( + path = "pallet_revive::evm::block_hash::ReceiptGasInfo", + with = "::subxt::utils::Static<::pallet_revive::evm::block_hash::ReceiptGasInfo>" ) )] mod src_chain {} diff --git a/substrate/frame/revive/rpc/src/tests.rs b/substrate/frame/revive/rpc/src/tests.rs index fd8fa53c0b4c1..a68a9b1ed8e35 100644 --- a/substrate/frame/revive/rpc/src/tests.rs +++ b/substrate/frame/revive/rpc/src/tests.rs @@ -21,15 +21,16 @@ use crate::{ cli::{self, CliCommand}, example::TransactionBuilder, - subxt_client, - 
subxt_client::{src_chain::runtime_types::pallet_revive::primitives::Code, SrcChainConfig}, + subxt_client::{ + self, src_chain::runtime_types::pallet_revive::primitives::Code, SrcChainConfig, + }, EthRpcClient, }; use clap::Parser; use jsonrpsee::ws_client::{WsClient, WsClientBuilder}; use pallet_revive::{ create1, - evm::{Account, BlockTag, U256}, + evm::{Account, BlockNumberOrTag, BlockTag, U256}, }; use static_init::dynamic; use std::{sync::Arc, thread}; @@ -53,30 +54,40 @@ async fn ws_client_with_retry(url: &str) -> WsClient { } struct SharedResources { + eth_rpc_port: u32, + node_rpc_port: u32, _node_handle: std::thread::JoinHandle<()>, _rpc_handle: std::thread::JoinHandle<()>, } impl SharedResources { - fn start() -> Self { - // Start the node. + fn start_advanced( + node_rpc_port: u32, + eth_rpc_port: u32, + node_extra_args: Vec<&'static str>, + ) -> Self { + let node_rpc_port_arg = format!("--rpc-port={node_rpc_port}"); let _node_handle = thread::spawn(move || { - if let Err(e) = start_node_inline(vec![ + let args = vec![ "--dev", - "--rpc-port=45789", + &node_rpc_port_arg, "--no-telemetry", "--no-prometheus", "-lerror,evm=debug,sc_rpc_server=info,runtime::revive=trace", - ]) { + ]; + let combined_args = [args, node_extra_args].concat(); + if let Err(e) = start_node_inline(combined_args) { panic!("Node exited with error: {e:?}"); } }); + let eth_rpc_port_arg = format!("--rpc-port={eth_rpc_port}"); + let node_rpc_url = format!("--node-rpc-url=ws://localhost:{node_rpc_port}"); // Start the rpc server. 
let args = CliCommand::parse_from([ "--dev", - "--rpc-port=45788", - "--node-rpc-url=ws://localhost:45789", + &eth_rpc_port_arg, + &node_rpc_url, "--no-prometheus", "-linfo,eth-rpc=debug", ]); @@ -86,18 +97,36 @@ impl SharedResources { panic!("eth-rpc exited with error: {e:?}"); } }); + Self { eth_rpc_port, node_rpc_port, _node_handle, _rpc_handle } + } + + fn start() -> Self { + Self::start_advanced(45789, 45788, vec![]) + } - Self { _node_handle, _rpc_handle } + async fn client(&self) -> WsClient { + let url = format!("ws://localhost:{}", self.eth_rpc_port); + ws_client_with_retry(&url).await } - async fn client() -> WsClient { - ws_client_with_retry("ws://localhost:45788").await + async fn node_client(&self) -> OnlineClient<SrcChainConfig> { + let url = format!("ws://localhost:{}", self.node_rpc_port); + OnlineClient::<SrcChainConfig>::from_url(url) + .await + .expect("Failed to get online client") } } #[dynamic(lazy)] static mut SHARED_RESOURCES: SharedResources = SharedResources::start(); +// TODO maybe it is ok to run single shared resource for all tests? +// Setting state-pruning to low value, to not wait long for state pruning, which is required for +// some EVM reconstruction tests +#[dynamic(lazy)] +static mut SHARED_RESOURCES_QUICK_PRUNE: SharedResources = + SharedResources::start_advanced(55789, 55788, vec!["--state-pruning=8"]); + macro_rules! unwrap_call_err( ($err:expr) => { match $err.downcast_ref::().unwrap() { @@ -109,8 +138,8 @@ macro_rules!
unwrap_call_err( #[tokio::test] async fn transfer() -> anyhow::Result<()> { - let _lock = SHARED_RESOURCES.write(); - let client = Arc::new(SharedResources::client().await); + let shared_resources = SHARED_RESOURCES.write(); + let client = Arc::new(shared_resources.client().await); let ethan = Account::from(subxt_signer::eth::dev::ethan()); let initial_balance = client.get_balance(ethan.address(), BlockTag::Latest.into()).await?; @@ -137,8 +166,8 @@ async fn transfer() -> anyhow::Result<()> { #[tokio::test] async fn deploy_and_call() -> anyhow::Result<()> { - let _lock = SHARED_RESOURCES.write(); - let client = std::sync::Arc::new(SharedResources::client().await); + let shared_resources = SHARED_RESOURCES.write(); + let client = Arc::new(shared_resources.client().await); let account = Account::default(); // Balance transfer @@ -203,7 +232,12 @@ async fn deploy_and_call() -> anyhow::Result<()> { ); let balance = client.get_balance(contract_address, BlockTag::Latest.into()).await?; - assert_eq!(Some(value), balance.checked_sub(initial_balance), "Contract {contract_address:?} Balance {balance} should have increased from {initial_balance} by {value}."); + assert_eq!( + Some(value), + balance.checked_sub(initial_balance), + "Contract {contract_address:?} +Balance {balance} should have increased from {initial_balance} by {value}." 
+ ); // Balance transfer to contract let initial_balance = client.get_balance(contract_address, BlockTag::Latest.into()).await?; @@ -227,8 +261,8 @@ async fn deploy_and_call() -> anyhow::Result<()> { #[tokio::test] async fn runtime_api_dry_run_addr_works() -> anyhow::Result<()> { - let _lock = SHARED_RESOURCES.write(); - let client = std::sync::Arc::new(SharedResources::client().await); + let shared_resources = SHARED_RESOURCES.write(); + let client = Arc::new(shared_resources.client().await); let account = Account::default(); let origin: [u8; 32] = account.substrate_account().into(); @@ -258,8 +292,8 @@ async fn runtime_api_dry_run_addr_works() -> anyhow::Result<()> { #[tokio::test] async fn invalid_transaction() -> anyhow::Result<()> { - let _lock = SHARED_RESOURCES.write(); - let client = Arc::new(SharedResources::client().await); + let shared_resources = SHARED_RESOURCES.write(); + let client = Arc::new(shared_resources.client().await); let ethan = Account::from(subxt_signer::eth::dev::ethan()); let err = TransactionBuilder::new(&client) @@ -275,3 +309,182 @@ async fn invalid_transaction() -> anyhow::Result<()> { Ok(()) } + +// Wait until state is pruned, it is assumed that initially a state is available +// at latest best block. 
+async fn wait_until_state_pruned(client: OnlineClient<SrcChainConfig>) -> anyhow::Result<()> { + let query = subxt_client::storage().revive().ethereum_block(); + let mut blocks = client.blocks().subscribe_best().await?; + + // Get current best block + let block_hash = if let Some(Ok(block)) = blocks.next().await { + block.hash() + } else { + return Err(anyhow::anyhow!("Failed to fetch next block")); + }; + let storage = client.storage().at(block_hash); + + // Inspect storage at best block we got until state is pruned + loop { + match blocks.next().await { + Some(Ok(block)) => { + println!("block current = {:?} {:?} ", block.number(), block.hash()); + // Break only when the state-discarded error message appears + match storage.fetch(&query).await { + Ok(_) => { + // State still available, continue waiting + }, + Err(err) if format!("{:?}", err).contains("State already discarded") => { + println!("storage pruned: {:?}", err); + return Ok(()); + }, + Err(err) => { + return Err(anyhow::anyhow!("Error fetching storage: {:?}", err)); + }, + } + }, + Some(Err(e)) => return Err(anyhow::anyhow!("Error subscribing to blocks: {:?}", e)), + None => return Err(anyhow::anyhow!("Block subscription ended unexpectedly")), + } + } +} + +#[tokio::test] +async fn reconstructed_block_matches_storage_block() -> anyhow::Result<()> { + let shared_resources = SHARED_RESOURCES_QUICK_PRUNE.write(); + let client = Arc::new(shared_resources.client().await); + + // Deploy a contract to have some interesting blocks + let (bytes, _) = pallet_revive_fixtures::compile_module("dummy")?; + let value = U256::from(5_000_000_000_000u128); + let tx = TransactionBuilder::new(&client) + .value(value) + .input(bytes.to_vec()) + .send() + .await?; + + let receipt = tx.wait_for_receipt().await?; + let block_number = receipt.block_number; + let block_hash = receipt.block_hash; + println!("block_number = {block_number:?}"); + println!("tx hash = {:?}", tx.hash()); + + // Fetch the block immediately (should come from storage
EthereumBlock) + let storage_block_by_number = client + .get_block_by_number(BlockNumberOrTag::U256(block_number.into()), true) + .await? + .expect("Block should exist"); + let storage_block_by_hash = + client.get_block_by_hash(block_hash, true).await?.expect("Block should exist"); + + // All storage blocks must match + assert_eq!( + storage_block_by_number, storage_block_by_hash, + "Storage blocks by number and hash should match" + ); + + wait_until_state_pruned(shared_resources.node_client().await).await?; + + // Fetch the same block again - it should be reconstructed now + let reconstructed_block_number = client + .get_block_by_number(BlockNumberOrTag::U256(block_number.into()), true) + .await? + .expect("Block should still exist"); + let reconstructed_block_by_hash = + client.get_block_by_hash(block_hash, true).await?.expect("Block should exist"); + + // All reconstructed blocks must match + assert_eq!( + reconstructed_block_number, reconstructed_block_by_hash, + "Reconstructed blocks by number and hash should match" + ); + + // Reconstructed and storage blocks must match + assert_eq!( + storage_block_by_number, reconstructed_block_number, + "Reconstructed block should match storage block exactly" + ); + Ok(()) +} + +#[tokio::test] +async fn reconstructed_tx_matches_storage_tx() -> anyhow::Result<()> { + let shared_resources = SHARED_RESOURCES_QUICK_PRUNE.write(); + let client = Arc::new(shared_resources.client().await); + + // Deploy a contract to have some interesting blocks + let (bytes, _) = pallet_revive_fixtures::compile_module("dummy")?; + let value = U256::from(5_000_000_000_000u128); + let tx = TransactionBuilder::new(&client) + .value(value) + .input(bytes.to_vec()) + .send() + .await?; + + let receipt = tx.wait_for_receipt().await?; + let block_number = receipt.block_number; + let block_hash = receipt.block_hash; + let tx_id = U256::from(0); + println!("block_number = {block_number:?}"); + println!("tx hash = {:?}", tx.hash()); + + // Fetch the tx
immediately (should come from storage EthereumBlock) + let storage_tx_by_tx_hash = + client.get_transaction_by_hash(tx.hash()).await?.expect("Tx should exist"); + let storage_tx_by_block_hash_and_tx_id = client + .get_transaction_by_block_hash_and_index(block_hash, tx_id) + .await? + .expect("Tx should exist"); + let storage_tx_by_block_number_and_tx_id = client + .get_transaction_by_block_number_and_index( + BlockNumberOrTag::U256(block_number.into()), + tx_id, + ) + .await? + .expect("Tx should exist"); + + // All storage txs must match + assert_eq!( + storage_tx_by_tx_hash, storage_tx_by_block_hash_and_tx_id, + "Storage txs by hash and block hash and tx id should match" + ); + assert_eq!( + storage_tx_by_tx_hash, storage_tx_by_block_number_and_tx_id, + "Storage txs by hash and block number and tx id should match" + ); + + wait_until_state_pruned(shared_resources.node_client().await).await?; + + // Fetch the same tx again - it should be reconstructed now + let reconstructed_tx_by_tx_hash = + client.get_transaction_by_hash(tx.hash()).await?.expect("Tx should exist"); + let reconstructed_tx_by_block_hash_and_tx_id = client + .get_transaction_by_block_hash_and_index(block_hash, tx_id) + .await? + .expect("Tx should exist"); + let reconstructed_tx_by_block_number_and_tx_id = client + .get_transaction_by_block_number_and_index( + BlockNumberOrTag::U256(block_number.into()), + tx_id, + ) + .await? 
+ .expect("Tx should exist"); + + // All reconstructed txs must match + assert_eq!( + reconstructed_tx_by_tx_hash, reconstructed_tx_by_block_hash_and_tx_id, + "Reconstructed txs by hash and block hash and tx id should match" + ); + assert_eq!( + reconstructed_tx_by_tx_hash, reconstructed_tx_by_block_number_and_tx_id, + "Storage txs by hash and block number and tx id should match" + ); + + // Reconstructed and storage txs must match + assert_eq!( + storage_tx_by_tx_hash, reconstructed_tx_by_tx_hash, + "Reconstructed tx should match storage tx exactly" + ); + + Ok(()) +} diff --git a/substrate/frame/revive/src/benchmarking.rs b/substrate/frame/revive/src/benchmarking.rs index c9acd950dc900..519473a52a133 100644 --- a/substrate/frame/revive/src/benchmarking.rs +++ b/substrate/frame/revive/src/benchmarking.rs @@ -21,7 +21,9 @@ use crate::{ call_builder::{caller_funding, default_deposit_limit, CallSetup, Contract, VmBinaryModule}, evm::{ - block_hash::{EthereumBlockBuilder, block_storage, runtime::GAS_PRICE, + block_hash::{EthereumBlockBuilder, PalletStorage}, + block_storage, + runtime::GAS_PRICE, TransactionLegacyUnsigned, TransactionSigned, TransactionUnsigned, }, exec::{Key, MomentOf, PrecompileExt}, @@ -2721,7 +2723,10 @@ mod benchmarks { block_storage::get_receipt_details().unwrap_or_default(); let block_builder_ir = EthBlockBuilderIR::::get(); - let mut block_builder = EthereumBlockBuilder::::from_ir(block_builder_ir); + let mut block_builder = EthereumBlockBuilder::from_ir_with_storage( + block_builder_ir, + PalletStorage::::new(), + ); block_builder.process_transaction( signed_transaction, @@ -2794,7 +2799,10 @@ mod benchmarks { block_storage::get_receipt_details().unwrap_or_default(); let block_builder_ir = EthBlockBuilderIR::::get(); - let mut block_builder = EthereumBlockBuilder::::from_ir(block_builder_ir); + let mut block_builder = EthereumBlockBuilder::from_ir_with_storage( + block_builder_ir, + PalletStorage::::new(), + );
block_builder.process_transaction( signed_transaction, @@ -2858,7 +2866,10 @@ mod benchmarks { let (encoded_logs, bloom) = block_storage::get_receipt_details().unwrap_or_default(); let block_builder_ir = EthBlockBuilderIR::::get(); - let mut block_builder = EthereumBlockBuilder::::from_ir(block_builder_ir); + let mut block_builder = EthereumBlockBuilder::from_ir_with_storage( + block_builder_ir, + PalletStorage::::new(), + ); block_builder.process_transaction( signed_transaction, @@ -2920,7 +2931,10 @@ mod benchmarks { let (encoded_logs, bloom) = block_storage::get_receipt_details().unwrap_or_default(); let block_builder_ir = EthBlockBuilderIR::::get(); - let mut block_builder = EthereumBlockBuilder::::from_ir(block_builder_ir); + let mut block_builder = EthereumBlockBuilder::from_ir_with_storage( + block_builder_ir, + PalletStorage::::new(), + ); block_builder.process_transaction( signed_transaction, diff --git a/substrate/frame/revive/src/evm/api/rpc_types_gen.rs b/substrate/frame/revive/src/evm/api/rpc_types_gen.rs index 2efe8695e148b..cc8f27c32744b 100644 --- a/substrate/frame/revive/src/evm/api/rpc_types_gen.rs +++ b/substrate/frame/revive/src/evm/api/rpc_types_gen.rs @@ -386,9 +386,7 @@ impl Default for SyncingStatus { } /// Transaction information -#[derive( - Debug, Default, Clone, Serialize, Deserialize, Eq, PartialEq, TypeInfo, Encode, Decode, -)] +#[derive(Debug, Default, Clone, Serialize, Eq, PartialEq, TypeInfo, Encode, Decode)] #[serde(rename_all = "camelCase")] pub struct TransactionInfo { /// block hash @@ -405,6 +403,50 @@ pub struct TransactionInfo { pub transaction_signed: TransactionSigned, } +// Custom deserializer to work around serde's limitation with flatten + untagged enums from Value +// See: https://github.com/serde-rs/serde/issues/1183 +impl<'de> Deserialize<'de> for TransactionInfo { + fn deserialize(deserializer: D) -> Result + where + D: serde::Deserializer<'de>, + { + use alloc::{collections::BTreeMap, string::String}; + use 
serde::de::Error; + + // First try deserializing to a map + let mut map = >::deserialize(deserializer)?; + + // Extract the TransactionInfo-specific fields + let block_hash = + map.remove("blockHash").ok_or_else(|| D::Error::missing_field("blockHash"))?; + let block_number = map + .remove("blockNumber") + .ok_or_else(|| D::Error::missing_field("blockNumber"))?; + let from = map.remove("from").ok_or_else(|| D::Error::missing_field("from"))?; + let hash = map.remove("hash").ok_or_else(|| D::Error::missing_field("hash"))?; + let transaction_index = map + .remove("transactionIndex") + .ok_or_else(|| D::Error::missing_field("transactionIndex"))?; + + // The remaining fields should be for TransactionSigned + // Convert back to JSON and deserialize + let remaining = serde_json::Value::Object(map.into_iter().collect()); + let json_str = serde_json::to_string(&remaining).map_err(D::Error::custom)?; + let transaction_signed: TransactionSigned = + serde_json::from_str(&json_str).map_err(D::Error::custom)?; + + Ok(Self { + block_hash: serde_json::from_value(block_hash).map_err(D::Error::custom)?, + block_number: serde_json::from_value(block_number).map_err(D::Error::custom)?, + from: serde_json::from_value(from).map_err(D::Error::custom)?, + hash: serde_json::from_value(hash).map_err(D::Error::custom)?, + transaction_index: serde_json::from_value(transaction_index) + .map_err(D::Error::custom)?, + transaction_signed, + }) + } +} + #[derive(Debug, Clone, Serialize, Deserialize, From, TryInto, Eq, PartialEq)] #[serde(untagged)] pub enum TransactionUnsigned { @@ -1037,6 +1079,7 @@ pub struct Transaction4844Signed { Decode, DecodeWithMemTracking, )] +#[serde(rename_all = "camelCase")] pub struct TransactionLegacySigned { #[serde(flatten)] pub transaction_legacy_unsigned: TransactionLegacyUnsigned, @@ -1078,6 +1121,73 @@ pub struct FeeHistoryResult { mod tests { use super::*; + #[test] + fn test_transaction_info_deserialize_from_value() { + // This tests the custom deserializer for 
TransactionInfo + // which works around serde's limitation with flatten + untagged enums from Value + let tx_info_expected = serde_json::json!({ + "blockHash": "0xfb8c980d1da1a75e68c2ea4d55cb88d62dedbbb5eaf69df8fe337e9f6922b73a", + "blockNumber": "0x161bd0f", + "from": "0x4838b106fce9647bdf1e7877bf73ce8b0bad5f97", + "hash": "0x2c522d01183e9ed70caaf75c940ba9908d573cfc9996b3e7adc90313798279c8", + "transactionIndex": "0x7a", + "chainId": "0x1", + "gas": "0x565f", + "gasPrice": "0x23cf3fd4", + "input": "0x", + "nonce": "0x2c5ce1", + "r": "0x4a5703e4d8daf045f021cb32897a25b17d61b9ab629a59f0731ef4cce63f93d6", + "s": "0x711812237c1fed6aaf08e9f47fc47e547fdaceba9ab7507e62af29a945354fb6", + "to": "0x388c818ca8b9251b393131c08a736a67ccb19297", + "type": "0x0", + "v": "0x1", + "value": "0x12bf92aae0c2e70" + }); + + // Test deserializing from Value (this was failing before the custom deserializer) with + // below error: + // ``` + // Failed to deserialize from Value: Some(Error("data did not match any variant of untagged enum TransactionSigned", line: 0, column: 0)) + // ``` + let tx_info_from_value: Result = + serde_json::from_value(tx_info_expected.clone()); + assert!( + tx_info_from_value.is_ok(), + "Failed to deserialize from Value: {:?}", + tx_info_from_value.err() + ); + + // Test deserializing from string (this was always working) + let json_str = serde_json::to_string(&tx_info_expected).unwrap(); + let tx_info_from_str: Result = + serde_json::from_str(&json_str); + assert!( + tx_info_from_str.is_ok(), + "Failed to deserialize from string: {:?}", + tx_info_from_str.err() + ); + + // Verify both methods produce the same result + let tx_info_from_value = tx_info_from_value.unwrap(); + let tx_info_from_str = tx_info_from_str.unwrap(); + assert_eq!( + tx_info_from_value, tx_info_from_str, + "Value and string deserialization should match" + ); + + // Serialize it back to JSON + let tx_info_serialized = serde_json::to_value(&tx_info_from_value); + assert!( + 
tx_info_serialized.is_ok(), + "Failed to serialize to value: {:?}", + tx_info_serialized.err() + ); + let tx_info_serialized = tx_info_serialized.unwrap(); + + // Verify that deserializing and serializing leads to the same result + assert_eq!(tx_info_serialized, tx_info_expected); + } + #[test] fn test_block_serialization_roundtrip() { let json_input = r#"{ diff --git a/substrate/frame/revive/src/evm/block_hash.rs b/substrate/frame/revive/src/evm/block_hash.rs index 545fc08b6cc92..f04723093fff1 100644 --- a/substrate/frame/revive/src/evm/block_hash.rs +++ b/substrate/frame/revive/src/evm/block_hash.rs @@ -25,7 +25,10 @@ mod hash_builder; pub use hash_builder::{BuilderPhase, IncrementalHashBuilder, IncrementalHashBuilderIR}; mod block_builder; -pub use block_builder::{EthereumBlockBuilder, EthereumBlockBuilderIR}; +pub use block_builder::{ + BlockBuilderStorage, EthereumBlockBuilder, EthereumBlockBuilderIR, InMemoryStorage, + PalletStorage, +}; use crate::evm::Block; diff --git a/substrate/frame/revive/src/evm/block_hash/block_builder.rs b/substrate/frame/revive/src/evm/block_hash/block_builder.rs index 13dcf173c97f0..7c9b54e6277fb 100644 --- a/substrate/frame/revive/src/evm/block_hash/block_builder.rs +++ b/substrate/frame/revive/src/evm/block_hash/block_builder.rs @@ -29,6 +29,7 @@ use crate::{ }; use alloc::{vec, vec::Vec}; +use core::marker::PhantomData; use codec::{Decode, Encode}; use frame_support::weights::Weight; @@ -37,23 +38,91 @@ use sp_core::{keccak_256, H160, H256, U256}; const LOG_TARGET: &str = "runtime::revive::block_builder"; +/// Storage abstraction for caching first transaction/receipt values. +/// +/// The first transaction and receipt need special handling due to RLP encoding rules +/// for trie indices 0-127. This trait allows different storage backends: +/// - Pallet storage for on-chain block building +/// - In-memory storage for RPC block reconstruction +pub trait BlockBuilderStorage { + /// Store the first transaction and receipt values. 
+ fn put_first_values(&mut self, values: (Vec, Vec)); + + /// Retrieve and remove the first transaction and receipt values. + fn take_first_values(&mut self) -> Option<(Vec, Vec)>; +} + +/// In-memory storage implementation for RPC usage. +/// +/// This implementation doesn't need persistence since RPC block reconstruction +/// happens within a single function call. +pub struct InMemoryStorage { + first_values: Option<(Vec, Vec)>, +} + +impl InMemoryStorage { + /// Creates a new in-memory storage instance. + pub const fn new() -> Self { + Self { first_values: None } + } +} + +impl BlockBuilderStorage for InMemoryStorage { + fn put_first_values(&mut self, values: (Vec, Vec)) { + self.first_values = Some(values); + } + + fn take_first_values(&mut self) -> Option<(Vec, Vec)> { + self.first_values.take() + } +} + +/// Pallet storage implementation for on-chain block building. +/// +/// This implementation uses pallet storage to persist the first transaction +/// and receipt across block construction phases. +pub struct PalletStorage { + _phantom: PhantomData, +} + +impl PalletStorage { + /// Creates a new pallet storage instance. + pub const fn new() -> Self { + Self { _phantom: PhantomData } + } +} + +impl BlockBuilderStorage for PalletStorage { + fn put_first_values(&mut self, values: (Vec, Vec)) { + crate::EthBlockBuilderFirstValues::::put(Some(values)); + } + + fn take_first_values(&mut self) -> Option<(Vec, Vec)> { + crate::EthBlockBuilderFirstValues::::take() + } +} + /// Ethereum block builder designed to incrementally build the transaction and receipt trie roots. /// /// This builder is optimized to minimize memory usage and pallet storage by leveraging the internal /// structure of the Ethereum trie and the RLP encoding of receipts. 
-pub struct EthereumBlockBuilder { +/// +/// The generic parameter `S` allows for different storage backends: +/// - `PalletStorage` for on-chain block building with persistent storage +/// - `InMemoryStorage` for RPC block reconstruction without persistence +pub struct EthereumBlockBuilder { pub(crate) transaction_root_builder: IncrementalHashBuilder, pub(crate) receipts_root_builder: IncrementalHashBuilder, pub(crate) tx_hashes: Vec, gas_used: U256, logs_bloom: LogsBloom, gas_info: Vec, - _phantom: core::marker::PhantomData, + storage: S, } -impl EthereumBlockBuilder { - /// Constructs a new [`EthereumBlockBuilder`]. - pub fn new() -> Self { +impl EthereumBlockBuilder { + /// Constructs a new [`EthereumBlockBuilder`] with the provided storage backend. + pub fn new(storage: S) -> Self { Self { transaction_root_builder: IncrementalHashBuilder::new(), receipts_root_builder: IncrementalHashBuilder::new(), @@ -61,7 +130,7 @@ impl EthereumBlockBuilder { tx_hashes: Vec::new(), logs_bloom: LogsBloom::new(), gas_info: Vec::new(), - _phantom: core::marker::PhantomData, + storage, } } @@ -79,10 +148,10 @@ impl EthereumBlockBuilder { } } - /// Converts the intermediate representation back into a builder. + /// Converts the intermediate representation back into a builder with the provided storage. /// /// The intermediate representation is placed into the pallet storage. - pub fn from_ir(ir: EthereumBlockBuilderIR) -> Self { + pub fn from_ir_with_storage(ir: EthereumBlockBuilderIR, storage: S) -> Self { Self { transaction_root_builder: IncrementalHashBuilder::from_ir(ir.transaction_root_builder), receipts_root_builder: IncrementalHashBuilder::from_ir(ir.receipts_root_builder), @@ -90,18 +159,18 @@ impl EthereumBlockBuilder { tx_hashes: ir.tx_hashes, logs_bloom: LogsBloom { bloom: ir.logs_bloom }, gas_info: ir.gas_info, - _phantom: core::marker::PhantomData, + storage, } } - /// Store the first transaction and receipt in pallet storage. 
- fn pallet_put_first_values(&mut self, values: (Vec, Vec)) { - crate::EthBlockBuilderFirstValues::::put(Some(values)); + /// Store the first transaction and receipt in storage. + fn put_first_values(&mut self, values: (Vec, Vec)) { + self.storage.put_first_values(values); } - /// Take the first transaction and receipt from pallet storage. - fn pallet_take_first_values(&mut self) -> Option<(Vec, Vec)> { - crate::EthBlockBuilderFirstValues::::take() + /// Take the first transaction and receipt from storage. + fn take_first_values(&mut self) -> Option<(Vec, Vec)> { + self.storage.take_first_values() } /// Process a single transaction at a time. @@ -134,17 +203,17 @@ impl EthereumBlockBuilder { self.gas_info.push(ReceiptGasInfo { gas_used: gas_used.ref_time().into() }); - // The first transaction and receipt are returned to be stored in the pallet storage. + // The first transaction and receipt are returned to be stored in the storage. // The index of the incremental hash builders already expects the next items. 
if self.tx_hashes.len() == 1 { - log::debug!(target: LOG_TARGET, "Storing first transaction and receipt in pallet storage"); - self.pallet_put_first_values((transaction_encoded, encoded_receipt)); + log::debug!(target: LOG_TARGET, "Storing first transaction and receipt in storage"); + self.put_first_values((transaction_encoded, encoded_receipt)); return; } if self.transaction_root_builder.needs_first_value(BuilderPhase::ProcessingValue) { - if let Some((first_tx, first_receipt)) = self.pallet_take_first_values() { - log::debug!(target: LOG_TARGET, "Loaded first transaction and receipt from pallet storage"); + if let Some((first_tx, first_receipt)) = self.take_first_values() { + log::debug!(target: LOG_TARGET, "Loaded first transaction and receipt from storage"); self.transaction_root_builder.set_first_value(first_tx); self.receipts_root_builder.set_first_value(first_receipt); } else { @@ -166,7 +235,7 @@ impl EthereumBlockBuilder { gas_limit: U256, ) -> (Block, Vec) { if self.transaction_root_builder.needs_first_value(BuilderPhase::Build) { - if let Some((first_tx, first_receipt)) = self.pallet_take_first_values() { + if let Some((first_tx, first_receipt)) = self.take_first_values() { self.transaction_root_builder.set_first_value(first_tx); self.receipts_root_builder.set_first_value(first_receipt); } else { @@ -405,7 +474,7 @@ mod test { ExtBuilder::default().build().execute_with(|| { // Build the ethereum block incrementally. 
- let mut incremental_block = EthereumBlockBuilder::::new(); + let mut incremental_block = EthereumBlockBuilder::new(PalletStorage::::new()); for (signed, logs, success, gas_used) in transaction_details { let mut log_size = 0; @@ -425,7 +494,8 @@ mod test { ); let ir = incremental_block.to_ir(); - incremental_block = EthereumBlockBuilder::from_ir(ir); + incremental_block = + EthereumBlockBuilder::from_ir_with_storage(ir, PalletStorage::::new()); println!(" Log size {:?}", log_size); } diff --git a/substrate/frame/revive/src/evm/block_storage.rs b/substrate/frame/revive/src/evm/block_storage.rs index 758b598911ccf..6d68bb54eb63b 100644 --- a/substrate/frame/revive/src/evm/block_storage.rs +++ b/substrate/frame/revive/src/evm/block_storage.rs @@ -16,7 +16,7 @@ // limitations under the License. use crate::{ - evm::block_hash::{AccumulateReceipt, EthereumBlockBuilder, LogsBloom}, + evm::block_hash::{AccumulateReceipt, EthereumBlockBuilder, LogsBloom, PalletStorage}, sp_runtime::traits::One, BlockHash, Config, EthBlockBuilderIR, EthereumBlock, ReceiptInfoData, UniqueSaturatedInto, H160, H256, @@ -88,13 +88,9 @@ pub fn on_finalize_build_eth_block( EthBlockBuilderIR::::kill(); // Load the first values if not already loaded. - let (block, receipt_data) = EthereumBlockBuilder::::from_ir(block_builder_ir).build( - eth_block_num, - parent_hash, - timestamp, - block_author, - gas_limit, - ); + let (block, receipt_data) = + EthereumBlockBuilder::from_ir_with_storage(block_builder_ir, PalletStorage::::new()) + .build(eth_block_num, parent_hash, timestamp, block_author, gas_limit); // Put the block hash into storage. 
BlockHash::::insert(eth_block_num, block.hash); @@ -129,7 +125,8 @@ pub fn process_transaction( let (encoded_logs, bloom) = get_receipt_details().unwrap_or_default(); let block_builder_ir = EthBlockBuilderIR::::get(); - let mut block_builder = EthereumBlockBuilder::::from_ir(block_builder_ir); + let mut block_builder = + EthereumBlockBuilder::from_ir_with_storage(block_builder_ir, PalletStorage::::new()); block_builder.process_transaction(transaction_encoded, success, gas_used, encoded_logs, bloom); diff --git a/substrate/frame/revive/src/tests/block_hash.rs b/substrate/frame/revive/src/tests/block_hash.rs index 798a14b3ddca5..42561012a1243 100644 --- a/substrate/frame/revive/src/tests/block_hash.rs +++ b/substrate/frame/revive/src/tests/block_hash.rs @@ -18,7 +18,10 @@ //! The pallet-revive ETH block hash specific integration test suite. use crate::{ - evm::{block_hash::EthereumBlockBuilder, Block, TransactionSigned}, + evm::{ + block_hash::{EthereumBlockBuilder, PalletStorage}, + Block, TransactionSigned, + }, test_utils::{builder::Contract, deposit_limit, ALICE}, tests::{assert_ok, builder, Contracts, ExtBuilder, RuntimeOrigin, Test}, BalanceWithDust, Code, Config, EthBlock, EthBlockBuilderFirstValues, EthBlockBuilderIR, @@ -85,7 +88,8 @@ fn transactions_are_captured() { let expected_tx_root = Block::compute_trie_root(&expected_payloads); // Double check the trie root hash. - let mut builder = EthereumBlockBuilder::::from_ir(block_builder); + let mut builder = + EthereumBlockBuilder::from_ir_with_storage(block_builder, PalletStorage::::new()); let first_values = EthBlockBuilderFirstValues::::get().unwrap(); builder.transaction_root_builder.set_first_value(first_values.0); @@ -170,7 +174,8 @@ fn events_are_captured() { // 1 transaction captured. 
assert_eq!(block_builder.gas_info.len(), 1); - let mut builder = EthereumBlockBuilder::::from_ir(block_builder); + let mut builder = + EthereumBlockBuilder::from_ir_with_storage(block_builder, PalletStorage::::new()); builder.transaction_root_builder.set_first_value(expected_payloads[0].clone()); let tx_root = builder.transaction_root_builder.finish(); assert_eq!(tx_root, expected_tx_root.0.into());