diff --git a/.circleci/continue/rust-ci.yml b/.circleci/continue/rust-ci.yml index bdd3b57ec5613..852056d981d62 100644 --- a/.circleci/continue/rust-ci.yml +++ b/.circleci/continue/rust-ci.yml @@ -714,7 +714,7 @@ jobs: if [ "$CIRCLE_NODE_TOTAL" -gt 1 ]; then PARTITION_FLAG="$((CIRCLE_NODE_INDEX + 1))/$CIRCLE_NODE_TOTAL" fi - just hack $PARTITION_FLAG + just hack "$PARTITION_FLAG" "true" "$CIRCLE_WORKFLOW_ID" - rust-save-build-cache: *hack-cache-args # -------------------------------------------------------------------------- @@ -906,25 +906,6 @@ jobs: command: | just build-<> - # Kona Build Benchmarks - kona-cargo-build-benches: - docker: - - image: <> - resource_class: 2xlarge - steps: - - utils/checkout-with-mise: - checkout-method: blobless - - rust-prepare-and-restore-cache: &kona-benches-cache - directory: rust - prefix: kona-benches - - run: - name: Build benchmarks - working_directory: rust/kona - no_output_timeout: 40m - command: | - just benches - - rust-save-build-cache: *kona-benches-cache - # Kona Coverage kona-coverage: docker: @@ -1260,9 +1241,6 @@ workflows: target: ["cannon-client", "asterisc-client"] context: *rust-ci-context - - kona-cargo-build-benches: - context: *rust-ci-context - - kona-coverage: context: *rust-ci-context requires: diff --git a/.semgrepignore b/.semgrepignore index db09993ebfea6..5a5c4eea4ea16 100644 --- a/.semgrepignore +++ b/.semgrepignore @@ -19,3 +19,6 @@ op-chain-ops/script/testdata/scripts/ # Op-alloy book theme (third-party mdbook assets) rust/op-alloy/book/ + +# Op-reth test contracts (not production Solidity code) +rust/op-reth/crates/tests/ diff --git a/rust/Cargo.lock b/rust/Cargo.lock index d3269a540e9f0..5646536d30543 100644 --- a/rust/Cargo.lock +++ b/rust/Cargo.lock @@ -10609,6 +10609,38 @@ dependencies = [ "tracing", ] +[[package]] +name = "reth-exex-test-utils" +version = "1.10.2" +source = "git+https://github.com/paradigmxyz/reth?rev=2c5d00f#2c5d00ffb5b7f3ed8f89382c4bbea79c173da621" +dependencies = [ + 
"alloy-eips", + "eyre", + "futures-util", + "reth-chainspec", + "reth-config", + "reth-consensus", + "reth-db", + "reth-db-common", + "reth-ethereum-primitives", + "reth-evm-ethereum", + "reth-execution-types", + "reth-exex", + "reth-network", + "reth-node-api", + "reth-node-builder", + "reth-node-core", + "reth-node-ethereum", + "reth-payload-builder", + "reth-primitives-traits", + "reth-provider", + "reth-tasks", + "reth-transaction-pool", + "tempfile", + "thiserror 2.0.18", + "tokio", +] + [[package]] name = "reth-exex-types" version = "1.10.2" @@ -11270,6 +11302,7 @@ dependencies = [ "reth-optimism-evm", "reth-optimism-node", "reth-optimism-primitives", + "reth-optimism-trie", "reth-primitives-traits", "reth-provider", "reth-prune", @@ -11345,6 +11378,33 @@ dependencies = [ "thiserror 2.0.18", ] +[[package]] +name = "reth-optimism-exex" +version = "1.10.2" +dependencies = [ + "alloy-consensus", + "alloy-eips", + "eyre", + "futures", + "futures-util", + "reth-db", + "reth-ethereum-primitives", + "reth-execution-types", + "reth-exex", + "reth-exex-test-utils", + "reth-node-api", + "reth-node-builder", + "reth-optimism-chainspec", + "reth-optimism-node", + "reth-optimism-trie", + "reth-primitives-traits", + "reth-provider", + "reth-trie", + "tempfile", + "tokio", + "tracing", +] + [[package]] name = "reth-optimism-flashblocks" version = "1.10.2" @@ -11407,6 +11467,8 @@ dependencies = [ "clap", "eyre", "futures", + "futures-util", + "humantime", "op-alloy-consensus", "op-alloy-network", "op-alloy-rpc-types-engine", @@ -11425,12 +11487,14 @@ dependencies = [ "reth-optimism-chainspec", "reth-optimism-consensus", "reth-optimism-evm", + "reth-optimism-exex", "reth-optimism-forks", "reth-optimism-node", "reth-optimism-payload-builder", "reth-optimism-primitives", "reth-optimism-rpc", "reth-optimism-storage", + "reth-optimism-trie", "reth-optimism-txpool", "reth-payload-builder", "reth-payload-util", @@ -11452,6 +11516,7 @@ dependencies = [ "serde", "serde_json", 
"tokio", + "tracing", "url", ] @@ -11530,10 +11595,12 @@ dependencies = [ "alloy-json-rpc", "alloy-op-hardforks", "alloy-primitives", + "alloy-rlp", "alloy-rpc-client", "alloy-rpc-types-debug", "alloy-rpc-types-engine", "alloy-rpc-types-eth", + "alloy-serde", "alloy-transport", "alloy-transport-http", "async-trait", @@ -11551,6 +11618,7 @@ dependencies = [ "op-alloy-rpc-types-engine", "op-revm", "reqwest 0.13.2", + "reth-basic-payload-builder", "reth-chain-state", "reth-chainspec", "reth-evm", @@ -11563,8 +11631,12 @@ dependencies = [ "reth-optimism-forks", "reth-optimism-payload-builder", "reth-optimism-primitives", + "reth-optimism-trie", "reth-optimism-txpool", + "reth-payload-util", "reth-primitives-traits", + "reth-provider", + "reth-revm", "reth-rpc", "reth-rpc-api", "reth-rpc-engine-api", @@ -11575,7 +11647,9 @@ dependencies = [ "reth-tasks", "reth-transaction-pool", "revm", + "serde", "serde_json", + "strum", "thiserror 2.0.18", "tokio", "tokio-stream", @@ -11595,6 +11669,51 @@ dependencies = [ "reth-storage-api", ] +[[package]] +name = "reth-optimism-trie" +version = "1.10.2" +dependencies = [ + "alloy-consensus", + "alloy-eips", + "alloy-genesis", + "alloy-primitives", + "auto_impl", + "bincode 2.0.1", + "bytes", + "derive_more", + "eyre", + "metrics", + "mockall", + "parking_lot", + "reth-chainspec", + "reth-codecs", + "reth-db", + "reth-db-api", + "reth-db-common", + "reth-ethereum-primitives", + "reth-evm", + "reth-evm-ethereum", + "reth-execution-errors", + "reth-metrics", + "reth-node-api", + "reth-primitives-traits", + "reth-provider", + "reth-revm", + "reth-storage-errors", + "reth-tasks", + "reth-trie", + "reth-trie-common", + "secp256k1 0.31.1", + "serde", + "serial_test", + "strum", + "tempfile", + "test-case", + "thiserror 2.0.18", + "tokio", + "tracing", +] + [[package]] name = "reth-optimism-txpool" version = "1.10.2" @@ -13316,6 +13435,15 @@ dependencies = [ "winapi-util", ] +[[package]] +name = "scc" +version = "2.4.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "46e6f046b7fef48e2660c57ed794263155d713de679057f2d0c169bfc6e756cc" +dependencies = [ + "sdd", +] + [[package]] name = "schannel" version = "0.1.28" @@ -13372,6 +13500,12 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" +[[package]] +name = "sdd" +version = "3.0.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "490dcfcbfef26be6800d11870ff2df8774fa6e86d047e3e8c8a76b25655e41ca" + [[package]] name = "sec1" version = "0.7.3" @@ -13646,6 +13780,32 @@ dependencies = [ "serde", ] +[[package]] +name = "serial_test" +version = "3.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0d0b343e184fc3b7bb44dff0705fffcf4b3756ba6aff420dddd8b24ca145e555" +dependencies = [ + "futures-executor", + "futures-util", + "log", + "once_cell", + "parking_lot", + "scc", + "serial_test_derive", +] + +[[package]] +name = "serial_test_derive" +version = "3.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6f50427f258fb77356e4cd4aa0e87e2bd2c66dbcee41dc405282cae2bfc26c83" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.114", +] + [[package]] name = "sha1" version = "0.10.6" diff --git a/rust/Cargo.toml b/rust/Cargo.toml index 546b214939ee5..ef5b20677543e 100644 --- a/rust/Cargo.toml +++ b/rust/Cargo.toml @@ -39,6 +39,8 @@ members = [ "op-reth/crates/reth/", "op-reth/crates/rpc/", "op-reth/crates/storage/", + "op-reth/crates/exex/", + "op-reth/crates/trie/", "op-reth/crates/txpool/", "op-reth/examples/*", @@ -288,6 +290,8 @@ reth-optimism-primitives = { path = "op-reth/crates/primitives/", default-featur reth-op = { path = "op-reth/crates/reth/", default-features = false } reth-optimism-rpc = { path = "op-reth/crates/rpc/" } reth-optimism-storage = { path = "op-reth/crates/storage/" } +reth-optimism-exex = { path 
= "op-reth/crates/exex/" } +reth-optimism-trie = { path = "op-reth/crates/trie/" } reth-optimism-txpool = { path = "op-reth/crates/txpool/" } # ==================== OP-ALLOY INTERNAL CRATES ==================== @@ -331,6 +335,7 @@ reth-ethereum-primitives = { git = "https://github.com/paradigmxyz/reth", rev = reth-evm = { git = "https://github.com/paradigmxyz/reth", rev = "2c5d00f", default-features = false } reth-evm-ethereum = { git = "https://github.com/paradigmxyz/reth", rev = "2c5d00f" } reth-exex = { git = "https://github.com/paradigmxyz/reth", rev = "2c5d00f" } +reth-exex-test-utils = { git = "https://github.com/paradigmxyz/reth", rev = "2c5d00f" } reth-execution-errors = { git = "https://github.com/paradigmxyz/reth", rev = "2c5d00f", default-features = false } reth-execution-types = { git = "https://github.com/paradigmxyz/reth", rev = "2c5d00f", default-features = false } reth-fs-util = { git = "https://github.com/paradigmxyz/reth", rev = "2c5d00f" } @@ -532,7 +537,8 @@ proptest-derive = "0.7" proptest-arbitrary-interop = "0.1.0" rstest = "0.26.1" similar-asserts = { version = "1.7.0", features = ["serde"] } -tempfile = "3.25.0" +tempfile = "3.24.0" +serial_test = "3" test-case = "3" test-fuzz = "7.2.5" diff --git a/rust/deny.toml b/rust/deny.toml index 461d1eda1932b..b8bd5086ad1dd 100644 --- a/rust/deny.toml +++ b/rust/deny.toml @@ -17,6 +17,8 @@ ignore = [ "RUSTSEC-2025-0012", # bincode is unmaintained but still functional; transitive dep from reth-nippy-jar and test-fuzz. "RUSTSEC-2025-0141", + # https://rustsec.org/advisories/RUSTSEC-2026-0002 lru unused directly: https://github.com/alloy-rs/alloy/pull/3460 + "RUSTSEC-2026-0002", ] # This section is considered when running `cargo deny check bans`. 
diff --git a/rust/justfile b/rust/justfile index 76ca9da383578..4b109a8643f59 100644 --- a/rust/justfile +++ b/rust/justfile @@ -130,10 +130,27 @@ check-udeps: cargo +nightly udeps --release --workspace --all-features --all-targets # Run cargo hack for feature powerset checking -hack partition="": +# shuffle: "true" to shuffle package order before partitioning (spreads heavy/light crates more evenly) +# seed: deterministic seed for shuffle (all partition nodes must use the same seed) +hack partition="" shuffle="false" seed="default": #!/usr/bin/env bash set -euo pipefail - cargo hack check --feature-powerset --depth 2 --no-dev-deps {{ if partition != "" { "--partition " + partition } else { "" } }} + if [ "{{partition}}" != "" ]; then + echo "Running cargo hack with partition {{partition}}" + else + echo "Running cargo hack without partition" + fi + + PKG_FLAGS="" + if [ "{{shuffle}}" = "true" ]; then + PKGS=$(cargo metadata --no-deps --format-version 1 \ + | jq -r '.packages[].name' \ + | shuf --random-source=<(openssl enc -aes-256-ctr -pass "pass:{{seed}}" -nosalt /dev/null)) + PKG_FLAGS=$(echo "$PKGS" | sed 's/^/-p /' | tr '\n' ' ') + echo "Shuffled package order (seed={{seed}}): $PKGS" + fi + + cargo hack check --each-feature --no-dev-deps $PKG_FLAGS {{ if partition != "" { "--partition " + partition } else { "" } }} ######################### Documentation ################################ diff --git a/rust/op-reth/crates/cli/Cargo.toml b/rust/op-reth/crates/cli/Cargo.toml index 7cd9474a9db27..2523a6c1e19e8 100644 --- a/rust/op-reth/crates/cli/Cargo.toml +++ b/rust/op-reth/crates/cli/Cargo.toml @@ -37,6 +37,7 @@ reth-node-metrics.workspace = true reth-optimism-primitives.workspace = true reth-optimism-chainspec = { workspace = true, features = ["superchain-configs"] } reth-optimism-consensus.workspace = true +reth-optimism-trie.workspace = true reth-chainspec.workspace = true reth-node-events.workspace = true @@ -108,10 +109,7 @@ jemalloc-symbols = [ tracy = 
["reth-tracing/tracy", "reth-node-core/tracy"] -dev = [ - "dep:proptest", - "reth-cli-commands/arbitrary", -] +dev = ["dep:proptest", "reth-cli-commands/arbitrary"] serde = [ "alloy-consensus/serde", diff --git a/rust/op-reth/crates/cli/src/app.rs b/rust/op-reth/crates/cli/src/app.rs index 22df73b0d90d8..730bda4c2747c 100644 --- a/rust/op-reth/crates/cli/src/app.rs +++ b/rust/op-reth/crates/cli/src/app.rs @@ -114,6 +114,9 @@ where Commands::ReExecute(command) => { runner.run_until_ctrl_c(command.execute::(components)) } + Commands::OpProofs(command) => { + runner.run_blocking_until_ctrl_c(command.execute::()) + } } } diff --git a/rust/op-reth/crates/cli/src/commands/mod.rs b/rust/op-reth/crates/cli/src/commands/mod.rs index 3c08400d5dc6f..9c13243791e81 100644 --- a/rust/op-reth/crates/cli/src/commands/mod.rs +++ b/rust/op-reth/crates/cli/src/commands/mod.rs @@ -14,6 +14,7 @@ use std::{fmt, sync::Arc}; pub mod import; pub mod import_receipts; pub mod init_state; +pub mod op_proofs; #[cfg(feature = "dev")] pub mod test_vectors; @@ -61,6 +62,9 @@ pub enum Commands), + /// Manage storage of historical proofs in expanded trie db in fault proof window. + #[command(name = "proofs")] + OpProofs(op_proofs::Command), } impl< @@ -85,6 +89,7 @@ impl< #[cfg(feature = "dev")] Self::TestVectors(_) => None, Self::ReExecute(cmd) => cmd.chain_spec(), + Self::OpProofs(cmd) => cmd.chain_spec(), } } } diff --git a/rust/op-reth/crates/cli/src/commands/op_proofs/init.rs b/rust/op-reth/crates/cli/src/commands/op_proofs/init.rs new file mode 100644 index 0000000000000..d38745e6fc6f7 --- /dev/null +++ b/rust/op-reth/crates/cli/src/commands/op_proofs/init.rs @@ -0,0 +1,102 @@ +//! Command that initializes the OP proofs storage with the current state of the chain. 
+ +use clap::Parser; +use reth_chainspec::ChainInfo; +use reth_cli::chainspec::ChainSpecParser; +use reth_cli_commands::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs}; +use reth_node_core::version::version_metadata; +use reth_optimism_chainspec::OpChainSpec; +use reth_optimism_primitives::OpPrimitives; +use reth_optimism_trie::{ + InitializationJob, OpProofsStorage, OpProofsStore, db::MdbxProofsStorage, +}; +use reth_provider::{BlockNumReader, DBProvider, DatabaseProviderFactory}; +use std::{path::PathBuf, sync::Arc}; +use tracing::info; + +/// Initializes the proofs storage with the current state of the chain. +/// +/// This command must be run before starting the node with proofs history enabled. +/// It backfills the proofs storage with trie nodes from the current chain state. +#[derive(Debug, Parser)] +pub struct InitCommand { + #[command(flatten)] + env: EnvironmentArgs, + + /// The path to the storage DB for proofs history. + /// + /// This should match the path used when starting the node with + /// `--proofs-history.storage-path`. + #[arg( + long = "proofs-history.storage-path", + value_name = "PROOFS_HISTORY_STORAGE_PATH", + required = true + )] + pub storage_path: PathBuf, +} + +impl> InitCommand { + /// Execute `initialize-op-proofs` command + pub async fn execute>( + self, + ) -> eyre::Result<()> { + info!(target: "reth::cli", "reth {} starting", version_metadata().short_version); + info!(target: "reth::cli", "Initializing OP proofs storage at: {:?}", self.storage_path); + + // Initialize the environment with read-only access + let Environment { provider_factory, .. } = self.env.init::(AccessRights::RO)?; + + // Create the proofs storage + let storage: OpProofsStorage> = Arc::new( + MdbxProofsStorage::new(&self.storage_path) + .map_err(|e| eyre::eyre!("Failed to create MdbxProofsStorage: {e}"))?, + ) + .into(); + + // Check if already initialized + if let Some((block_number, block_hash)) = storage.get_earliest_block_number()? 
{ + info!( + target: "reth::cli", + block_number = block_number, + block_hash = ?block_hash, + "Proofs storage already initialized" + ); + return Ok(()); + } + + // Get the current chain state + let ChainInfo { best_number, best_hash, .. } = provider_factory.chain_info()?; + + info!( + target: "reth::cli", + best_number = best_number, + best_hash = ?best_hash, + "Starting backfill job for current chain state" + ); + + // Run the backfill job + { + let db_provider = + provider_factory.database_provider_ro()?.disable_long_read_transaction_safety(); + let db_tx = db_provider.into_tx(); + + InitializationJob::new(storage, db_tx).run(best_number, best_hash)?; + } + + info!( + target: "reth::cli", + best_number = best_number, + best_hash = ?best_hash, + "Proofs storage initialized successfully" + ); + + Ok(()) + } +} + +impl InitCommand { + /// Returns the underlying chain being used to run this command + pub const fn chain_spec(&self) -> Option<&Arc> { + Some(&self.env.chain) + } +} diff --git a/rust/op-reth/crates/cli/src/commands/op_proofs/mod.rs b/rust/op-reth/crates/cli/src/commands/op_proofs/mod.rs new file mode 100644 index 0000000000000..9f49a288ccb3d --- /dev/null +++ b/rust/op-reth/crates/cli/src/commands/op_proofs/mod.rs @@ -0,0 +1,57 @@ +//! 
OP Proofs management commands + +use clap::{Parser, Subcommand}; +use reth_cli::chainspec::ChainSpecParser; +use reth_cli_commands::common::CliNodeTypes; +use reth_optimism_chainspec::OpChainSpec; +use reth_optimism_primitives::OpPrimitives; +use std::sync::Arc; + +pub mod init; +pub mod prune; +pub mod unwind; + +/// `op-reth op-proofs` command +#[derive(Debug, Parser)] +pub struct Command { + #[command(subcommand)] + command: Subcommands, +} + +impl> Command { + /// Execute `op-proofs` command + pub async fn execute>( + self, + ) -> eyre::Result<()> { + match self.command { + Subcommands::Init(cmd) => cmd.execute::().await, + Subcommands::Prune(cmd) => cmd.execute::().await, + Subcommands::Unwind(cmd) => cmd.execute::().await, + } + } +} + +impl Command { + /// Returns the underlying chain being used to run this command + pub const fn chain_spec(&self) -> Option<&Arc> { + match &self.command { + Subcommands::Init(cmd) => cmd.chain_spec(), + Subcommands::Prune(cmd) => cmd.chain_spec(), + Subcommands::Unwind(cmd) => cmd.chain_spec(), + } + } +} + +/// `op-reth op-proofs` subcommands +#[derive(Debug, Subcommand)] +pub enum Subcommands { + /// Initialize the proofs storage with the current state of the chain + #[command(name = "init")] + Init(init::InitCommand), + /// Prune old proof history to reclaim space + #[command(name = "prune")] + Prune(prune::PruneCommand), + /// Unwind the proofs storage to a specific block + #[command(name = "unwind")] + Unwind(unwind::UnwindCommand), +} diff --git a/rust/op-reth/crates/cli/src/commands/op_proofs/prune.rs b/rust/op-reth/crates/cli/src/commands/op_proofs/prune.rs new file mode 100644 index 0000000000000..fd7a6e8d053e6 --- /dev/null +++ b/rust/op-reth/crates/cli/src/commands/op_proofs/prune.rs @@ -0,0 +1,90 @@ +//! Command that prunes the OP proofs storage. 
+ +use clap::Parser; +use reth_cli::chainspec::ChainSpecParser; +use reth_cli_commands::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs}; +use reth_node_core::version::version_metadata; +use reth_optimism_chainspec::OpChainSpec; +use reth_optimism_primitives::OpPrimitives; +use reth_optimism_trie::{ + OpProofStoragePruner, OpProofsStorage, OpProofsStore, db::MdbxProofsStorage, +}; +use std::{path::PathBuf, sync::Arc}; +use tracing::info; + +/// Prunes the proofs storage by removing old proof history and state updates. +#[derive(Debug, Parser)] +pub struct PruneCommand { + #[command(flatten)] + env: EnvironmentArgs, + + /// The path to the storage DB for proofs history. + #[arg( + long = "proofs-history.storage-path", + value_name = "PROOFS_HISTORY_STORAGE_PATH", + required = true + )] + pub storage_path: PathBuf, + + /// The window to span blocks for proofs history. Value is the number of blocks. + /// Default is 1 month of blocks based on 2 seconds block time. + /// 30 * 24 * 60 * 60 / 2 = `1_296_000` + #[arg( + long = "proofs-history.window", + default_value_t = 1_296_000, + value_name = "PROOFS_HISTORY_WINDOW" + )] + pub proofs_history_window: u64, + + /// The batch size for pruning operations. + #[arg( + long = "proofs-history.prune-batch-size", + default_value_t = 1000, + value_name = "PROOFS_HISTORY_PRUNE_BATCH_SIZE" + )] + pub proofs_history_prune_batch_size: u64, +} + +impl> PruneCommand { + /// Execute [`PruneCommand`]. + pub async fn execute>( + self, + ) -> eyre::Result<()> { + info!(target: "reth::cli", "reth {} starting", version_metadata().short_version); + info!(target: "reth::cli", "Pruning OP proofs storage at: {:?}", self.storage_path); + + // Initialize the environment with read-only access + let Environment { provider_factory, .. 
} = self.env.init::(AccessRights::RO)?; + + let storage: OpProofsStorage> = Arc::new( + MdbxProofsStorage::new(&self.storage_path) + .map_err(|e| eyre::eyre!("Failed to create MdbxProofsStorage: {e}"))?, + ) + .into(); + + let earliest_block = storage.get_earliest_block_number()?; + let latest_block = storage.get_latest_block_number()?; + info!( + target: "reth::cli", + ?earliest_block, + ?latest_block, + "Current proofs storage block range" + ); + + let pruner = OpProofStoragePruner::new( + storage, + provider_factory, + self.proofs_history_window, + self.proofs_history_prune_batch_size, + ); + pruner.run(); + Ok(()) + } +} + +impl PruneCommand { + /// Returns the underlying chain being used to run this command + pub const fn chain_spec(&self) -> Option<&Arc> { + Some(&self.env.chain) + } +} diff --git a/rust/op-reth/crates/cli/src/commands/op_proofs/unwind.rs b/rust/op-reth/crates/cli/src/commands/op_proofs/unwind.rs new file mode 100644 index 0000000000000..f95ffcf4737b2 --- /dev/null +++ b/rust/op-reth/crates/cli/src/commands/op_proofs/unwind.rs @@ -0,0 +1,106 @@ +//! Command that unwinds the OP proofs storage to a specific block number. + +use clap::Parser; +use reth_cli::chainspec::ChainSpecParser; +use reth_cli_commands::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs}; +use reth_node_core::version::version_metadata; +use reth_optimism_chainspec::OpChainSpec; +use reth_optimism_primitives::OpPrimitives; +use reth_optimism_trie::{OpProofsStorage, OpProofsStore, db::MdbxProofsStorage}; +use reth_provider::{BlockReader, TransactionVariant}; +use std::{path::PathBuf, sync::Arc}; +use tracing::{info, warn}; + +/// Unwinds the proofs storage to a specific block number. +/// +/// This command removes all proof history and state updates after the target block number. +#[derive(Debug, Parser)] +pub struct UnwindCommand { + #[command(flatten)] + env: EnvironmentArgs, + + /// The path to the storage DB for proofs history. 
+ #[arg( + long = "proofs-history.storage-path", + value_name = "PROOFS_HISTORY_STORAGE_PATH", + required = true + )] + pub storage_path: PathBuf, + + /// The target block number to unwind to. + /// + /// All history *after* this block will be removed. + #[arg(long, value_name = "TARGET_BLOCK")] + pub target: u64, +} + +impl UnwindCommand { + /// Validates that the target block number is within a valid range for unwinding. + fn validate_unwind_range( + &self, + storage: &OpProofsStorage, + ) -> eyre::Result { + let (Some((earliest, _)), Some((latest, _))) = + (storage.get_earliest_block_number()?, storage.get_latest_block_number()?) + else { + warn!(target: "reth::cli", "No blocks found in proofs storage. Nothing to unwind."); + return Ok(false); + }; + + if self.target <= earliest { + warn!(target: "reth::cli", unwind_target = ?self.target, ?earliest, "Target block is less than the earliest block in proofs storage. Nothing to unwind."); + return Ok(false); + } + + if self.target > latest { + warn!(target: "reth::cli", unwind_target = ?self.target, ?latest, "Target block is not less than the latest block in proofs storage. Nothing to unwind."); + return Ok(false); + } + + Ok(true) + } +} + +impl> UnwindCommand { + /// Execute [`UnwindCommand`]. + pub async fn execute>( + self, + ) -> eyre::Result<()> { + info!(target: "reth::cli", "reth {} starting", version_metadata().short_version); + info!(target: "reth::cli", "Unwinding OP proofs storage at: {:?}", self.storage_path); + + // Initialize the environment with read-only access + let Environment { provider_factory, .. } = self.env.init::(AccessRights::RO)?; + + // Create the proofs storage + let storage: OpProofsStorage> = Arc::new( + MdbxProofsStorage::new(&self.storage_path) + .map_err(|e| eyre::eyre!("Failed to create MdbxProofsStorage: {e}"))?, + ) + .into(); + + // Validate that the target block is within a valid range for unwinding + if !self.validate_unwind_range(&storage)? 
{ + return Ok(()); + } + + // Get the target block from the main database + let block = provider_factory + .recovered_block(self.target.into(), TransactionVariant::NoHash)? + .ok_or_else(|| { + eyre::eyre!("Target block {} not found in the main database", self.target) + })?; + + info!(target: "reth::cli", block_number = block.number, block_hash = %block.hash(), "Unwinding to target block"); + storage.unwind_history(block.block_with_parent())?; + + Ok(()) + } +} + +impl UnwindCommand { + /// Returns the underlying chain being used to run this command + pub const fn chain_spec(&self) -> Option<&Arc> { + Some(&self.env.chain) + } +} diff --git a/rust/op-reth/crates/exex/Cargo.toml b/rust/op-reth/crates/exex/Cargo.toml new file mode 100644 index 0000000000000..ccfdb0b202c34 --- /dev/null +++ b/rust/op-reth/crates/exex/Cargo.toml @@ -0,0 +1,68 @@ +[package] +name = "reth-optimism-exex" +version = "1.10.2" +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage = "https://paradigmxyz.github.io/reth" +repository = "https://github.com/paradigmxyz/reth" +description = "Execution extensions for OP-Reth" + +[lints] +workspace = true + +[dependencies] +# reth +reth-exex.workspace = true +reth-execution-types.workspace = true +reth-node-api.workspace = true +reth-trie.workspace = true +reth-provider.workspace = true + +# op-reth +# proofs exex handles `TrieUpdates` in notifications +reth-optimism-trie = { workspace = true, features = ["serde-bincode-compat"] } + +# alloy +alloy-consensus.workspace = true +alloy-eips.workspace = true + +# misc +eyre.workspace = true +futures-util.workspace = true +tracing.workspace = true +tokio.workspace = true + +[dev-dependencies] +futures.workspace = true +reth-db = { workspace = true, features = ["op", "test-utils"] } +reth-node-builder.workspace = true +reth-optimism-node.workspace = true +reth-optimism-chainspec.workspace = true +reth-primitives-traits.workspace = true 
+reth-ethereum-primitives.workspace = true +reth-exex-test-utils.workspace = true +tempfile.workspace = true + +[features] +test-utils = [ + "reth-db/test-utils", + "reth-trie/test-utils", + "reth-node-builder/test-utils", + "reth-optimism-node/test-utils", + "reth-provider/test-utils", + "reth-ethereum-primitives/test-utils", + "reth-primitives-traits/test-utils", +] +metrics = [ + "reth-optimism-trie/metrics", + "reth-trie/metrics" +] + +[package.metadata.cargo-udeps.ignore] +development = [ + "reth-node-builder", + "reth-optimism-node", + "reth-optimism-chainspec", + "tempfile.workspace", +] diff --git a/rust/op-reth/crates/exex/src/lib.rs b/rust/op-reth/crates/exex/src/lib.rs new file mode 100644 index 0000000000000..0e12a14e2d82b --- /dev/null +++ b/rust/op-reth/crates/exex/src/lib.rs @@ -0,0 +1,1124 @@ +//! ExEx unique for OP-Reth. See also [`reth_exex`] for more op-reth execution extensions. + +#![doc( + html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", + html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", + issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" +)] +#![cfg_attr(docsrs, feature(doc_cfg))] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] + +use alloy_consensus::BlockHeader; +use alloy_eips::eip1898::BlockWithParent; +use futures_util::TryStreamExt; +use reth_execution_types::Chain; +use reth_exex::{ExExContext, ExExEvent, ExExNotification}; +use reth_node_api::{FullNodeComponents, NodePrimitives, NodeTypes}; +use reth_optimism_trie::{ + OpProofStoragePrunerTask, OpProofsStorage, OpProofsStore, live::LiveTrieCollector, +}; +use reth_provider::{BlockNumReader, BlockReader, TransactionVariant}; +use reth_trie::{HashedPostStateSorted, SortedTrieData, updates::TrieUpdatesSorted}; +use std::{sync::Arc, time::Duration}; +use tokio::{sync::watch, task, time}; +use tracing::{debug, error, info}; + +// Safety threshold for maximum blocks to prune 
automatically on startup. +// If the required prune exceeds this, the node will error out and require manual pruning. Default +// is 1000 blocks. +const MAX_PRUNE_BLOCKS_STARTUP: u64 = 1000; + +/// How many blocks to process in a single batch before yielding. Default is 50 blocks. +const SYNC_BLOCKS_BATCH_SIZE: usize = 50; + +/// How close to tip before we process blocks in real-time vs batch. Default is 1024 blocks. +const REAL_TIME_BLOCKS_THRESHOLD: u64 = 1024; + +/// How long to sleep when sync task is caught up. Default is 5 seconds. +const SYNC_IDLE_SLEEP_SECS: u64 = 5; + +/// Default proofs history window: 1 month of blocks at 2s block time +const DEFAULT_PROOFS_HISTORY_WINDOW: u64 = 1_296_000; + +/// Default interval between proof-storage prune runs. Default is 15 seconds. +const DEFAULT_PRUNE_INTERVAL: Duration = Duration::from_secs(15); + +/// Default verification interval: disabled +const DEFAULT_VERIFICATION_INTERVAL: u64 = 0; // disabled + +/// Builder for [`OpProofsExEx`]. +#[derive(Debug)] +pub struct OpProofsExExBuilder +where + Node: FullNodeComponents, +{ + ctx: ExExContext, + storage: OpProofsStorage, + proofs_history_window: u64, + proofs_history_prune_interval: Duration, + verification_interval: u64, +} + +impl OpProofsExExBuilder +where + Node: FullNodeComponents, +{ + /// Create a new builder with required parameters and defaults. + pub const fn new(ctx: ExExContext, storage: OpProofsStorage) -> Self { + Self { + ctx, + storage, + proofs_history_window: DEFAULT_PROOFS_HISTORY_WINDOW, + proofs_history_prune_interval: DEFAULT_PRUNE_INTERVAL, + verification_interval: DEFAULT_VERIFICATION_INTERVAL, + } + } + + /// Sets the window to span blocks for proofs history. + pub const fn with_proofs_history_window(mut self, window: u64) -> Self { + self.proofs_history_window = window; + self + } + + /// Sets the interval between proof-storage prune runs. 
+ pub const fn with_proofs_history_prune_interval(mut self, interval: Duration) -> Self { + self.proofs_history_prune_interval = interval; + self + } + + /// Sets the verification interval. + pub const fn with_verification_interval(mut self, interval: u64) -> Self { + self.verification_interval = interval; + self + } + + /// Builds the [`OpProofsExEx`]. + pub fn build(self) -> OpProofsExEx { + OpProofsExEx { + ctx: self.ctx, + storage: self.storage, + proofs_history_window: self.proofs_history_window, + proofs_history_prune_interval: self.proofs_history_prune_interval, + verification_interval: self.verification_interval, + } + } +} + +/// OP Proofs ExEx - processes blocks and tracks state changes within fault proof window. +/// +/// Saves and serves trie nodes to make proofs faster. This handles the process of +/// saving the current state, new blocks as they're added, and serving proof RPCs +/// based on the saved data. +/// +/// # Examples +/// +/// The following example shows how to install the ExEx with either in-memory or persistent storage. +/// This can be used when launching an OP-Reth node via a binary. +/// We are currently using it in optimism/bin/src/main.rs. 
+/// +/// ``` +/// use futures_util::FutureExt; +/// use reth_db::test_utils::create_test_rw_db; +/// use reth_node_api::NodeTypesWithDBAdapter; +/// use reth_node_builder::{NodeBuilder, NodeConfig}; +/// use reth_optimism_chainspec::BASE_MAINNET; +/// use reth_optimism_exex::OpProofsExEx; +/// use reth_optimism_node::{OpNode, args::RollupArgs}; +/// use reth_optimism_trie::{InMemoryProofsStorage, OpProofsStorage, db::MdbxProofsStorage}; +/// use reth_provider::providers::BlockchainProvider; +/// use std::{sync::Arc, time::Duration}; +/// +/// let config = NodeConfig::new(BASE_MAINNET.clone()); +/// let db = create_test_rw_db(); +/// let args = RollupArgs::default(); +/// let op_node = OpNode::new(args); +/// +/// // Create in-memory or persistent storage +/// let storage: OpProofsStorage> = +/// Arc::new(InMemoryProofsStorage::new()).into(); +/// +/// // Example for creating persistent storage +/// # let temp_dir = tempfile::tempdir().expect("Failed to create temp dir"); +/// # let storage_path = temp_dir.path().join("proofs_storage"); +/// +/// # let storage: OpProofsStorage> = Arc::new( +/// # MdbxProofsStorage::new(&storage_path).expect("Failed to create MdbxProofsStorage"), +/// # ).into(); +/// +/// let storage_exec = storage.clone(); +/// let proofs_history_window = 1_296_000u64; +/// let proofs_history_prune_interval = Duration::from_secs(3600); +/// +/// // Verification interval: perform full execution every N blocks +/// let verification_interval = 0; // 0 = disabled, 100 = verify every 100 blocks +/// +/// // Can also use install_exex_if along with a boolean flag +/// // Set this based on your configuration or CLI args +/// let _builder = NodeBuilder::new(config) +/// .with_database(db) +/// .with_types_and_provider::>>() +/// .with_components(op_node.components()) +/// .install_exex("proofs-history", move |exex_context| async move { +/// Ok(OpProofsExEx::builder(exex_context, storage_exec) +/// .with_proofs_history_window(proofs_history_window) +/// 
.with_proofs_history_prune_interval(proofs_history_prune_interval) +/// .with_verification_interval(verification_interval) +/// .build() +/// .run() +/// .boxed()) +/// }) +/// .on_node_started(|_full_node| Ok(())) +/// .check_launch(); +/// ``` +#[derive(Debug)] +pub struct OpProofsExEx +where + Node: FullNodeComponents, +{ + /// The ExEx context containing the node related utilities e.g. provider, notifications, + /// events. + ctx: ExExContext, + /// The type of storage DB. + storage: OpProofsStorage, + /// The window to span blocks for proofs history. Value is the number of blocks, received as + /// cli arg. + proofs_history_window: u64, + /// Interval between proof-storage prune runs + proofs_history_prune_interval: Duration, + /// Verification interval: perform full block execution every N blocks for data integrity. + /// If 0, verification is disabled (always use fast path when available). + /// If 1, verification is always enabled (always execute blocks). + verification_interval: u64, +} + +impl OpProofsExEx +where + Node: FullNodeComponents, +{ + /// Create a new `OpProofsExEx` instance. + pub fn new(ctx: ExExContext, storage: OpProofsStorage) -> Self { + OpProofsExExBuilder::new(ctx, storage).build() + } + + /// Create a new builder for `OpProofsExEx`. 
+ pub const fn builder( + ctx: ExExContext, + storage: OpProofsStorage, + ) -> OpProofsExExBuilder { + OpProofsExExBuilder::new(ctx, storage) + } +} + +impl OpProofsExEx +where + Node: FullNodeComponents>, + Primitives: NodePrimitives, + Storage: OpProofsStore + Clone + 'static, +{ + /// Main execution loop for the ExEx + pub async fn run(mut self) -> eyre::Result<()> { + self.ensure_initialized()?; + let sync_target_tx = self.spawn_sync_task(); + + let prune_task = OpProofStoragePrunerTask::new( + self.storage.clone(), + self.ctx.provider().clone(), + self.proofs_history_window, + self.proofs_history_prune_interval, + ); + self.ctx + .task_executor() + .spawn_with_graceful_shutdown_signal(|signal| Box::pin(prune_task.run(signal))); + + let collector = LiveTrieCollector::new( + self.ctx.evm_config().clone(), + self.ctx.provider().clone(), + &self.storage, + ); + + while let Some(notification) = self.ctx.notifications.try_next().await? { + self.handle_notification(notification, &collector, &sync_target_tx)?; + } + + Ok(()) + } + + /// Ensure proofs storage is initialized + fn ensure_initialized(&self) -> eyre::Result<()> { + // Check if proofs storage is initialized + let earliest_block_number = match self.storage.get_earliest_block_number()? { + Some((n, _)) => n, + None => { + return Err(eyre::eyre!( + "Proofs storage not initialized. Please run 'op-reth initialize-op-proofs --proofs-history.storage-path ' first." + )); + } + }; + + let latest_block_number = match self.storage.get_latest_block_number()? { + Some((n, _)) => n, + None => { + return Err(eyre::eyre!( + "Proofs storage not initialized. Please run 'op-reth initialize-op-proofs --proofs-history.storage-path ' first." + )); + } + }; + + // Check if we have accumulated too much history for the configured window. + // If the gap between what we have and what we want to keep is too large, the auto-pruner + // will stall the node. 
+ let target_earliest = latest_block_number.saturating_sub(self.proofs_history_window); + if target_earliest > earliest_block_number { + let blocks_to_prune = target_earliest - earliest_block_number; + if blocks_to_prune > MAX_PRUNE_BLOCKS_STARTUP { + return Err(eyre::eyre!( + "Configuration requires pruning {} blocks, which exceeds the safety threshold of {}. \ + Huge prune operations can stall the node. \ + Please run 'op-reth proofs prune' manually before starting the node.", + blocks_to_prune, + MAX_PRUNE_BLOCKS_STARTUP + )); + } + } + + // Need to update the earliest block metric on startup as this is not called frequently and + // can show outdated info. When metrics are disabled, this is a no-op. + #[cfg(feature = "metrics")] + { + self.storage + .metrics() + .block_metrics() + .earliest_number + .set(earliest_block_number as f64); + } + + Ok(()) + } + + /// Spawn the background sync task and return the target sender + fn spawn_sync_task(&self) -> watch::Sender { + let (sync_target_tx, sync_target_rx) = watch::channel(0u64); + + let task_storage = self.storage.clone(); + let task_provider = self.ctx.provider().clone(); + let task_evm_config = self.ctx.evm_config().clone(); + + self.ctx.task_executor().spawn_critical_task( + "optimism::exex::proofs_storage_sync_loop", + async move { + let storage = task_storage.clone(); + let task_collector = + LiveTrieCollector::new(task_evm_config, task_provider.clone(), &storage); + Self::sync_loop(sync_target_rx, task_storage, task_provider, &task_collector).await; + }, + ); + + sync_target_tx + } + + /// Background sync loop that processes blocks up to the target + async fn sync_loop( + mut sync_target_rx: watch::Receiver, + storage: OpProofsStorage, + provider: Node::Provider, + collector: &LiveTrieCollector<'_, Node::Evm, Node::Provider, Storage>, + ) { + debug!(target: "optimism::exex", "Starting proofs storage sync loop"); + + loop { + let target = *sync_target_rx.borrow_and_update(); + let latest = match 
storage.get_latest_block_number() { + Ok(Some((n, _))) => n, + Ok(None) => { + error!(target: "optimism::exex", "No blocks stored in proofs storage during sync loop"); + continue; + } + Err(e) => { + error!(target: "optimism::exex", error = ?e, "Failed to get latest block"); + continue; + } + }; + + if latest >= target { + time::sleep(Duration::from_secs(SYNC_IDLE_SLEEP_SECS)).await; + continue; + } + + // Process one batch + if let Err(e) = + Self::process_batch(latest, target, &provider, collector, SYNC_BLOCKS_BATCH_SIZE) + { + error!(target: "optimism::exex", error = ?e, "Batch processing failed"); + } + + // Yield to allow other tasks to run + debug!(target: "optimism::exex", latest_stored = latest, target, "Batch processed, yielding"); + task::yield_now().await; + } + } + + /// Process a batch of blocks from start to target (up to `batch_size`) + fn process_batch( + start: u64, + target: u64, + provider: &Node::Provider, + collector: &LiveTrieCollector<'_, Node::Evm, Node::Provider, Storage>, + batch_size: usize, + ) -> eyre::Result<()> { + let end = (start + batch_size as u64).min(target); + debug!( + target: "optimism::exex", + start, + end, + "Processing proofs storage sync batch" + ); + + for block_num in (start + 1)..=end { + let block = provider + .recovered_block(block_num.into(), TransactionVariant::NoHash)? + .ok_or_else(|| eyre::eyre!("Missing block {}", block_num))?; + + collector.execute_and_store_block_updates(&block)?; + } + + Ok(()) + } + + fn handle_notification( + &self, + notification: ExExNotification, + collector: &LiveTrieCollector<'_, Node::Evm, Node::Provider, Storage>, + sync_target_tx: &watch::Sender, + ) -> eyre::Result<()> { + let latest_stored = match self.storage.get_latest_block_number()? 
{ + Some((n, _)) => n, + None => { + return Err(eyre::eyre!("No blocks stored in proofs storage")); + } + }; + + match ¬ification { + ExExNotification::ChainCommitted { new } => { + self.handle_chain_committed(new.clone(), latest_stored, collector, sync_target_tx)? + } + ExExNotification::ChainReorged { old, new } => { + self.handle_chain_reorged(old.clone(), new.clone(), latest_stored, collector)? + } + ExExNotification::ChainReverted { old } => { + self.handle_chain_reverted(old.clone(), latest_stored, collector)? + } + } + + if let Some(committed_chain) = notification.committed_chain() { + self.ctx.events.send(ExExEvent::FinishedHeight(committed_chain.tip().num_hash()))?; + } + + Ok(()) + } + + fn handle_chain_committed( + &self, + new: Arc>, + latest_stored: u64, + collector: &LiveTrieCollector<'_, Node::Evm, Node::Provider, Storage>, + sync_target_tx: &watch::Sender, + ) -> eyre::Result<()> { + debug!( + target: "optimism::exex", + block_number = new.tip().number(), + block_hash = ?new.tip().hash(), + "ChainCommitted notification received", + ); + + // If tip is not newer than what we have, nothing to do. 
+ if new.tip().number() <= latest_stored { + debug!( + target: "optimism::exex", + block_number = new.tip().number(), + latest_stored, + "Already processed, skipping" + ); + return Ok(()); + } + + let best_block = self.ctx.provider().best_block_number()?; + let is_sequential = new.tip().number() == latest_stored + 1; + let is_near_tip = + best_block.saturating_sub(new.tip().number()) < REAL_TIME_BLOCKS_THRESHOLD; + + if is_sequential && is_near_tip { + debug!( + target: "optimism::exex", + block_number = new.tip().number(), + latest_stored, + best_block, + "Processing in real-time" + ); + + // Process each block from latest_stored + 1 to tip + let start = latest_stored.saturating_add(1); + for block_number in start..=new.tip().number() { + self.process_block(block_number, &new, collector)?; + } + } else { + debug!( + target: "optimism::exex", + block_number = new.tip().number(), + latest_stored, + best_block, + is_sequential, + is_near_tip, + "Scheduling batch processing via sync task" + ); + + // Update the sync target to the new tip + sync_target_tx.send(new.tip().number())?; + } + + Ok(()) + } + + /// Process a single block - either from chain or provider + fn process_block( + &self, + block_number: u64, + chain: &Chain, + collector: &LiveTrieCollector<'_, Node::Evm, Node::Provider, Storage>, + ) -> eyre::Result<()> { + // Check if this block should be verified via full execution + let should_verify = self.verification_interval > 0 && + block_number.is_multiple_of(self.verification_interval); + + // Try to get block data from the chain first + // 1. Fast Path: Try to use pre-computed state from the notification + if let Some(block) = chain.blocks().get(&block_number) { + // Check if we have BOTH trie updates and hashed state. + // If either is missing, we fall back to execution to ensure data integrity. 
+ if let Some((trie_updates, hashed_state)) = chain.trie_data_at(block_number).map(|d| { + let SortedTrieData { hashed_state, trie_updates } = d.get(); + (trie_updates, hashed_state) + }) { + // Use fast path only if we're not scheduled to verify this block + if !should_verify { + debug!( + target: "optimism::exex", + block_number, + "Using pre-computed state updates from notification" + ); + + collector.store_block_updates( + block.block_with_parent(), + (**trie_updates).clone(), + (**hashed_state).clone(), + )?; + + return Ok(()); + } + + info!( + target: "optimism::exex", + block_number, + verification_interval = self.verification_interval, + "Periodic verification: performing full block execution" + ); + } + + debug!( + target: "optimism::exex", + block_number, + "Block present in notification but state updates missing, falling back to execution" + ); + } + + // 2. Slow Path: Block not in chain (or state missing), fetch from provider and execute + debug!( + target: "optimism::exex", + block_number, + "Fetching block from provider for execution", + ); + + let block = self + .ctx + .provider() + .recovered_block(block_number.into(), TransactionVariant::NoHash)? 
+ .ok_or_else(|| eyre::eyre!("Missing block {} in provider", block_number))?; + + collector.execute_and_store_block_updates(&block)?; + Ok(()) + } + + fn handle_chain_reorged( + &self, + old: Arc>, + new: Arc>, + latest_stored: u64, + collector: &LiveTrieCollector<'_, Node::Evm, Node::Provider, Storage>, + ) -> eyre::Result<()> { + info!( + old_block_number = old.tip().number(), + old_block_hash = ?old.tip().hash(), + new_block_number = new.tip().number(), + new_block_hash = ?new.tip().hash(), + "ChainReorged notification received", + ); + + if old.first().number() > latest_stored { + debug!(target: "optimism::exex", "Reorg beyond stored blocks, skipping"); + return Ok(()); + } + + // find the common ancestor + let mut block_updates: Vec<( + BlockWithParent, + Arc, + Arc, + )> = Vec::with_capacity(new.len()); + for block_number in new.blocks().keys() { + // verify if the fork point matches + if old.fork_block() != new.fork_block() { + return Err(eyre::eyre!( + "Fork blocks do not match: old fork block {:?}, new fork block {:?}", + old.fork_block(), + new.fork_block() + )); + } + + let block = new + .blocks() + .get(block_number) + .ok_or_else(|| eyre::eyre!("Missing block {} in new chain", block_number))?; + let trie_data = new + .trie_data_at(*block_number) + .ok_or_else(|| { + eyre::eyre!("Missing Trie data for block {} in new chain", block_number) + })? 
+ .get(); + let trie_updates = &trie_data.trie_updates; + let hashed_state = &trie_data.hashed_state; + + block_updates.push(( + block.block_with_parent(), + trie_updates.clone(), + hashed_state.clone(), + )); + } + + collector.unwind_and_store_block_updates(block_updates)?; + + Ok(()) + } + + fn handle_chain_reverted( + &self, + old: Arc>, + latest_stored: u64, + collector: &LiveTrieCollector<'_, Node::Evm, Node::Provider, Storage>, + ) -> eyre::Result<()> { + info!( + target: "optimism::exex", + old_block_number = old.tip().number(), + old_block_hash = ?old.tip().hash(), + "ChainReverted notification received", + ); + + if old.first().number() > latest_stored { + debug!( + target: "optimism::exex", + first_block_number = old.first().number(), + latest_stored = latest_stored, + "Fork block number is greater than latest stored, skipping", + ); + return Ok(()); + } + + collector.unwind_history(old.first().block_with_parent())?; + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use alloy_consensus::private::alloy_primitives::B256; + use alloy_eips::{BlockNumHash, NumHash, eip1898::BlockWithParent}; + use reth_db::test_utils::tempdir_path; + use reth_ethereum_primitives::{Block, Receipt}; + use reth_execution_types::{Chain, ExecutionOutcome}; + use reth_optimism_trie::{ + BlockStateDiff, OpProofsStorage, OpProofsStore, db::MdbxProofsStorage, + }; + use reth_primitives_traits::RecoveredBlock; + use reth_trie::{HashedPostStateSorted, LazyTrieData, updates::TrieUpdatesSorted}; + use std::{collections::BTreeMap, default::Default, sync::Arc, time::Duration}; + + // ------------------------------------------------------------------------- + // Helpers: deterministic blocks and deterministic Chain with precomputed updates + // ------------------------------------------------------------------------- + fn b256(byte: u8) -> B256 { + B256::new([byte; 32]) + } + + // deterministic hash from block number: 0 -> 0x00.., 1 -> 0x01.., etc. 
+ fn hash_for_num(num: u64) -> B256 { + // if you only care about small test numbers, this is enough: + b256(num as u8) + + // If you want to avoid wrapping when num > 255, use something like: + // let mut out = [0u8; 32]; + // out[0..8].copy_from_slice(&num.to_be_bytes()); + // B256::new(out) + } + + fn mk_block(num: u64) -> RecoveredBlock { + let mut b: RecoveredBlock = Default::default(); + b.set_block_number(num); + b.set_hash(hash_for_num(num)); + b.set_parent_hash(hash_for_num(num - 1)); + b + } + + fn mk_chain_with_updates( + from: u64, + to: u64, + hash_override: Option, + ) -> Chain { + let mut blocks: Vec> = Vec::new(); + let mut trie_data = BTreeMap::new(); + + for n in from..=to { + let mut b = mk_block(n); + if let Some(hash) = hash_override { + b.set_hash(hash); + } + blocks.push(b); + + let data = LazyTrieData::ready( + Arc::new(HashedPostStateSorted::default()), + Arc::new(TrieUpdatesSorted::default()), + ); + trie_data.insert(n, data); + } + + let execution_outcome: ExecutionOutcome = ExecutionOutcome { + bundle: Default::default(), + receipts: Vec::new(), + requests: Vec::new(), + first_block: from, + }; + + Chain::new(blocks, execution_outcome, trie_data) + } + + // Init_storage to the genesis block + fn init_storage(storage: OpProofsStorage) { + let genesis_block = NumHash::new(0, b256(0x00)); + storage + .set_earliest_block_number(genesis_block.number, genesis_block.hash) + .expect("set earliest"); + storage + .store_trie_updates( + BlockWithParent::new(genesis_block.hash, genesis_block), + BlockStateDiff::default(), + ) + .expect("store trie update"); + } + + // Initialize exex with config + fn build_test_exex( + ctx: ExExContext, + storage: OpProofsStorage, + ) -> OpProofsExEx + where + NodeT: FullNodeComponents, + Store: OpProofsStore + Clone + 'static, + { + OpProofsExEx::builder(ctx, storage) + .with_proofs_history_window(20) + .with_proofs_history_prune_interval(Duration::from_secs(3600)) + .with_verification_interval(1000) + .build() + } 
+ + #[tokio::test] + async fn handle_notification_chain_committed() { + // MDBX proofs storage + let dir = tempdir_path(); + let store = Arc::new(MdbxProofsStorage::new(dir.as_path()).expect("env")); + let proofs: OpProofsStorage> = store.clone().into(); + + init_storage(proofs.clone()); + + let (ctx, _handle) = + reth_exex_test_utils::test_exex_context().await.expect("exex test context"); + + let collector = LiveTrieCollector::new( + ctx.components.components.evm_config.clone(), + ctx.components.provider.clone(), + &proofs, + ); + let exex = build_test_exex(ctx, proofs.clone()); + + // Notification: chain committed 1..5 + let new_chain = Arc::new(mk_chain_with_updates(1, 1, None)); + let notif = ExExNotification::ChainCommitted { new: new_chain }; + + let (sync_target_tx, _) = tokio::sync::watch::channel(0u64); + + exex.handle_notification(notif, &collector, &sync_target_tx).expect("handle chain commit"); + + let latest = proofs.get_latest_block_number().expect("get latest block").expect("ok").0; + assert_eq!(latest, 1); + } + + #[tokio::test] + async fn handle_notification_chain_committed_skips_already_processed() { + // MDBX proofs storage + let dir = tempdir_path(); + let store = Arc::new(MdbxProofsStorage::new(dir.as_path()).expect("env")); + let proofs: OpProofsStorage> = store.clone().into(); + + init_storage(proofs.clone()); + + let (ctx, _handle) = + reth_exex_test_utils::test_exex_context().await.expect("exex test context"); + + let collector = LiveTrieCollector::new( + ctx.components.components.evm_config.clone(), + ctx.components.provider.clone(), + &proofs, + ); + + let exex = build_test_exex(ctx, proofs.clone()); + + let (sync_target_tx, _) = tokio::sync::watch::channel(0u64); + // Process blocks 1..5 sequentially to trigger real-time path (synchronous) + for i in 1..=5 { + let new_chain = Arc::new(mk_chain_with_updates(i, i, None)); + let notif = ExExNotification::ChainCommitted { new: new_chain }; + exex.handle_notification(notif, &collector, 
&sync_target_tx) + .expect("handle chain commit"); + } + + let latest = proofs.get_latest_block_number().expect("get latest block").expect("ok").0; + assert_eq!(latest, 5); + + // Try to handle already processed notification + let new_chain = Arc::new(mk_chain_with_updates(5, 5, Some(hash_for_num(10)))); + let notif = ExExNotification::ChainCommitted { new: new_chain }; + exex.handle_notification(notif, &collector, &sync_target_tx).expect("handle chain commit"); + let latest = proofs.get_latest_block_number().expect("get latest block").expect("ok"); + assert_eq!(latest.0, 5); + assert_eq!(latest.1, hash_for_num(5)); // block was not updated + } + + #[tokio::test] + async fn handle_notification_chain_reorged() { + // MDBX proofs storage + let dir = tempdir_path(); + let store = Arc::new(MdbxProofsStorage::new(dir.as_path()).expect("env")); + let proofs: OpProofsStorage> = store.clone().into(); + + init_storage(proofs.clone()); + + let (ctx, _handle) = + reth_exex_test_utils::test_exex_context().await.expect("exex test context"); + + let collector = LiveTrieCollector::new( + ctx.components.components.evm_config.clone(), + ctx.components.provider.clone(), + &proofs, + ); + + let exex = build_test_exex(ctx, proofs.clone()); + + let (sync_target_tx, _) = tokio::sync::watch::channel(0u64); + + for i in 1..=10 { + let new_chain = Arc::new(mk_chain_with_updates(i, i, None)); + let notif = ExExNotification::ChainCommitted { new: new_chain }; + exex.handle_notification(notif, &collector, &sync_target_tx) + .expect("handle chain commit"); + } + + let latest = proofs.get_latest_block_number().expect("get latest block").expect("ok").0; + assert_eq!(latest, 10); + + // Now the tip is 10, and we want to reorg from block 6..12 + let old_chain = Arc::new(mk_chain_with_updates(6, 10, None)); + let new_chain = Arc::new(mk_chain_with_updates(6, 12, None)); + + // Notification: chain reorged 6..12 + let notif = ExExNotification::ChainReorged { new: new_chain, old: old_chain }; + + 
exex.handle_notification(notif, &collector, &sync_target_tx) + .expect("handle chain re-orged"); + let latest = proofs.get_latest_block_number().expect("get latest block").expect("ok").0; + assert_eq!(latest, 12); + } + + #[tokio::test] + async fn handle_notification_chain_reorged_skips_beyond_stored_blocks() { + // MDBX proofs storage + let dir = tempdir_path(); + let store = Arc::new(MdbxProofsStorage::new(dir.as_path()).expect("env")); + let proofs: OpProofsStorage> = store.clone().into(); + + init_storage(proofs.clone()); + + let (ctx, _handle) = + reth_exex_test_utils::test_exex_context().await.expect("exex test context"); + + let collector = LiveTrieCollector::new( + ctx.components.components.evm_config.clone(), + ctx.components.provider.clone(), + &proofs, + ); + + let exex = build_test_exex(ctx, proofs.clone()); + + let (sync_target_tx, _) = tokio::sync::watch::channel(0u64); + + for i in 1..=10 { + let new_chain = Arc::new(mk_chain_with_updates(i, i, None)); + let notif = ExExNotification::ChainCommitted { new: new_chain }; + + exex.handle_notification(notif, &collector, &sync_target_tx) + .expect("handle chain commit"); + } + + let latest = proofs.get_latest_block_number().expect("get latest block").expect("ok").0; + assert_eq!(latest, 10); + + // Now the tip is 10, and we want to reorg from block 12..15 + let old_chain = Arc::new(mk_chain_with_updates(12, 15, None)); + let new_chain = Arc::new(mk_chain_with_updates(10, 20, None)); + + // Notification: chain reorged 12..15 + let notif = ExExNotification::ChainReorged { new: new_chain, old: old_chain }; + + exex.handle_notification(notif, &collector, &sync_target_tx) + .expect("handle chain re-orged"); + let latest = proofs.get_latest_block_number().expect("get latest block").expect("ok").0; + assert_eq!(latest, 10); + } + + #[tokio::test] + async fn handle_notification_chain_reverted() { + // MDBX proofs storage + let dir = tempdir_path(); + let store = 
Arc::new(MdbxProofsStorage::new(dir.as_path()).expect("env")); + let proofs: OpProofsStorage> = store.clone().into(); + + init_storage(proofs.clone()); + + let (ctx, _handle) = + reth_exex_test_utils::test_exex_context().await.expect("exex test context"); + + let collector = LiveTrieCollector::new( + ctx.components.components.evm_config.clone(), + ctx.components.provider.clone(), + &proofs, + ); + + let exex = build_test_exex(ctx, proofs.clone()); + + let (sync_target_tx, _) = tokio::sync::watch::channel(0u64); + + for i in 1..=10 { + let new_chain = Arc::new(mk_chain_with_updates(i, i, None)); + let notif = ExExNotification::ChainCommitted { new: new_chain }; + + exex.handle_notification(notif, &collector, &sync_target_tx) + .expect("handle chain commit"); + } + + let latest = proofs.get_latest_block_number().expect("get latest block").expect("ok").0; + assert_eq!(latest, 10); + + // Now the tip is 10, and we want to revert from block 9..10 + let old_chain = Arc::new(mk_chain_with_updates(9, 10, None)); + + // Notification: chain reverted 9..10 + let notif = ExExNotification::ChainReverted { old: old_chain }; + + exex.handle_notification(notif, &collector, &sync_target_tx) + .expect("handle chain reverted"); + let latest = proofs.get_latest_block_number().expect("get latest block").expect("ok").0; + assert_eq!(latest, 8); + } + + #[tokio::test] + async fn handle_notification_chain_reverted_skips_beyond_stored_blocks() { + // MDBX proofs storage + let dir = tempdir_path(); + let store = Arc::new(MdbxProofsStorage::new(dir.as_path()).expect("env")); + let proofs: OpProofsStorage> = store.clone().into(); + + init_storage(proofs.clone()); + + let (ctx, _handle) = + reth_exex_test_utils::test_exex_context().await.expect("exex test context"); + + let collector = LiveTrieCollector::new( + ctx.components.components.evm_config.clone(), + ctx.components.provider.clone(), + &proofs, + ); + + let exex = build_test_exex(ctx, proofs.clone()); + + let (sync_target_tx, _) = 
tokio::sync::watch::channel(0u64); + + for i in 1..=5 { + let new_chain = Arc::new(mk_chain_with_updates(i, i, None)); + let notif = ExExNotification::ChainCommitted { new: new_chain }; + + exex.handle_notification(notif, &collector, &sync_target_tx) + .expect("handle chain commit"); + } + + let latest = proofs.get_latest_block_number().expect("get latest block").expect("ok").0; + assert_eq!(latest, 5); + + // Now the tip is 10, and we want to revert from block 9..10 + let old_chain = Arc::new(mk_chain_with_updates(9, 10, None)); + + // Notification: chain reverted 9..10 + let notif = ExExNotification::ChainReverted { old: old_chain }; + + exex.handle_notification(notif, &collector, &sync_target_tx) + .expect("handle chain reverted"); + let latest = proofs.get_latest_block_number().expect("get latest block").expect("ok").0; + assert_eq!(latest, 5); + } + + #[tokio::test] + async fn ensure_initialized_errors_on_storage_not_initialized() { + // MDBX proofs storage + let dir = tempdir_path(); + let store = Arc::new(MdbxProofsStorage::new(dir.as_path()).expect("env")); + let proofs: OpProofsStorage> = store.clone().into(); + + let (ctx, _handle) = + reth_exex_test_utils::test_exex_context().await.expect("exex test context"); + + let exex = build_test_exex(ctx, proofs.clone()); + let _ = exex.ensure_initialized().expect_err("should return error"); + } + + #[tokio::test] + async fn ensure_initialized_errors_when_prune_exceeds_threshold() { + // MDBX proofs storage + let dir = tempdir_path(); + let store = Arc::new(MdbxProofsStorage::new(dir.as_path()).expect("env")); + let proofs: OpProofsStorage> = store.clone().into(); + + init_storage(proofs.clone()); + + for i in 1..1100 { + proofs + .store_trie_updates( + BlockWithParent::new( + hash_for_num(i - 1), + BlockNumHash::new(i, hash_for_num(i)), + ), + BlockStateDiff::default(), + ) + .expect("store trie update"); + } + + let (ctx, _handle) = + reth_exex_test_utils::test_exex_context().await.expect("exex test context"); + 
+ let exex = build_test_exex(ctx, proofs.clone()); + let _ = exex.ensure_initialized().expect_err("should return error"); + } + + #[tokio::test] + async fn ensure_initialized_succeeds() { + // MDBX proofs storage + let dir = tempdir_path(); + let store = Arc::new(MdbxProofsStorage::new(dir.as_path()).expect("env")); + let proofs: OpProofsStorage> = store.clone().into(); + + init_storage(proofs.clone()); + + let (ctx, _handle) = + reth_exex_test_utils::test_exex_context().await.expect("exex test context"); + + let exex = build_test_exex(ctx, proofs.clone()); + exex.ensure_initialized().expect("should not return error"); + } + + #[tokio::test] + async fn handle_notification_errors_on_empty_storage() { + // MDBX proofs storage + let dir = tempdir_path(); + let store = Arc::new(MdbxProofsStorage::new(dir.as_path()).expect("env")); + let proofs: OpProofsStorage> = store.clone().into(); + + let (ctx, _handle) = + reth_exex_test_utils::test_exex_context().await.expect("exex test context"); + + let collector = LiveTrieCollector::new( + ctx.components.components.evm_config.clone(), + ctx.components.provider.clone(), + &proofs, + ); + + let exex = build_test_exex(ctx, proofs.clone()); + + // Any notification will do + let new_chain = Arc::new(mk_chain_with_updates(1, 5, None)); + let notif = ExExNotification::ChainCommitted { new: new_chain }; + + let (sync_target_tx, _) = tokio::sync::watch::channel(0u64); + let err = exex.handle_notification(notif, &collector, &sync_target_tx).unwrap_err(); + assert_eq!(err.to_string(), "No blocks stored in proofs storage"); + } + + #[tokio::test] + async fn handle_notification_schedules_async_on_gap() { + // MDBX proofs storage + let dir = tempdir_path(); + let store = Arc::new(MdbxProofsStorage::new(dir.as_path()).expect("env")); + let proofs: OpProofsStorage> = store.clone().into(); + + init_storage(proofs.clone()); + + let (ctx, _handle) = + reth_exex_test_utils::test_exex_context().await.expect("exex test context"); + + let collector 
= LiveTrieCollector::new( + ctx.components.components.evm_config.clone(), + ctx.components.provider.clone(), + &proofs, + ); + let exex = build_test_exex(ctx, proofs.clone()); + + // Notification: chain committed 5..10 (Blocks 1,2,3,4 are missing from storage) + let new_chain = Arc::new(mk_chain_with_updates(5, 10, None)); + let notif = ExExNotification::ChainCommitted { new: new_chain }; + + let (sync_target_tx, mut sync_target_rx) = tokio::sync::watch::channel(0u64); + + // Process notification + exex.handle_notification(notif, &collector, &sync_target_tx) + .expect("handle chain commit should return ok immediately"); + + // Verify async signal was sent + // The target in the channel should now be 10 (the tip of the new chain) + assert_eq!( + *sync_target_rx.borrow_and_update(), + 10, + "Should have scheduled sync to block 10" + ); + + // Verify Main Thread did NOT process it + // Because we didn't spawn the actual worker thread in this test, storage should still be at + // 0. This proves the 'handle_notification' returned instantly without doing the + // heavy lifting. 
+ let latest = proofs.get_latest_block_number().expect("get").expect("ok").0; + assert_eq!(latest, 0, "Main thread should not have processed the blocks synchronously"); + } +} diff --git a/rust/op-reth/crates/node/Cargo.toml b/rust/op-reth/crates/node/Cargo.toml index 2fa8937c1c206..78e851e081f49 100644 --- a/rust/op-reth/crates/node/Cargo.toml +++ b/rust/op-reth/crates/node/Cargo.toml @@ -25,12 +25,14 @@ reth-transaction-pool.workspace = true reth-network.workspace = true reth-evm.workspace = true reth-rpc-server-types.workspace = true -reth-tasks = { workspace = true, optional = true } +reth-tasks.workspace = true reth-trie-common.workspace = true reth-node-core.workspace = true reth-rpc-engine-api.workspace = true reth-engine-local = { workspace = true, features = ["op"] } reth-rpc-api.workspace = true +reth-db = { workspace = true, features = ["op"] } +reth-db-api = { workspace = true, features = ["op"] } # op-reth reth-optimism-payload-builder.workspace = true @@ -41,11 +43,22 @@ reth-optimism-txpool.workspace = true reth-optimism-chainspec.workspace = true reth-optimism-consensus = { workspace = true, features = ["std"] } reth-optimism-forks.workspace = true -reth-optimism-primitives = { workspace = true, features = ["serde", "serde-bincode-compat", "reth-codec"] } +reth-optimism-primitives = { workspace = true, features = [ + "serde", + "serde-bincode-compat", + "reth-codec", +] } +reth-optimism-exex = { workspace = true, features = ["metrics"] } +reth-optimism-trie = { workspace = true, features = ["metrics"] } # revm with required optimism features # Note: this must be kept to ensure all features are properly enabled/forwarded -revm = { workspace = true, features = ["secp256k1", "blst", "c-kzg", "memory_limit"] } +revm = { workspace = true, features = [ + "secp256k1", + "blst", + "c-kzg", + "memory_limit", +] } op-revm.workspace = true # ethereum @@ -64,9 +77,11 @@ clap.workspace = true serde.workspace = true eyre.workspace = true url.workspace = true 
+humantime.workspace = true +futures-util.workspace = true +tracing.workspace = true # test-utils dependencies -reth-db-api = { workspace = true, optional = true, features = ["op"] } reth-e2e-test-utils = { workspace = true, optional = true } alloy-genesis = { workspace = true, optional = true } serde_json = { workspace = true, optional = true } @@ -92,26 +107,24 @@ op-alloy-network.workspace = true [features] default = ["reth-codec"] asm-keccak = [ - "alloy-primitives/asm-keccak", - "reth-optimism-node/asm-keccak", - "reth-node-core/asm-keccak", - "revm/asm-keccak", + "alloy-primitives/asm-keccak", + "reth-optimism-node/asm-keccak", + "reth-node-core/asm-keccak", + "revm/asm-keccak", ] keccak-cache-global = [ - "alloy-primitives/keccak-cache-global", - "reth-node-core/keccak-cache-global", - "reth-optimism-node/keccak-cache-global", + "alloy-primitives/keccak-cache-global", + "reth-node-core/keccak-cache-global", + "reth-optimism-node/keccak-cache-global", ] js-tracer = [ - "reth-node-builder/js-tracer", - "reth-optimism-node/js-tracer", - "reth-rpc/js-tracer", - "reth-rpc-eth-types/js-tracer", + "reth-node-builder/js-tracer", + "reth-optimism-node/js-tracer", + "reth-rpc/js-tracer", + "reth-rpc-eth-types/js-tracer", ] test-utils = [ "reth-codec", - "reth-tasks", - "dep:reth-db-api", "reth-e2e-test-utils", "alloy-genesis", "serde_json", @@ -131,8 +144,9 @@ test-utils = [ "reth-trie-common/test-utils", "reth-trie-db/test-utils", "reth-stages-types/test-utils", - "reth-db-api?/test-utils", - "reth-tasks?/test-utils" + "reth-db-api/test-utils", + "reth-tasks/test-utils", + "reth-optimism-exex/test-utils" ] reth-codec = ["reth-optimism-primitives/reth-codec"] diff --git a/rust/op-reth/crates/node/src/args.rs b/rust/op-reth/crates/node/src/args.rs index abddb76109c32..4f6e1cc84b58b 100644 --- a/rust/op-reth/crates/node/src/args.rs +++ b/rust/op-reth/crates/node/src/args.rs @@ -2,8 +2,10 @@ //! 
clap [Args](clap::Args) for optimism rollup configuration +use clap::builder::ArgPredicate; use op_alloy_consensus::interop::SafetyLevel; use reth_optimism_txpool::supervisor::DEFAULT_SUPERVISOR_URL; +use std::{path::PathBuf, time::Duration}; use url::Url; /// Parameters for rollup configuration @@ -82,6 +84,66 @@ pub struct RollupArgs { /// Requires `flashblocks_url` to be set. #[arg(long, default_value_t = false, requires = "flashblocks_url")] pub flashblock_consensus: bool, + + /// If true, initialize external-proofs exex to save and serve trie nodes to provide proofs + /// faster. + #[arg( + long = "proofs-history", + value_name = "PROOFS_HISTORY", + default_value_ifs([ + ("proofs-history.storage-path", ArgPredicate::IsPresent, "true") + ]) + )] + pub proofs_history: bool, + + /// The path to the storage DB for proofs history. + #[arg(long = "proofs-history.storage-path", value_name = "PROOFS_HISTORY_STORAGE_PATH")] + pub proofs_history_storage_path: Option, + + /// The window to span blocks for proofs history. Value is the number of blocks. + /// Default is 1 month of blocks based on 2 seconds block time. + /// 30 * 24 * 60 * 60 / 2 = `1_296_000` + #[arg( + long = "proofs-history.window", + default_value_t = 1_296_000, + value_name = "PROOFS_HISTORY_WINDOW" + )] + pub proofs_history_window: u64, + + /// Interval between proof-storage prune runs. Accepts human-friendly durations + /// like "100s", "5m", "1h". Defaults to 15s. + /// + /// - Shorter intervals prune smaller batches more often, so each prune run tends to be faster + /// and the blocking pause for writes is shorter, at the cost of more frequent pauses. + /// - Longer intervals prune larger batches less often, which reduces how often pruning runs, + /// but each run can take longer and block writes for longer. + /// + /// A shorter interval is preferred so that prune + /// runs stay small and don’t stall writes for too long. 
+ /// + /// CLI: `--proofs-history.prune-interval 10m` + #[arg( + long = "proofs-history.prune-interval", + value_name = "PROOFS_HISTORY_PRUNE_INTERVAL", + default_value = "15s", + value_parser = humantime::parse_duration + )] + pub proofs_history_prune_interval: Duration, + /// Verification interval: perform full block execution every N blocks for data integrity. + /// - 0: Disabled (Default) (always use fast path with pre-computed data from notifications) + /// - 1: Always verify (always execute blocks, slowest) + /// - N: Verify every Nth block (e.g., 100 = every 100 blocks) + /// + /// Periodic verification helps catch data corruption or consensus bugs while maintaining + /// good performance. + /// + /// CLI: `--proofs-history.verification-interval 100` + #[arg( + long = "proofs-history.verification-interval", + value_name = "PROOFS_HISTORY_VERIFICATION_INTERVAL", + default_value_t = 0 + )] + pub proofs_history_verification_interval: u64, } impl Default for RollupArgs { @@ -99,6 +161,11 @@ impl Default for RollupArgs { min_suggested_priority_fee: 1_000_000, flashblocks_url: None, flashblock_consensus: false, + proofs_history: false, + proofs_history_storage_path: None, + proofs_history_window: 1_296_000, + proofs_history_prune_interval: Duration::from_secs(15), + proofs_history_verification_interval: 0, } } } diff --git a/rust/op-reth/crates/node/src/lib.rs b/rust/op-reth/crates/node/src/lib.rs index f91b968f33afc..0f150c9afbdf2 100644 --- a/rust/op-reth/crates/node/src/lib.rs +++ b/rust/op-reth/crates/node/src/lib.rs @@ -33,6 +33,8 @@ pub use version::OP_NAME_CLIENT; pub use reth_optimism_txpool as txpool; +pub mod proof_history; + /// Helpers for running test node instances. 
#[cfg(feature = "test-utils")]
pub mod utils;
diff --git a/rust/op-reth/crates/node/src/proof_history.rs b/rust/op-reth/crates/node/src/proof_history.rs
new file mode 100644
index 0000000000000..03f5f1ac4082f
--- /dev/null
+++ b/rust/op-reth/crates/node/src/proof_history.rs
@@ -0,0 +1,109 @@
+//! Node launcher with proof history support.
+
+use crate::{OpNode, args::RollupArgs};
+use eyre::ErrReport;
+use futures_util::FutureExt;
+use reth_db::DatabaseEnv;
+use reth_db_api::database_metrics::DatabaseMetrics;
+use reth_node_builder::{FullNodeComponents, NodeBuilder, WithLaunchContext};
+use reth_optimism_chainspec::OpChainSpec;
+use reth_optimism_exex::OpProofsExEx;
+use reth_optimism_rpc::{
+    debug::{DebugApiExt, DebugApiOverrideServer},
+    eth::proofs::{EthApiExt, EthApiOverrideServer},
+};
+use reth_optimism_trie::{OpProofsStorage, db::MdbxProofsStorage};
+use reth_tasks::TaskExecutor;
+use std::{sync::Arc, time::Duration};
+use tokio::time::sleep;
+use tracing::info;
+
+/// - no proofs history (plain node),
+/// - in-mem proofs storage,
+/// - MDBX proofs storage.
+pub async fn launch_node_with_proof_history(
+    builder: WithLaunchContext, OpChainSpec>>,
+    args: RollupArgs,
+) -> eyre::Result<(), ErrReport> {
+    let RollupArgs {
+        proofs_history,
+        proofs_history_window,
+        proofs_history_prune_interval,
+        proofs_history_verification_interval,
+        ..
+ } = args; + + // Start from a plain OpNode builder + let mut node_builder = builder.node(OpNode::new(args.clone())); + + if proofs_history { + let path = args + .proofs_history_storage_path + .clone() + .expect("Path must be provided if not using in-memory storage"); + info!(target: "reth::cli", "Using on-disk storage for proofs history"); + + let mdbx = Arc::new( + MdbxProofsStorage::new(&path) + .map_err(|e| eyre::eyre!("Failed to create MdbxProofsStorage: {e}"))?, + ); + let storage: OpProofsStorage> = mdbx.clone().into(); + + let storage_exec = storage.clone(); + + node_builder = node_builder + .on_node_started(move |node| { + spawn_proofs_db_metrics( + node.task_executor, + mdbx, + node.config.metrics.push_gateway_interval, + ); + Ok(()) + }) + .install_exex("proofs-history", async move |exex_context| { + Ok(OpProofsExEx::builder(exex_context, storage_exec) + .with_proofs_history_window(proofs_history_window) + .with_proofs_history_prune_interval(proofs_history_prune_interval) + .with_verification_interval(proofs_history_verification_interval) + .build() + .run() + .boxed()) + }) + .extend_rpc_modules(move |ctx| { + let api_ext = EthApiExt::new(ctx.registry.eth_api().clone(), storage.clone()); + let debug_ext = DebugApiExt::new( + ctx.node().provider().clone(), + ctx.registry.eth_api().clone(), + storage, + Box::new(ctx.node().task_executor().clone()), + ctx.node().evm_config().clone(), + ); + ctx.modules.replace_configured(api_ext.into_rpc())?; + ctx.modules.replace_configured(debug_ext.into_rpc())?; + Ok(()) + }); + } + + // In all cases (with or without proofs), launch the node. + let handle = node_builder.launch_with_debug_capabilities().await?; + handle.node_exit_future.await +} +/// Spawns a task that periodically reports metrics for the proofs DB. 
+fn spawn_proofs_db_metrics( + executor: TaskExecutor, + storage: Arc, + metrics_report_interval: Duration, +) { + executor.spawn_critical_task("op-proofs-storage-metrics", async move { + info!( + target: "reth::cli", + ?metrics_report_interval, + "Starting op-proofs-storage metrics task" + ); + + loop { + sleep(metrics_report_interval).await; + storage.report_metrics(); + } + }); +} diff --git a/rust/op-reth/crates/rpc/Cargo.toml b/rust/op-reth/crates/rpc/Cargo.toml index 7c66f4c655c2d..04a40be4fffb3 100644 --- a/rust/op-reth/crates/rpc/Cargo.toml +++ b/rust/op-reth/crates/rpc/Cargo.toml @@ -13,6 +13,7 @@ workspace = true [dependencies] # reth +reth-basic-payload-builder.workspace = true reth-evm.workspace = true reth-primitives-traits = { workspace = true, features = ["op"] } reth-storage-api.workspace = true @@ -21,6 +22,7 @@ reth-rpc-eth-types.workspace = true reth-rpc-server-types.workspace = true reth-tasks = { workspace = true, features = ["rayon"] } reth-transaction-pool.workspace = true +reth-revm.workspace = true reth-rpc.workspace = true reth-rpc-api.workspace = true reth-node-api.workspace = true @@ -28,6 +30,8 @@ reth-node-builder.workspace = true reth-chainspec.workspace = true reth-chain-state.workspace = true reth-rpc-engine-api.workspace = true +reth-payload-util.workspace = true +reth-provider.workspace = true # op-reth reth-optimism-evm.workspace = true @@ -37,14 +41,17 @@ reth-optimism-txpool.workspace = true # TODO remove node-builder import reth-optimism-primitives = { workspace = true, features = ["reth-codec", "serde-bincode-compat", "serde"] } reth-optimism-forks.workspace = true +reth-optimism-trie.workspace = true # ethereum alloy-eips.workspace = true alloy-json-rpc.workspace = true alloy-primitives.workspace = true +alloy-rlp.workspace = true alloy-rpc-client.workspace = true alloy-rpc-types-eth.workspace = true alloy-rpc-types-debug.workspace = true +alloy-serde.workspace = true alloy-transport.workspace = true 
alloy-transport-http.workspace = true alloy-consensus.workspace = true @@ -58,6 +65,7 @@ revm.workspace = true op-revm.workspace = true # async +serde.workspace = true tokio.workspace = true futures.workspace = true tokio-stream.workspace = true @@ -81,6 +89,9 @@ derive_more = { workspace = true, features = ["constructor"] } reth-metrics.workspace = true metrics.workspace = true +# enum +strum.workspace = true + [dev-dependencies] reth-optimism-chainspec.workspace = true alloy-op-hardforks.workspace = true diff --git a/rust/op-reth/crates/rpc/src/debug.rs b/rust/op-reth/crates/rpc/src/debug.rs new file mode 100644 index 0000000000000..c46d87b67c6ec --- /dev/null +++ b/rust/op-reth/crates/rpc/src/debug.rs @@ -0,0 +1,331 @@ +//! Historical proofs RPC server implementation for `debug_` namespace. + +use crate::{ + metrics::{DebugApiExtMetrics, DebugApis}, + state::OpStateProviderFactory, +}; +use alloy_consensus::BlockHeader; +use alloy_eips::{BlockId, BlockNumberOrTag}; +use alloy_primitives::B256; +use alloy_rlp::Encodable; +use alloy_rpc_types_debug::ExecutionWitness; +use async_trait::async_trait; +use jsonrpsee::proc_macros::rpc; +use jsonrpsee_core::RpcResult; +use jsonrpsee_types::error::ErrorObject; +use reth_basic_payload_builder::PayloadConfig; +use reth_evm::{ConfigureEvm, execute::Executor}; +use reth_node_api::{BuildNextEnv, NodePrimitives, PayloadBuilderError}; +use reth_optimism_forks::OpHardforks; +use reth_optimism_payload_builder::{ + OpAttributes, OpPayloadPrimitives, + builder::{OpBuilder, OpPayloadBuilderCtx}, +}; +use reth_optimism_trie::{OpProofsStorage, OpProofsStore}; +use reth_optimism_txpool::OpPooledTransaction as OpPooledTx2; +use reth_payload_util::NoopPayloadTransactions; +use reth_primitives_traits::{SealedHeader, TxTy}; +use reth_provider::{ + BlockReaderIdExt, ChainSpecProvider, HeaderProvider, NodePrimitivesProvider, ProviderError, + ProviderResult, StateProviderFactory, +}; +use reth_revm::{State, database::StateProviderDatabase, 
witness::ExecutionWitnessRecord}; +use reth_rpc_api::eth::helpers::FullEthApi; +use reth_rpc_eth_types::EthApiError; +use reth_rpc_server_types::{ToRpcResult, result::internal_rpc_err}; +use reth_tasks::TaskSpawner; +use serde::{Deserialize, Serialize}; +use std::{marker::PhantomData, sync::Arc}; +use tokio::sync::{Semaphore, oneshot}; + +/// Represents the current proofs sync status. +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)] +pub struct ProofsSyncStatus { + /// The earliest block number for which proofs are available. + earliest: Option, + /// The latest block number for which proofs are available. + latest: Option, +} + +#[cfg_attr(not(test), rpc(server, namespace = "debug"))] +#[cfg_attr(test, rpc(server, client, namespace = "debug"))] +pub trait DebugApiOverride { + /// Executes a payload and returns the execution witness. + #[method(name = "executePayload")] + async fn execute_payload( + &self, + parent_block_hash: B256, + attributes: Attributes, + ) -> RpcResult; + + /// Returns the execution witness for a given block. + #[method(name = "executionWitness")] + async fn execution_witness(&self, block: BlockNumberOrTag) -> RpcResult; + + /// Returns the current proofs sync status. + #[method(name = "proofsSyncStatus")] + async fn proofs_sync_status(&self) -> RpcResult; +} + +#[derive(Debug)] +/// Overrides applied to the `debug_` namespace of the RPC API for the OP Proofs ExEx. +pub struct DebugApiExt { + inner: Arc>, +} + +impl DebugApiExt +where + Eth: FullEthApi + Send + Sync + 'static, + ErrorObject<'static>: From, + Storage: OpProofsStore + Clone + 'static, + Provider: BlockReaderIdExt + NodePrimitivesProvider, + EvmConfig: ConfigureEvm + 'static, +{ + /// Creates a new instance of the `DebugApiExt`. 
+ pub fn new( + provider: Provider, + eth_api: Eth, + preimage_store: OpProofsStorage, + task_spawner: Box, + evm_config: EvmConfig, + ) -> Self { + Self { + inner: Arc::new(DebugApiExtInner::new( + provider, + eth_api, + preimage_store, + task_spawner, + evm_config, + )), + } + } +} + +#[derive(Debug)] +/// Overrides applied to the `debug_` namespace of the RPC API for historical proofs ExEx. +pub struct DebugApiExtInner { + provider: Provider, + eth_api: Eth, + storage: OpProofsStorage, + state_provider_factory: OpStateProviderFactory, + evm_config: EvmConfig, + task_spawner: Box, + semaphore: Semaphore, + _attrs: PhantomData, + metrics: DebugApiExtMetrics, +} + +impl DebugApiExtInner +where + Eth: FullEthApi + Send + Sync + 'static, + ErrorObject<'static>: From, + P: OpProofsStore + Clone + 'static, + Provider: NodePrimitivesProvider, +{ + fn new( + provider: Provider, + eth_api: Eth, + storage: OpProofsStorage

, + task_spawner: Box, + evm_config: EvmConfig, + ) -> Self { + Self { + provider, + storage: storage.clone(), + state_provider_factory: OpStateProviderFactory::new(eth_api.clone(), storage), + eth_api, + evm_config, + task_spawner, + semaphore: Semaphore::new(3), + _attrs: PhantomData, + metrics: DebugApiExtMetrics::new(), + } + } +} + +impl DebugApiExt +where + Eth: FullEthApi + Send + Sync + 'static, + ErrorObject<'static>: From, + P: OpProofsStore + Clone + 'static, + Provider: BlockReaderIdExt + + NodePrimitivesProvider + + HeaderProvider

::BlockHeader>, +{ + fn parent_header( + &self, + parent_block_hash: B256, + ) -> ProviderResult> { + self.inner + .provider + .sealed_header_by_hash(parent_block_hash)? + .ok_or_else(|| ProviderError::HeaderNotFound(parent_block_hash.into())) + } +} + +#[async_trait] +impl DebugApiOverrideServer + for DebugApiExt +where + Eth: FullEthApi + Send + Sync + 'static, + ErrorObject<'static>: From, + P: OpProofsStore + Clone + 'static, + Attrs: OpAttributes>, + N: OpPayloadPrimitives, + EvmConfig: ConfigureEvm< + Primitives = N, + NextBlockEnvCtx: BuildNextEnv, + > + 'static, + Provider: BlockReaderIdExt
+ + StateProviderFactory + + ChainSpecProvider + + NodePrimitivesProvider + + HeaderProvider
+ + Clone + + 'static, + op_alloy_consensus::OpPooledTransaction: + TryFrom<::_TX, Error: core::error::Error>, + ::_TX: From, +{ + async fn execute_payload( + &self, + parent_block_hash: B256, + attributes: Attrs::RpcPayloadAttributes, + ) -> RpcResult { + self.inner + .metrics + .record_operation_async(DebugApis::DebugExecutePayload, async { + let _permit = self.inner.semaphore.acquire().await; + + let parent_header = self.parent_header(parent_block_hash).to_rpc_result()?; + + let (tx, rx) = oneshot::channel(); + let this = self.inner.clone(); + self.inner.task_spawner.spawn_blocking_task(Box::pin(async move { + let result = async { + let parent_hash = parent_header.hash(); + let attributes = Attrs::try_new(parent_hash, attributes, 3) + .map_err(PayloadBuilderError::other)?; + + let config = + PayloadConfig { parent_header: Arc::new(parent_header), attributes }; + let ctx = OpPayloadBuilderCtx { + evm_config: this.evm_config.clone(), + chain_spec: this.provider.chain_spec(), + config, + cancel: Default::default(), + best_payload: Default::default(), + builder_config: Default::default(), + }; + + let state_provider = this + .state_provider_factory + .state_provider(Some(BlockId::Hash(parent_hash.into()))) + .await + .map_err(PayloadBuilderError::other)?; + + let builder = OpBuilder::new(|_| { + NoopPayloadTransactions::< + OpPooledTx2< + ::_TX, + op_alloy_consensus::OpPooledTransaction, + >, + >::default() + }); + + builder.witness(state_provider, &ctx).map_err(PayloadBuilderError::other) + }; + + let _ = tx.send(result.await); + })); + + rx.await + .map_err(|err| internal_rpc_err(err.to_string()))? 
+ .map_err(|err| internal_rpc_err(err.to_string())) + }) + .await + } + + async fn execution_witness(&self, block_id: BlockNumberOrTag) -> RpcResult { + self.inner + .metrics + .record_operation_async(DebugApis::DebugExecutionWitness, async { + let _permit = self.inner.semaphore.acquire().await; + + let block = self + .inner + .eth_api + .recovered_block(block_id.into()) + .await? + .ok_or(EthApiError::HeaderNotFound(block_id.into()))?; + + let this = self.inner.clone(); + let block_number = block.header().number(); + + let state_provider = this + .state_provider_factory + .state_provider(Some(BlockId::Number(block.parent_num_hash().number.into()))) + .await + .map_err(EthApiError::from)?; + let db = StateProviderDatabase::new(&state_provider); + let block_executor = this.eth_api.evm_config().executor(db); + + let mut witness_record = ExecutionWitnessRecord::default(); + + let _ = block_executor + .execute_with_state_closure(&block, |statedb: &State<_>| { + witness_record.record_executed_state(statedb); + }) + .map_err(EthApiError::from)?; + + let ExecutionWitnessRecord { hashed_state, codes, keys, lowest_block_number } = + witness_record; + + let state = state_provider + .witness(Default::default(), hashed_state) + .map_err(EthApiError::from)?; + let mut exec_witness = + ExecutionWitness { state, codes, keys, ..Default::default() }; + + // If there were no calls to the BLOCKHASH opcode, return only the + // parent header. + let smallest = + lowest_block_number.unwrap_or_else(|| block_number.saturating_sub(1)); + + let range = smallest..block_number; + exec_witness.headers = self + .inner + .provider + .headers_range(range) + .map_err(EthApiError::from)? 
+ .into_iter() + .map(|header| { + let mut serialized_header = Vec::new(); + header.encode(&mut serialized_header); + serialized_header.into() + }) + .collect(); + + Ok(exec_witness) + }) + .await + } + + async fn proofs_sync_status(&self) -> RpcResult { + let earliest = self + .inner + .storage + .get_earliest_block_number() + .map_err(|err| internal_rpc_err(err.to_string()))?; + let latest = self + .inner + .storage + .get_latest_block_number() + .map_err(|err| internal_rpc_err(err.to_string()))?; + + Ok(ProofsSyncStatus { + earliest: earliest.map(|(block_number, _)| block_number), + latest: latest.map(|(block_number, _)| block_number), + }) + } +} diff --git a/rust/op-reth/crates/rpc/src/eth/mod.rs b/rust/op-reth/crates/rpc/src/eth/mod.rs index b0e24046cfb8f..483cd4a7efc33 100644 --- a/rust/op-reth/crates/rpc/src/eth/mod.rs +++ b/rust/op-reth/crates/rpc/src/eth/mod.rs @@ -1,6 +1,7 @@ //! OP-Reth `eth_` endpoint implementation. pub mod ext; +pub mod proofs; pub mod receipt; pub mod transaction; diff --git a/rust/op-reth/crates/rpc/src/eth/proofs.rs b/rust/op-reth/crates/rpc/src/eth/proofs.rs new file mode 100644 index 0000000000000..07522c96b20f8 --- /dev/null +++ b/rust/op-reth/crates/rpc/src/eth/proofs.rs @@ -0,0 +1,95 @@ +//! Historical proofs RPC server implementation. 
+ +use crate::{metrics::EthApiExtMetrics, state::OpStateProviderFactory}; +use alloy_eips::BlockId; +use alloy_primitives::Address; +use alloy_rpc_types_eth::EIP1186AccountProofResponse; +use alloy_serde::JsonStorageKey; +use async_trait::async_trait; +use jsonrpsee::proc_macros::rpc; +use jsonrpsee_core::RpcResult; +use jsonrpsee_types::error::ErrorObject; +use reth_optimism_trie::{OpProofsStorage, OpProofsStore}; +use reth_provider::StateProofProvider; +use reth_rpc_api::eth::helpers::FullEthApi; +use std::time::Instant; + +#[cfg_attr(not(test), rpc(server, namespace = "eth"))] +#[cfg_attr(test, rpc(server, client, namespace = "eth"))] +pub trait EthApiOverride { + /// Returns the account and storage values of the specified account including the Merkle-proof. + /// This call can be used to verify that the data you are pulling from is not tampered with. + #[method(name = "getProof")] + async fn get_proof( + &self, + address: Address, + keys: Vec, + block_number: Option, + ) -> RpcResult; +} + +#[derive(Debug)] +/// Overrides applied to the `eth_` namespace of the RPC API for historical proofs ExEx. +pub struct EthApiExt { + state_provider_factory: OpStateProviderFactory, + metrics: EthApiExtMetrics, +} + +impl EthApiExt +where + Eth: FullEthApi + Send + Sync + 'static, + ErrorObject<'static>: From, + P: OpProofsStore + Clone + 'static, +{ + /// Creates a new instance of the `EthApiExt`. + pub fn new(eth_api: Eth, preimage_store: OpProofsStorage

) -> Self { + let metrics = EthApiExtMetrics::default(); + Self { + state_provider_factory: OpStateProviderFactory::new(eth_api, preimage_store), + metrics, + } + } +} + +#[async_trait] +impl EthApiOverrideServer for EthApiExt +where + Eth: FullEthApi + Send + Sync + 'static, + ErrorObject<'static>: From, + P: OpProofsStore + Clone + 'static, +{ + async fn get_proof( + &self, + address: Address, + keys: Vec, + block_number: Option, + ) -> RpcResult { + let start = Instant::now(); + self.metrics.get_proof_requests.increment(1); + + let storage_keys = keys.iter().map(|key| key.as_b256()).collect::>(); + + let result = async { + let proof = self + .state_provider_factory + .state_provider(block_number) + .await + .map_err(Into::into)? + .proof(Default::default(), address, &storage_keys) + .map_err(Into::into)?; + + Ok(proof.into_eip1186_response(keys)) + } + .await; + + match &result { + Ok(_) => { + self.metrics.get_proof_latency.record(start.elapsed().as_secs_f64()); + self.metrics.get_proof_successful_responses.increment(1); + } + Err(_) => self.metrics.get_proof_failures.increment(1), + } + + result + } +} diff --git a/rust/op-reth/crates/rpc/src/lib.rs b/rust/op-reth/crates/rpc/src/lib.rs index d1f8e8dbdd0b9..475322c3b3cf9 100644 --- a/rust/op-reth/crates/rpc/src/lib.rs +++ b/rust/op-reth/crates/rpc/src/lib.rs @@ -8,6 +8,7 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg))] +pub mod debug; pub mod engine; pub mod error; pub mod eth; @@ -15,6 +16,7 @@ pub mod historical; pub mod metrics; pub mod miner; pub mod sequencer; +pub mod state; pub mod witness; #[cfg(feature = "client")] @@ -22,5 +24,5 @@ pub use engine::OpEngineApiClient; pub use engine::{OP_ENGINE_CAPABILITIES, OpEngineApi, OpEngineApiServer}; pub use error::{OpEthApiError, OpInvalidTransactionError, SequencerClientError}; pub use eth::{OpEthApi, OpEthApiBuilder, OpReceiptBuilder}; -pub use metrics::SequencerMetrics; +pub use metrics::{EthApiExtMetrics, 
SequencerMetrics}; pub use sequencer::SequencerClient; diff --git a/rust/op-reth/crates/rpc/src/metrics.rs b/rust/op-reth/crates/rpc/src/metrics.rs index 5aa5e3eff3d6e..17ac94f6aa610 100644 --- a/rust/op-reth/crates/rpc/src/metrics.rs +++ b/rust/op-reth/crates/rpc/src/metrics.rs @@ -1,8 +1,11 @@ //! RPC metrics unique for OP-stack. +use alloy_primitives::map::HashMap; use core::time::Duration; -use metrics::Histogram; +use metrics::{Counter, Histogram}; use reth_metrics::Metrics; +use std::time::Instant; +use strum::{EnumCount, EnumIter, IntoEnumIterator}; /// Optimism sequencer metrics #[derive(Metrics, Clone)] @@ -19,3 +22,114 @@ impl SequencerMetrics { self.sequencer_forward_latency.record(duration.as_secs_f64()); } } + +/// Optimism ETH API extension metrics +#[derive(Metrics, Clone)] +#[metrics(scope = "optimism_rpc.eth_api_ext")] +pub struct EthApiExtMetrics { + /// How long it takes to handle a `eth_getProof` request successfully + pub(crate) get_proof_latency: Histogram, + + /// Total number of `eth_getProof` requests + pub(crate) get_proof_requests: Counter, + + /// Total number of successful `eth_getProof` responses + pub(crate) get_proof_successful_responses: Counter, + + /// Total number of failures handling `eth_getProof` requests + pub(crate) get_proof_failures: Counter, +} + +/// Types of debug apis +#[derive(Debug, Clone, Copy, Eq, PartialEq, Hash, EnumCount, EnumIter)] +pub enum DebugApis { + /// `DebugExecutePayload` Api + DebugExecutePayload, + /// `DebugExecutionWitness` Api + DebugExecutionWitness, +} + +impl DebugApis { + /// Returns the operation as a string for metrics labels. + pub const fn as_str(&self) -> &'static str { + match self { + Self::DebugExecutePayload => "debug_execute_payload", + Self::DebugExecutionWitness => "debug_execution_witness", + } + } +} + +/// Metrics for Debug API extension calls. 
+#[derive(Debug)] +pub struct DebugApiExtMetrics { + /// Per-api metrics handles + apis: HashMap, +} + +impl DebugApiExtMetrics { + /// Initializes new `DebugApiExtMetrics` + pub fn new() -> Self { + let mut apis = HashMap::default(); + for api in DebugApis::iter() { + apis.insert(api, DebugApiExtRpcMetrics::new_with_labels(&[("api", api.as_str())])); + } + Self { apis } + } + + /// Record a Debug API call async (tracks latency, requests, success, failures). + pub async fn record_operation_async(&self, api: DebugApis, f: F) -> Result + where + F: Future>, + { + if let Some(metrics) = self.apis.get(&api) { + metrics.record_async(f).await + } else { + f.await + } + } +} + +impl Default for DebugApiExtMetrics { + fn default() -> Self { + Self::new() + } +} + +/// Optimism Debug API extension metrics +#[derive(Metrics, Clone)] +#[metrics(scope = "optimism_rpc.debug_api_ext")] +pub struct DebugApiExtRpcMetrics { + /// End-to-end time to handle this API call + pub(crate) latency: Histogram, + + /// Total number of requests for this API + pub(crate) requests: Counter, + + /// Total number of successful responses for this API + pub(crate) successful_responses: Counter, + + /// Total number of failures for this API + pub(crate) failures: Counter, +} + +impl DebugApiExtRpcMetrics { + /// Record rpc api call async. + async fn record_async(&self, f: F) -> Result + where + F: Future>, + { + let start = Instant::now(); + let result = f.await; + + self.latency.record(start.elapsed().as_secs_f64()); + self.requests.increment(1); + + if result.is_ok() { + self.successful_responses.increment(1); + } else { + self.failures.increment(1); + } + + result + } +} diff --git a/rust/op-reth/crates/rpc/src/state.rs b/rust/op-reth/crates/rpc/src/state.rs new file mode 100644 index 0000000000000..7fd5c2a0d3618 --- /dev/null +++ b/rust/op-reth/crates/rpc/src/state.rs @@ -0,0 +1,62 @@ +//! State provider factory for OP Proofs ExEx. 
+ +use alloy_eips::BlockId; +use derive_more::Constructor; +use jsonrpsee_types::error::ErrorObject; +use reth_optimism_trie::{OpProofsStorage, OpProofsStore, provider::OpProofsStateProviderRef}; +use reth_provider::{BlockIdReader, ProviderError, ProviderResult, StateProvider}; +use reth_rpc_api::eth::helpers::FullEthApi; +use reth_rpc_eth_types::EthApiError; + +/// Creates a factory for state providers using OP Proofs external proofs storage. +#[derive(Debug, Constructor)] +pub struct OpStateProviderFactory { + eth_api: Eth, + preimage_store: OpProofsStorage

, +} + +impl<'a, Eth, P> OpStateProviderFactory +where + Eth: FullEthApi + Send + Sync + 'static, + ErrorObject<'static>: From, + P: OpProofsStore + Clone + 'a, +{ + /// Creates a state provider for the given block id. + pub async fn state_provider( + &'a self, + block_id: Option, + ) -> ProviderResult> { + let block_id = block_id.unwrap_or_default(); + // Check whether the distance to the block exceeds the maximum configured window. + let block_number = self + .eth_api + .provider() + .block_number_for_id(block_id)? + .ok_or(EthApiError::HeaderNotFound(block_id)) + .map_err(ProviderError::other)?; + + let historical_provider = + self.eth_api.state_at_block_id(block_id).await.map_err(ProviderError::other)?; + + let (Some((latest_block_number, _)), Some((earliest_block_number, _))) = ( + self.preimage_store + .get_latest_block_number() + .map_err(|e| ProviderError::Database(e.into()))?, + self.preimage_store + .get_earliest_block_number() + .map_err(|e| ProviderError::Database(e.into()))?, + ) else { + // if no earliest block, db is empty - use historical provider + return Ok(historical_provider); + }; + + if block_number < earliest_block_number || block_number > latest_block_number { + return Ok(historical_provider); + } + + let external_overlay_provider = + OpProofsStateProviderRef::new(historical_provider, &self.preimage_store, block_number); + + Ok(Box::new(external_overlay_provider)) + } +} diff --git a/rust/op-reth/crates/tests/Makefile b/rust/op-reth/crates/tests/Makefile new file mode 100644 index 0000000000000..23bd092a1d0af --- /dev/null +++ b/rust/op-reth/crates/tests/Makefile @@ -0,0 +1,93 @@ +# Variables +DOCKER_IMAGE_NAME := op-reth +DOCKER_TAG := local +DOCKERFILE_PATH := ../../../DockerfileOpProof +KURTOSIS_PACKAGE := github.com/ethpandaops/optimism-package@998796c0f3bb478d63d729e65f0b76e24112e00d +DEVNET ?= opgeth-seq-opreth-val +GO_PKG_NAME ?= proofs/core +SOURCE_DIR := $(shell pwd) +OP_DEVSTACK_PROOF_SEQUENCER_EL ?= op-geth 
+OP_DEVSTACK_PROOF_VALIDATOR_EL ?= op-reth-with-proof + +.PHONY: all build-docker build-contracts unzip-contract-artifacts update-packages run clean help + +# Default target +all: build-docker run + +# Build op-reth +build: + @echo "Building op-reth binary..." + cd ../../../ && cargo build --bin op-reth --manifest-path crates/optimism/bin/op-reth/Cargo.toml + +# Build the op-reth Docker image +build-docker: + @echo "Building $(DOCKER_IMAGE_NAME):$(DOCKER_TAG) Docker image..." + cd ../../../ && docker build -f $(notdir $(DOCKERFILE_PATH)) -t $(DOCKER_IMAGE_NAME):$(DOCKER_TAG) . + +# Build coverage-enabled op-reth Docker image +build-docker-with-cov: + @echo "Building coverage-enabled $(DOCKER_IMAGE_NAME):cov Docker image..." + cd ../../../ && docker build \ + --build-arg RUSTFLAGS="-Cinstrument-coverage" \ + --build-arg CARGO_INCREMENTAL=0 \ + --build-arg LLVM_PROFILE_FILE="/coverage/%m-%p.profraw" \ + -f $(notdir $(DOCKERFILE_PATH)) \ + -t $(DOCKER_IMAGE_NAME):$(DOCKER_TAG) . + +# Run Kurtosis with the optimism devnet +run: + @echo "Starting Optimism devnet with historical proof configuration..." + @DEVNET_PATH="./devnets/$(DEVNET).yaml"; \ + if [ ! -z "$(DEVNET_CUSTOM_PATH)" ]; then \ + DEVNET_PATH="$(DEVNET_CUSTOM_PATH)"; \ + fi; \ + kurtosis run $(KURTOSIS_PACKAGE) --args-file $$DEVNET_PATH --enclave $(DEVNET) + +# Build smart contract artifacts with Foundry +build-contracts: + @echo "Building contracts with forge..." + @cd "$(SOURCE_DIR)/proofs/contracts" && forge build || { echo "forge build failed"; exit 1; } + +# Unzip contract artifacts +unzip-contract-artifacts: + @echo "Unzipping contract artifacts..." + mkdir -p "$(SOURCE_DIR)/artifacts/src"; \ + tar --zstd -xf "$(SOURCE_DIR)/artifacts/compressed/artifacts.tzst" -C "$(SOURCE_DIR)/artifacts/src" + +# Update contract artifacts from the optimism submodule +update-packages: + @echo "Updating contract artifacts from optimism submodule..." 
+ cd "$(SOURCE_DIR)/optimism/op-deployer" && just build-contracts copy-contract-artifacts + mkdir -p "$(SOURCE_DIR)/artifacts/compressed" + cp "$(SOURCE_DIR)/optimism/op-deployer/pkg/deployer/artifacts/forge-artifacts/artifacts.tzst" "$(SOURCE_DIR)/artifacts/compressed/artifacts.tzst" + +# Run E2E tests using Kurtosis +test-e2e-kurtosis: build-contracts + @echo "Running E2E tests with Kurtosis for $(DEVNET)" + @DEVNET_PATH="$(SOURCE_DIR)/devnets/$(DEVNET).yaml"; \ + if [ ! -z "$(DEVNET_CUSTOM_PATH)" ]; then \ + DEVNET_PATH="$(DEVNET_CUSTOM_PATH)"; \ + fi; \ + export OP_DEPLOYER_ARTIFACTS="$(SOURCE_DIR)/artifacts/src/forge-artifacts"; \ + export DEVNET_ENV_URL="ktnative://$(DEVNET)$$DEVNET_PATH"; \ + export DISABLE_OP_E2E_LEGACY=true; \ + export DEVSTACK_ORCHESTRATOR=sysext; \ + go test -count=1 -timeout 40m -v ./$(GO_PKG_NAME) + +# Run E2E tests using Sysgo +test-e2e-sysgo: unzip-contract-artifacts build-contracts + @echo "Running E2E tests with Sysgo" + export OP_DEPLOYER_ARTIFACTS="$(SOURCE_DIR)/artifacts/src/forge-artifacts"; \ + export DISABLE_OP_E2E_LEGACY=true; \ + export DEVSTACK_ORCHESTRATOR=sysgo; \ + export OP_RETH_ENABLE_PROOF_HISTORY=true; \ + export SKIP_P2P_CONNECTION_CHECK=true; \ + export OP_RETH_EXEC_PATH="${SOURCE_DIR}/../../../target/debug/op-reth"; \ + export OP_DEVSTACK_PROOF_SEQUENCER_EL=$(OP_DEVSTACK_PROOF_SEQUENCER_EL); \ + export OP_DEVSTACK_PROOF_VALIDATOR_EL=$(OP_DEVSTACK_PROOF_VALIDATOR_EL); \ + go test -count=1 -timeout 40m -v ./$(GO_PKG_NAME) + +# Stop and clean Kurtosis services +clean: + @echo "Cleaning up Kurtosis services..." + kurtosis clean -a diff --git a/rust/op-reth/crates/tests/README.md b/rust/op-reth/crates/tests/README.md new file mode 100644 index 0000000000000..7169c09da70b8 --- /dev/null +++ b/rust/op-reth/crates/tests/README.md @@ -0,0 +1,67 @@ +# E2E tests for op-reth + +This folder contains the end-to-end testing resources for op-reth. 
Tests use the Optimism "devstack" (from the Optimism monorepo) and Kurtosis to deploy ephemeral devnets.
+
+This README documents common workflows and Makefile commands used to build the local Docker image, start the devnet with Kurtosis, run e2e tests, and clean up resources.
+
+## Prerequisites
+
+- Docker (Desktop) running on your machine
+- Kurtosis CLI installed and able to reach the Kurtosis engine
+- Go (to run Go-based e2e tests)
+
+## Commands (Makefile targets)
+
+Build the Docker image used by the devnet (tags `op-reth:local`):
+
+```sh
+make build-docker
+```
+
+Start the Optimism devnet (default: `opgeth-seq-opreth-val`):
+
+```sh
+# uses the Makefile's DEVNET variable (devnets/<name>.yaml)
+# OPTIONAL. Default: opgeth-seq-opreth-val
+make run DEVNET=<name>
+
+# or with a custom devnet YAML path
+make run DEVNET_CUSTOM_PATH=/absolute/path/to/devnet.yaml
+```
+
+Run the e2e test suite that exercises the deployed devnet (Go tests):
+
+```sh
+# runs go test with a long timeout; set GO_PKG_NAME to the package to test
+make test-e2e-kurtosis
+
+# run a specific test or package
+make test-e2e-kurtosis GO_PKG_NAME=path/to/pkg
+```
+
+Stop and remove Kurtosis resources (cleanup):
+
+```sh
+make clean
+```
+
+## Implementation notes
+
+- The Makefile in this directory calls the repository root `DockerfileOp` to build an op-reth image tagged `op-reth:local`.
+- The default Kurtosis package used is `github.com/ethpandaops/optimism-package@1.4.0`. The Makefile passes the YAML under `devnets/$(DEVNET).yaml` to `kurtosis run`.
+
+## Quick workflow example
+
+```sh
+# build image
+make build-docker
+
+# start devnet
+make run
+
+# run tests (set GO_PKG_NAME if needed)
+make test-e2e-kurtosis GO_PKG_NAME=proofs
+
+# cleanup
+make clean
+```
diff --git a/rust/op-reth/crates/tests/artifacts/.gitignore b/rust/op-reth/crates/tests/artifacts/.gitignore
new file mode 100644
index 0000000000000..e1b09822c7963
--- /dev/null
+++ b/rust/op-reth/crates/tests/artifacts/.gitignore
@@ -0,0 +1,2 @@
+forge-artifacts
+src
\ No newline at end of file
diff --git a/rust/op-reth/crates/tests/artifacts/compressed/README.md b/rust/op-reth/crates/tests/artifacts/compressed/README.md
new file mode 100644
index 0000000000000..cc09a72587329
--- /dev/null
+++ b/rust/op-reth/crates/tests/artifacts/compressed/README.md
@@ -0,0 +1,2 @@
+Artifacts in this directory will be embedded inside the `op-deployer` binary. The directory can be populated by running
+`make update-packages`.
\ No newline at end of file
diff --git a/rust/op-reth/crates/tests/artifacts/compressed/artifacts.tzst b/rust/op-reth/crates/tests/artifacts/compressed/artifacts.tzst
new file mode 100644
index 0000000000000..a3ebbe3866d73
Binary files /dev/null and b/rust/op-reth/crates/tests/artifacts/compressed/artifacts.tzst differ
diff --git a/rust/op-reth/crates/tests/devnets/opgeth-seq-opreth-val.yaml b/rust/op-reth/crates/tests/devnets/opgeth-seq-opreth-val.yaml
new file mode 100644
index 0000000000000..65aaa2f32034d
--- /dev/null
+++ b/rust/op-reth/crates/tests/devnets/opgeth-seq-opreth-val.yaml
@@ -0,0 +1,74 @@
+# A simple network configuration for kurtosis (https://github.com/ethpandaops/optimism-package)
+# Spins up chain with two participating EL/CL pairs.
+# One with op-geth/op-node (sequencer role) and one with op-reth/op-node (verifier role).
+
+optimism_package:
+  observability:
+    enabled: true
+    grafana_params:
+      # Will load the dashboards from default branch.
+ dashboard_sources: + - github.com/op-rs/op-reth/etc/grafana + image: "grafana/grafana:12.3.0" + faucet: + enabled: true + test-sequencers: + sequencer: + image: us-docker.pkg.dev/oplabs-tools-artifacts/images/op-test-sequencer:9243bb0452efa3fd255556631688d1255723384a + enabled: true + chains: + chain0: + # Chain with only two nodes + participants: + sequencer: + el: + type: op-geth + log_level: "debug" + cl: + type: op-node + log_level: "debug" + extra_params: [--experimental.sequencer-api=true] + sequencer: true + verifier: + el: + type: op-reth + # Note: we use the local image for now. This allows us to run the tests in CI pipelines without publishing new docker images every time. + image: op-reth:local + extra_params: [ + --proofs-history, + --proofs-history.window=200, + --proofs-history.prune-interval=1m, + --proofs-history.storage-path=/data/proofs-history + ] + cl: + type: op-node + log_level: "debug" + extra_params: [--experimental.sequencer-api=true] + sequencer: false + network_params: + network: "kurtosis" + network_id: "2151908" + seconds_per_slot: 2 + + global_log_level: "info" + global_node_selectors: {} + global_tolerations: [] + persistent: false +ethereum_package: + participants: + - el_type: geth + cl_type: teku + cl_image: consensys/teku:25.7.1 + network_params: + preset: minimal + genesis_delay: 5 + additional_preloaded_contracts: ' + { + "0x4e59b44847b379578588920cA78FbF26c0B4956C": { + "balance": "0ETH", + "code": "0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe03601600081602082378035828234f58015156039578182fd5b8082525050506014600cf3", + "storage": {}, + "nonce": "1" + } + } + ' diff --git a/rust/op-reth/crates/tests/devnets/opreth-seq-opgeth-val.yaml b/rust/op-reth/crates/tests/devnets/opreth-seq-opgeth-val.yaml new file mode 100644 index 0000000000000..2b43aa72d173e --- /dev/null +++ b/rust/op-reth/crates/tests/devnets/opreth-seq-opgeth-val.yaml @@ -0,0 +1,74 @@ +# A simple network configuration for kurtosis 
(https://github.com/ethpandaops/optimism-package) +# Spins up chain with two participating EL/CL pairs. +# One with op-geth/op-node (verifier role) and one with op-reth/op-node (sequencer role). + +optimism_package: + observability: + enabled: true + grafana_params: + # Will load the dashboards from default branch. + dashboard_sources: + - github.com/op-rs/op-reth/etc/grafana + image: "grafana/grafana:12.3.0" + faucet: + enabled: true + test-sequencers: + sequencer: + image: us-docker.pkg.dev/oplabs-tools-artifacts/images/op-test-sequencer:9243bb0452efa3fd255556631688d1255723384a + enabled: true + chains: + chain0: + # Chain with only two nodes + participants: + sequencer: + el: + type: op-reth + # Note: we use the local image for now. This allows us to run the tests in CI pipelines without publishing new docker images every time. + image: op-reth:local + extra_params: [ + --proofs-history, + --proofs-history.window=200, + --proofs-history.prune-interval=1m, + --proofs-history.storage-path=/data/proofs-history + ] + cl: + type: op-node + log_level: "debug" + extra_params: [--experimental.sequencer-api=true] + sequencer: true + verifier: + el: + type: op-geth + log_level: "debug" + cl: + type: op-node + log_level: "debug" + extra_params: [--experimental.sequencer-api=true] + sequencer: false + + network_params: + network: "kurtosis" + network_id: "2151908" + seconds_per_slot: 2 + global_log_level: "info" + global_node_selectors: {} + global_tolerations: [] + persistent: false +ethereum_package: + participants: + - el_type: geth + cl_type: teku + cl_image: consensys/teku:25.7.1 + network_params: + preset: minimal + genesis_delay: 5 + additional_preloaded_contracts: ' + { + "0x4e59b44847b379578588920cA78FbF26c0B4956C": { + "balance": "0ETH", + "code": "0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe03601600081602082378035828234f58015156039578182fd5b8082525050506014600cf3", + "storage": {}, + "nonce": "1" + } + } + ' diff --git 
a/rust/op-reth/crates/tests/go.mod b/rust/op-reth/crates/tests/go.mod new file mode 100644 index 0000000000000..323b57f1dfa9d --- /dev/null +++ b/rust/op-reth/crates/tests/go.mod @@ -0,0 +1,286 @@ +module github.com/op-rs/op-geth + +go 1.24.0 + +// We're using the "develop" branch of the Optimism repo to include the latest changes to the `devstack` package. +require github.com/ethereum-optimism/optimism v1.16.4 + +require ( + github.com/BurntSushi/toml v1.5.0 + github.com/bmatcuk/doublestar/v4 v4.8.1 + github.com/chelnak/ysmrr v0.6.0 + github.com/ethereum/go-ethereum v1.16.3 + github.com/stretchr/testify v1.10.0 + golang.org/x/sync v0.18.0 + gopkg.in/yaml.v3 v3.0.1 +) + +require ( + github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect + github.com/DataDog/zstd v1.5.6-0.20230824185856-869dae002e5e // indirect + github.com/Masterminds/semver/v3 v3.3.1 // indirect + github.com/Microsoft/go-winio v0.6.2 // indirect + github.com/VictoriaMetrics/fastcache v1.13.0 // indirect + github.com/adrg/xdg v0.4.0 // indirect + github.com/andybalholm/brotli v1.1.0 // indirect + github.com/armon/go-metrics v0.4.1 // indirect + github.com/base/go-bip39 v1.1.0 // indirect + github.com/benbjohnson/clock v1.3.5 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/bits-and-blooms/bitset v1.20.0 // indirect + github.com/boltdb/bolt v1.3.1 // indirect + github.com/btcsuite/btcd v0.24.2 // indirect + github.com/btcsuite/btcd/btcec/v2 v2.3.4 // indirect + github.com/btcsuite/btcd/btcutil v1.1.5 // indirect + github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0 // indirect + github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f // indirect + github.com/cenkalti/backoff/v4 v4.3.0 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/cockroachdb/errors v1.11.3 // indirect + github.com/cockroachdb/fifo v0.0.0-20240606204812-0bbfbd93a7ce // indirect + github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect + 
github.com/cockroachdb/pebble v1.1.5 // indirect + github.com/cockroachdb/redact v1.1.5 // indirect + github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect + github.com/coder/websocket v1.8.13 // indirect + github.com/consensys/gnark-crypto v0.18.1 // indirect + github.com/containerd/cgroups v1.1.0 // indirect + github.com/coreos/go-systemd/v22 v22.5.0 // indirect + github.com/cpuguy83/go-md2man/v2 v2.0.5 // indirect + github.com/crate-crypto/go-eth-kzg v1.4.0 // indirect + github.com/crate-crypto/go-ipa v0.0.0-20240724233137-53bbb0ceb27a // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect + github.com/dchest/siphash v1.2.3 // indirect + github.com/deckarep/golang-set/v2 v2.6.0 // indirect + github.com/decred/dcrd/crypto/blake256 v1.0.1 // indirect + github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 // indirect + github.com/deepmap/oapi-codegen v1.8.2 // indirect + github.com/distribution/reference v0.6.0 // indirect + github.com/dlclark/regexp2 v1.7.0 // indirect + github.com/docker/docker v27.5.1+incompatible // indirect + github.com/docker/go-connections v0.5.0 // indirect + github.com/docker/go-units v0.5.0 // indirect + github.com/dop251/goja v0.0.0-20230806174421-c933cf95e127 // indirect + github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5 // indirect + github.com/elastic/gosigar v0.14.3 // indirect + github.com/emicklei/dot v1.6.2 // indirect + github.com/ethereum-optimism/go-ethereum-hdwallet v0.1.4-0.20251001155152-4eb15ccedf7e // indirect + github.com/ethereum-optimism/superchain-registry/validation v0.0.0-20260115192958-fb86a23cd30e // indirect + github.com/ethereum/c-kzg-4844/v2 v2.1.5 // indirect + github.com/ethereum/go-bigmodexpfix v0.0.0-20250911101455-f9e208c548ab // indirect + github.com/ethereum/go-verkle v0.2.2 // indirect + github.com/fatih/color v1.18.0 // indirect + 
github.com/felixge/httpsnoop v1.0.4 // indirect + github.com/ferranbt/fastssz v0.1.4 // indirect + github.com/flynn/noise v1.1.0 // indirect + github.com/francoispqt/gojay v1.2.13 // indirect + github.com/fsnotify/fsnotify v1.9.0 // indirect + github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08 // indirect + github.com/getsentry/sentry-go v0.27.0 // indirect + github.com/ghodss/yaml v1.0.0 // indirect + github.com/go-logr/logr v1.4.2 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-ole/go-ole v1.3.0 // indirect + github.com/go-sourcemap/sourcemap v2.1.3+incompatible // indirect + github.com/go-task/slim-sprig/v3 v3.0.0 // indirect + github.com/go-yaml/yaml v2.1.0+incompatible // indirect + github.com/godbus/dbus/v5 v5.1.0 // indirect + github.com/gofrs/flock v0.12.1 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang-jwt/jwt/v4 v4.5.2 // indirect + github.com/golang/snappy v1.0.0 // indirect + github.com/google/gopacket v1.1.19 // indirect + github.com/google/pprof v0.0.0-20241009165004-a3522334989c // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/gorilla/websocket v1.5.3 // indirect + github.com/graph-gophers/graphql-go v1.3.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1 // indirect + github.com/hashicorp/errwrap v1.1.0 // indirect + github.com/hashicorp/go-bexpr v0.1.11 // indirect + github.com/hashicorp/go-hclog v1.6.2 // indirect + github.com/hashicorp/go-immutable-radix v1.0.0 // indirect + github.com/hashicorp/go-metrics v0.5.4 // indirect + github.com/hashicorp/go-msgpack/v2 v2.1.2 // indirect + github.com/hashicorp/go-multierror v1.1.1 // indirect + github.com/hashicorp/golang-lru v0.5.0 // indirect + github.com/hashicorp/golang-lru/arc/v2 v2.0.7 // indirect + github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect + github.com/hashicorp/raft v1.7.3 // indirect + github.com/hashicorp/raft-boltdb/v2 v2.3.1 // indirect + github.com/holiman/billy 
v0.0.0-20250707135307-f2f9b9aae7db // indirect + github.com/holiman/bloomfilter/v2 v2.0.3 // indirect + github.com/holiman/uint256 v1.3.2 // indirect + github.com/honeycombio/otel-config-go v1.17.0 // indirect + github.com/huin/goupnp v1.3.0 // indirect + github.com/influxdata/influxdb-client-go/v2 v2.4.0 // indirect + github.com/influxdata/influxdb1-client v0.0.0-20220302092344-a9ab5670611c // indirect + github.com/influxdata/line-protocol v0.0.0-20210311194329-9aa0e372d097 // indirect + github.com/ipfs/go-cid v0.4.1 // indirect + github.com/ipfs/go-datastore v0.6.0 // indirect + github.com/ipfs/go-ds-leveldb v0.5.0 // indirect + github.com/ipfs/go-log/v2 v2.5.1 // indirect + github.com/jackpal/go-nat-pmp v1.0.2 // indirect + github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect + github.com/jbenet/goprocess v0.1.4 // indirect + github.com/klauspost/compress v1.18.0 // indirect + github.com/klauspost/cpuid/v2 v2.2.9 // indirect + github.com/koron/go-ssdp v0.0.4 // indirect + github.com/kr/pretty v0.3.1 // indirect + github.com/kr/text v0.2.0 // indirect + github.com/kurtosis-tech/kurtosis-portal/api/golang v0.0.0-20230818182330-1a86869414d2 // indirect + github.com/kurtosis-tech/kurtosis/api/golang v1.8.2-0.20250602144112-2b7d06430e48 // indirect + github.com/kurtosis-tech/kurtosis/contexts-config-store v0.0.0-20230818184218-f4e3e773463b // indirect + github.com/kurtosis-tech/kurtosis/grpc-file-transfer/golang v0.0.0-20230803130419-099ee7a4e3dc // indirect + github.com/kurtosis-tech/kurtosis/path-compression v0.0.0-20250108161014-0819b8ca912f // indirect + github.com/kurtosis-tech/stacktrace v0.0.0-20211028211901-1c67a77b5409 // indirect + github.com/libp2p/go-buffer-pool v0.1.0 // indirect + github.com/libp2p/go-flow-metrics v0.1.0 // indirect + github.com/libp2p/go-libp2p v0.36.2 // indirect + github.com/libp2p/go-libp2p-asn-util v0.4.1 // indirect + github.com/libp2p/go-libp2p-mplex v0.9.0 // indirect + github.com/libp2p/go-libp2p-pubsub v0.12.0 // indirect 
+ github.com/libp2p/go-libp2p-testing v0.12.0 // indirect + github.com/libp2p/go-mplex v0.7.0 // indirect + github.com/libp2p/go-msgio v0.3.0 // indirect + github.com/libp2p/go-nat v0.2.0 // indirect + github.com/libp2p/go-netroute v0.2.1 // indirect + github.com/libp2p/go-reuseport v0.4.0 // indirect + github.com/libp2p/go-yamux/v4 v4.0.1 // indirect + github.com/lmittmann/w3 v0.19.5 // indirect + github.com/lufia/plan9stats v0.0.0-20240513124658-fba389f38bae // indirect + github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect + github.com/mattn/go-colorable v0.1.14 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect + github.com/mattn/go-runewidth v0.0.16 // indirect + github.com/mholt/archiver v3.1.1+incompatible // indirect + github.com/miekg/dns v1.1.62 // indirect + github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect + github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect + github.com/minio/sha256-simd v1.0.1 // indirect + github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db // indirect + github.com/mitchellh/mapstructure v1.5.0 // indirect + github.com/mitchellh/pointerstructure v1.2.1 // indirect + github.com/moby/docker-image-spec v1.3.1 // indirect + github.com/mr-tron/base58 v1.2.0 // indirect + github.com/multiformats/go-base32 v0.1.0 // indirect + github.com/multiformats/go-base36 v0.2.0 // indirect + github.com/multiformats/go-multiaddr v0.14.0 // indirect + github.com/multiformats/go-multiaddr-dns v0.4.1 // indirect + github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect + github.com/multiformats/go-multibase v0.2.0 // indirect + github.com/multiformats/go-multicodec v0.9.0 // indirect + github.com/multiformats/go-multihash v0.2.3 // indirect + github.com/multiformats/go-multistream v0.5.0 // indirect + github.com/multiformats/go-varint v0.0.7 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + 
github.com/naoina/go-stringutil v0.1.0 // indirect + github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416 // indirect + github.com/nwaples/rardecode v1.1.3 // indirect + github.com/olekukonko/tablewriter v0.0.5 // indirect + github.com/onsi/ginkgo/v2 v2.20.0 // indirect + github.com/opencontainers/go-digest v1.0.0 // indirect + github.com/opencontainers/image-spec v1.1.0 // indirect + github.com/opencontainers/runtime-spec v1.2.0 // indirect + github.com/opentracing/opentracing-go v1.2.0 // indirect + github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect + github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7 // indirect + github.com/pierrec/lz4 v2.6.1+incompatible // indirect + github.com/pion/datachannel v1.5.8 // indirect + github.com/pion/dtls/v2 v2.2.12 // indirect + github.com/pion/ice/v2 v2.3.34 // indirect + github.com/pion/interceptor v0.1.30 // indirect + github.com/pion/logging v0.2.2 // indirect + github.com/pion/mdns v0.0.12 // indirect + github.com/pion/randutil v0.1.0 // indirect + github.com/pion/rtcp v1.2.14 // indirect + github.com/pion/rtp v1.8.9 // indirect + github.com/pion/sctp v1.8.33 // indirect + github.com/pion/sdp/v3 v3.0.9 // indirect + github.com/pion/srtp/v2 v2.0.20 // indirect + github.com/pion/stun v0.6.1 // indirect + github.com/pion/stun/v2 v2.0.0 // indirect + github.com/pion/transport/v2 v2.2.10 // indirect + github.com/pion/transport/v3 v3.0.7 // indirect + github.com/pion/turn/v2 v2.1.6 // indirect + github.com/pion/webrtc/v3 v3.3.0 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect + github.com/prometheus/client_golang v1.22.0 // indirect + github.com/prometheus/client_model v0.6.2 // indirect + github.com/prometheus/common v0.62.0 // indirect + github.com/prometheus/procfs v0.15.1 // indirect + github.com/protolambda/ctxlock 
v0.1.0 // indirect + github.com/quic-go/qpack v0.4.0 // indirect + github.com/quic-go/quic-go v0.46.0 // indirect + github.com/quic-go/webtransport-go v0.8.0 // indirect + github.com/raulk/go-watchdog v1.3.0 // indirect + github.com/rivo/uniseg v0.4.7 // indirect + github.com/rogpeppe/go-internal v1.13.1 // indirect + github.com/rs/cors v1.11.0 // indirect + github.com/russross/blackfriday/v2 v2.1.0 // indirect + github.com/schollz/progressbar/v3 v3.18.0 // indirect + github.com/sethvargo/go-envconfig v1.1.0 // indirect + github.com/shirou/gopsutil v3.21.11+incompatible // indirect + github.com/shirou/gopsutil/v4 v4.24.6 // indirect + github.com/shoenig/go-m1cpu v0.1.6 // indirect + github.com/sirupsen/logrus v1.9.3 // indirect + github.com/spaolacci/murmur3 v1.1.0 // indirect + github.com/spf13/afero v1.12.0 // indirect + github.com/stretchr/objx v0.5.2 // indirect + github.com/supranational/blst v0.3.16-0.20250831170142-f48500c1fdbe // indirect + github.com/syndtr/goleveldb v1.0.1-0.20220614013038-64ee5596c38a // indirect + github.com/tklauser/go-sysconf v0.3.14 // indirect + github.com/tklauser/numcpus v0.8.0 // indirect + github.com/ulikunitz/xz v0.5.12 // indirect + github.com/urfave/cli/v2 v2.27.6 // indirect + github.com/wlynxg/anet v0.0.4 // indirect + github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 // indirect + github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 // indirect + github.com/yusufpapurcu/wmi v1.2.4 // indirect + go.etcd.io/bbolt v1.3.5 // indirect + go.opentelemetry.io/auto/sdk v1.1.0 // indirect + go.opentelemetry.io/contrib/instrumentation/host v0.53.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0 // indirect + go.opentelemetry.io/contrib/instrumentation/runtime v0.53.0 // indirect + go.opentelemetry.io/contrib/propagators/b3 v1.28.0 // indirect + go.opentelemetry.io/contrib/propagators/ot v1.28.0 // indirect + go.opentelemetry.io/otel v1.34.0 // indirect + 
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.28.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.28.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.34.0 // indirect + go.opentelemetry.io/otel/metric v1.34.0 // indirect + go.opentelemetry.io/otel/sdk v1.34.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.31.0 // indirect + go.opentelemetry.io/otel/trace v1.34.0 // indirect + go.opentelemetry.io/proto/otlp v1.5.0 // indirect + go.uber.org/dig v1.18.0 // indirect + go.uber.org/fx v1.22.2 // indirect + go.uber.org/mock v0.4.0 // indirect + go.uber.org/multierr v1.11.0 // indirect + go.uber.org/zap v1.27.0 // indirect + golang.org/x/crypto v0.45.0 // indirect + golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c // indirect + golang.org/x/mod v0.29.0 // indirect + golang.org/x/net v0.47.0 // indirect + golang.org/x/sys v0.38.0 // indirect + golang.org/x/telemetry v0.0.0-20251008203120-078029d740a8 // indirect + golang.org/x/term v0.37.0 // indirect + golang.org/x/text v0.31.0 // indirect + golang.org/x/time v0.11.0 // indirect + golang.org/x/tools v0.38.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20250115164207-1a7da9e5054f // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f // indirect + google.golang.org/grpc v1.69.4 // indirect + google.golang.org/protobuf v1.36.6 // indirect + gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + lukechampine.com/blake3 v1.3.0 // indirect +) + +replace github.com/ethereum/go-ethereum => github.com/ethereum-optimism/op-geth v1.101605.0-rc.1 + +replace github.com/ethereum-optimism/optimism => ./optimism diff --git a/rust/op-reth/crates/tests/go.sum b/rust/op-reth/crates/tests/go.sum 
new file mode 100644 index 0000000000000..fc2c7004e3e61 --- /dev/null +++ b/rust/op-reth/crates/tests/go.sum @@ -0,0 +1,1192 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.37.0/go.mod h1:TS1dMSSfndXH133OKGwekG838Om/cQT0BUHV3HcBgoo= +dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU= +dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU= +dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4= +dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU= +git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= +github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96 h1:cTp8I5+VIoKjsnZuH8vjyaysT/ses3EvZeaV/1UkF2M= +github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= +github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg= +github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg= +github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= +github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/DataDog/zstd v1.5.6-0.20230824185856-869dae002e5e 
h1:ZIWapoIRN1VqT8GR8jAwb1Ie9GyehWjVcGh32Y2MznE= +github.com/DataDog/zstd v1.5.6-0.20230824185856-869dae002e5e/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= +github.com/Masterminds/semver/v3 v3.3.1 h1:QtNSWtVZ3nBfk8mAOu/B6v7FMJ+NHTIgUPi7rj+4nv4= +github.com/Masterminds/semver/v3 v3.3.1/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= +github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= +github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= +github.com/VictoriaMetrics/fastcache v1.13.0 h1:AW4mheMR5Vd9FkAPUv+NH6Nhw+fmbTMGMsNAoA/+4G0= +github.com/VictoriaMetrics/fastcache v1.13.0/go.mod h1:hHXhl4DA2fTL2HTZDJFXWgW0LNjo6B+4aj2Wmng3TjU= +github.com/adrg/xdg v0.4.0 h1:RzRqFcjH4nE5C6oTAxhBtoE2IRyjBSa62SCbyPidvls= +github.com/adrg/xdg v0.4.0/go.mod h1:N6ag73EX4wyxeaoeHctc1mas01KZgsj5tYiAIwqJE/E= +github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/allegro/bigcache v1.2.1 h1:hg1sY1raCwic3Vnsvje6TT7/pnZba83LeFck5NrFKSc= +github.com/allegro/bigcache v1.2.1/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM= +github.com/andybalholm/brotli v1.1.0 h1:eLKJA0d02Lf0mVpIDgYnqXcUn0GqVmEFny3VuID1U3M= +github.com/andybalholm/brotli v1.1.0/go.mod h1:sms7XGricyQI9K10gOSf56VKKWS4oLer58Q+mhRPtnY= +github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod 
h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= +github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA= +github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= +github.com/base/go-bip39 v1.1.0 h1:ely6zK09KaQbfX8wpcmN4pRXy0SbbqMT2QF45P1BNh0= +github.com/base/go-bip39 v1.1.0/go.mod h1:grZZXX8gYycovDC4cLS/RS0DmctofwHN+MUhedYCbO0= +github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o= +github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bits-and-blooms/bitset v1.20.0 h1:2F+rfL86jE2d/bmw7OhqUg2Sj/1rURkBn3MdfoPyRVU= +github.com/bits-and-blooms/bitset v1.20.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= +github.com/bmatcuk/doublestar/v4 v4.8.1 h1:54Bopc5c2cAvhLRAzqOGCYHYyhcDHsFF4wWIR5wKP38= +github.com/bmatcuk/doublestar/v4 v4.8.1/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc= +github.com/boltdb/bolt v1.3.1 h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4= +github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= +github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= +github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= +github.com/btcsuite/btcd v0.22.0-beta.0.20220111032746-97732e52810c/go.mod h1:tjmYdS6MLJ5/s0Fj4DbLgSbDHbEqLJrtnHecBFkdz5M= 
+github.com/btcsuite/btcd v0.23.5-0.20231215221805-96c9fd8078fd/go.mod h1:nm3Bko6zh6bWP60UxwoT5LzdGJsQJaPo6HjduXq9p6A= +github.com/btcsuite/btcd v0.24.2 h1:aLmxPguqxza+4ag8R1I2nnJjSu2iFn/kqtHTIImswcY= +github.com/btcsuite/btcd v0.24.2/go.mod h1:5C8ChTkl5ejr3WHj8tkQSCmydiMEPB0ZhQhehpq7Dgg= +github.com/btcsuite/btcd/btcec/v2 v2.1.0/go.mod h1:2VzYrv4Gm4apmbVVsSq5bqf1Ec8v56E48Vt0Y/umPgA= +github.com/btcsuite/btcd/btcec/v2 v2.1.3/go.mod h1:ctjw4H1kknNJmRN4iP1R7bTQ+v3GJkZBd6mui8ZsAZE= +github.com/btcsuite/btcd/btcec/v2 v2.3.4 h1:3EJjcN70HCu/mwqlUsGK8GcNVyLVxFDlWurTXGPFfiQ= +github.com/btcsuite/btcd/btcec/v2 v2.3.4/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04= +github.com/btcsuite/btcd/btcutil v1.0.0/go.mod h1:Uoxwv0pqYWhD//tfTiipkxNfdhG9UrLwaeswfjfdF0A= +github.com/btcsuite/btcd/btcutil v1.1.0/go.mod h1:5OapHB7A2hBBWLm48mmw4MOHNJCcUBTwmWH/0Jn8VHE= +github.com/btcsuite/btcd/btcutil v1.1.5 h1:+wER79R5670vs/ZusMTF1yTcRYE5GUsFbdjdisflzM8= +github.com/btcsuite/btcd/btcutil v1.1.5/go.mod h1:PSZZ4UitpLBWzxGd5VGOrLnmOjtPP/a6HaFo12zMs00= +github.com/btcsuite/btcd/chaincfg/chainhash v1.0.0/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= +github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= +github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0 h1:59Kx4K6lzOW5w6nFlA0v5+lk/6sjybR934QNHSJZPTQ= +github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= +github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f h1:bAs4lUbRJpnnkd9VhRV3jjAVU7DJVjMaK+IsvSeZvFo= +github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= +github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= +github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg= +github.com/btcsuite/goleveldb 
v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY= +github.com/btcsuite/goleveldb v1.0.0/go.mod h1:QiK9vBlgftBg6rWQIj6wFzbPfRjiykIEhBH4obrXJ/I= +github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= +github.com/btcsuite/snappy-go v1.0.0/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= +github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= +github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= +github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk= +github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chelnak/ysmrr v0.6.0 h1:kMhO0oI02tl/9szvxrOE0yeImtrK4KQhER0oXu1K/iM= +github.com/chelnak/ysmrr v0.6.0/go.mod h1:56JSrmQgb7/7xoMvuD87h3PE/qW6K1+BQcrgWtVLTUo= +github.com/chengxilo/virtualterm v1.0.4 h1:Z6IpERbRVlfB8WkOmtbHiDbBANU7cimRIof7mk9/PwM= +github.com/chengxilo/virtualterm v1.0.4/go.mod h1:DyxxBZz/x1iqJjFxTFcr6/x+jSpqN0iwWCOK1q10rlY= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/logex v1.2.0/go.mod 
h1:9+9sk7u7pGNWYMkh0hdiL++6OeibzJccyQU4p4MedaY= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/readline v1.5.0/go.mod h1:x22KAscuvRqlLoK9CsoYsmxoXZMMFVyOl86cAH8qUic= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/chzyer/test v0.0.0-20210722231415-061457976a23/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= +github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= +github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f h1:otljaYPt5hWxV3MUfO5dFPFiOXg9CyG5/kCfayTqsJ4= +github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU= +github.com/cockroachdb/errors v1.11.3 h1:5bA+k2Y6r+oz/6Z/RFlNeVCesGARKuC6YymtcDrbC/I= +github.com/cockroachdb/errors v1.11.3/go.mod h1:m4UIW4CDjx+R5cybPsNrRbreomiFqt8o1h1wUVazSd8= +github.com/cockroachdb/fifo v0.0.0-20240606204812-0bbfbd93a7ce h1:giXvy4KSc/6g/esnpM7Geqxka4WSqI1SZc7sMJFd3y4= +github.com/cockroachdb/fifo v0.0.0-20240606204812-0bbfbd93a7ce/go.mod h1:9/y3cnZ5GKakj/H4y9r9GTjCvAFta7KLgSHPJJYc52M= +github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE= +github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs= +github.com/cockroachdb/pebble v1.1.5 h1:5AAWCBWbat0uE0blr8qzufZP5tBjkRyy/jWe1QWLnvw= +github.com/cockroachdb/pebble v1.1.5/go.mod h1:17wO9el1YEigxkP/YtV8NtCivQDgoCyBg5c4VR/eOWo= +github.com/cockroachdb/redact v1.1.5 
h1:u1PMllDkdFfPWaNGMyLD1+so+aq3uUItthCFqzwPJ30= +github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= +github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAKVxetITBuuhv3BI9cMrmStnpT18zmgmTxunpo= +github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06/go.mod h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ= +github.com/coder/websocket v1.8.13 h1:f3QZdXy7uGVz+4uCJy2nTZyM0yTBj8yANEHhqlXZ9FE= +github.com/coder/websocket v1.8.13/go.mod h1:LNVeNrXQZfe5qhS9ALED3uA+l5pPqvwXg3CKoDBB2gs= +github.com/consensys/gnark-crypto v0.18.1 h1:RyLV6UhPRoYYzaFnPQA4qK3DyuDgkTgskDdoGqFt3fI= +github.com/consensys/gnark-crypto v0.18.1/go.mod h1:L3mXGFTe1ZN+RSJ+CLjUt9x7PNdx8ubaYfDROyp2Z8c= +github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= +github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM= +github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw= +github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= +github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= +github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= +github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= +github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.5 h1:ZtcqGrnekaHpVLArFSe4HK5DoKx1T0rq2DwVB0alcyc= +github.com/cpuguy83/go-md2man/v2 v2.0.5/go.mod 
h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/crate-crypto/go-eth-kzg v1.4.0 h1:WzDGjHk4gFg6YzV0rJOAsTK4z3Qkz5jd4RE3DAvPFkg= +github.com/crate-crypto/go-eth-kzg v1.4.0/go.mod h1:J9/u5sWfznSObptgfa92Jq8rTswn6ahQWEuiLHOjCUI= +github.com/crate-crypto/go-ipa v0.0.0-20240724233137-53bbb0ceb27a h1:W8mUrRp6NOVl3J+MYp5kPMoUZPp7aOYHtaua31lwRHg= +github.com/crate-crypto/go-ipa v0.0.0-20240724233137-53bbb0ceb27a/go.mod h1:sTwzHBvIzm2RfVCGNEBZgRyjwK40bVoun3ZnGOCafNM= +github.com/crate-crypto/go-kzg-4844 v1.1.0 h1:EN/u9k2TF6OWSHrCCDBBU6GLNMq88OspHHlMnHfoyU4= +github.com/crate-crypto/go-kzg-4844 v1.1.0/go.mod h1:JolLjpSff1tCCJKaJx4psrlEdlXuJEC996PL3tTAFks= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4= +github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c h1:pFUpOrbxDR6AkioZ1ySsx5yxlDQZ8stG2b88gTPxgJU= +github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c/go.mod h1:6UhI8N9EjYm1c2odKpFpAYeR8dsBeM7PtzQhRgxRr9U= +github.com/dchest/siphash v1.2.3 h1:QXwFc8cFOR2dSa/gE6o/HokBMWtLUaNDVd+22aKHeEA= +github.com/dchest/siphash v1.2.3/go.mod h1:0NvQU092bT0ipiFN++/rXm69QG9tVxLAlQHIXMPAkHc= +github.com/deckarep/golang-set/v2 v2.6.0 h1:XfcQbWM1LlMB8BsJ8N9vW5ehnnPVIw0je80NsVHagjM= +github.com/deckarep/golang-set/v2 v2.6.0/go.mod 
h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4= +github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc= +github.com/decred/dcrd/crypto/blake256 v1.0.1 h1:7PltbUIQB7u/FfZ39+DGa/ShuMyJ5ilcvdfma9wOH6Y= +github.com/decred/dcrd/crypto/blake256 v1.0.1/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1/go.mod h1:hyedUtir6IdtD/7lIxGeCxkaw7y45JueMRL4DIyJDKs= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 h1:rpfIENRNNilwHwZeG5+P150SMrnNEcHYvcCuK6dPZSg= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0= +github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218= +github.com/deepmap/oapi-codegen v1.6.0/go.mod h1:ryDa9AgbELGeB+YEXE1dR53yAjHwFvE9iAUlWl9Al3M= +github.com/deepmap/oapi-codegen v1.8.2 h1:SegyeYGcdi0jLLrpbCMoJxnUUn8GBXHsvr4rbzjuhfU= +github.com/deepmap/oapi-codegen v1.8.2/go.mod h1:YLgSKSDv/bZQB7N4ws6luhozi3cEdRktEqrX88CvjIw= +github.com/dgraph-io/badger v1.6.2 h1:mNw0qs90GVgGGWylh0umH5iag1j6n/PeJtNvL6KY/x8= +github.com/dgraph-io/badger v1.6.2/go.mod h1:JW2yswe3V058sS0kZ2h/AXeDSqFjxnZcRrVH//y2UQE= +github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de h1:t0UHb5vdojIDUqktM6+xJAfScFBsVpXZmqC9dsgJmeA= +github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= +github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= +github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc= +github.com/dlclark/regexp2 v1.7.0 h1:7lJfhqlPssTb1WQx4yvTHN0uElPEv52sbaECrAQxjAo= +github.com/dlclark/regexp2 v1.7.0/go.mod 
h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= +github.com/docker/docker v27.5.1+incompatible h1:4PYU5dnBYqRQi0294d1FBECqT9ECWeQAIfE8q4YnPY8= +github.com/docker/docker v27.5.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= +github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= +github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/dop251/goja v0.0.0-20211022113120-dc8c55024d06/go.mod h1:R9ET47fwRVRPZnOGvHxxhuZcbrMCuiqOz3Rlrh4KSnk= +github.com/dop251/goja v0.0.0-20230806174421-c933cf95e127 h1:qwcF+vdFrvPSEUDSX5RVoRccG8a5DhOdWdQ4zN62zzo= +github.com/dop251/goja v0.0.0-20230806174421-c933cf95e127/go.mod h1:QMWlm50DNe14hD7t24KEqZuUdC9sOTy8W6XbCU1mlw4= +github.com/dop251/goja_nodejs v0.0.0-20210225215109-d91c329300e7/go.mod h1:hn7BA7c8pLvoGndExHudxTDKZ84Pyvv+90pbBjbTz0Y= +github.com/dop251/goja_nodejs v0.0.0-20211022123610-8dd9abb0616d/go.mod h1:DngW8aVqWbuLRMHItjPUyqdj+HWPvnQe8V8y1nDpIbM= +github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5 h1:iFaUwBSo5Svw6L7HYpRu/0lE3e0BaElwnNO1qkNQxBY= +github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5/go.mod h1:qssHWj60/X5sZFNxpG4HBPDHVqxNm4DfnCKgrbZOT+s= +github.com/dsnet/golib v0.0.0-20171103203638-1ea166775780/go.mod h1:Lj+Z9rebOhdfkVLjJ8T6VcRQv3SXugXy999NBtR9aFY= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= +github.com/elastic/gosigar v0.12.0/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= 
+github.com/elastic/gosigar v0.14.3 h1:xwkKwPia+hSfg9GqrCUKYdId102m9qTJIIr7egmK/uo= +github.com/elastic/gosigar v0.14.3/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= +github.com/emicklei/dot v1.6.2 h1:08GN+DD79cy/tzN6uLCT84+2Wk9u+wvqP+Hkx/dIR8A= +github.com/emicklei/dot v1.6.2/go.mod h1:DeV7GvQtIw4h2u73RKBkkFdvVAz0D9fzeJrgPW6gy/s= +github.com/ethereum-optimism/go-ethereum-hdwallet v0.1.4-0.20251001155152-4eb15ccedf7e h1:iy1vBIzACYUyOVyoADUwvAiq2eOPC0yVsDUdolPwQjk= +github.com/ethereum-optimism/go-ethereum-hdwallet v0.1.4-0.20251001155152-4eb15ccedf7e/go.mod h1:DYj7+vYJ4cIB7zera9mv4LcAynCL5u4YVfoeUu6Wa+w= +github.com/ethereum-optimism/op-geth v1.101605.0-rc.1 h1:rzmwuBKOMZnQc4QNBm5iEqBrnEo1M5cbklWHkC5Oszo= +github.com/ethereum-optimism/op-geth v1.101605.0-rc.1/go.mod h1:9J7De8kDwXE/lrMgVEHc0F33TZqcN1Lb5nYaW6UZt38= +github.com/ethereum-optimism/superchain-registry/validation v0.0.0-20260115192958-fb86a23cd30e h1:TO1tUcwbhIrNuea/LCsQJSQ5HDWCHdrzT/5MLC1aIU4= +github.com/ethereum-optimism/superchain-registry/validation v0.0.0-20260115192958-fb86a23cd30e/go.mod h1:NZ816PzLU1TLv1RdAvYAb6KWOj4Zm5aInT0YpDVml2Y= +github.com/ethereum/c-kzg-4844/v2 v2.1.5 h1:aVtoLK5xwJ6c5RiqO8g8ptJ5KU+2Hdquf6G3aXiHh5s= +github.com/ethereum/c-kzg-4844/v2 v2.1.5/go.mod h1:u59hRTTah4Co6i9fDWtiCjTrblJv0UwsqZKCc0GfgUs= +github.com/ethereum/go-bigmodexpfix v0.0.0-20250911101455-f9e208c548ab h1:rvv6MJhy07IMfEKuARQ9TKojGqLVNxQajaXEp/BoqSk= +github.com/ethereum/go-bigmodexpfix v0.0.0-20250911101455-f9e208c548ab/go.mod h1:IuLm4IsPipXKF7CW5Lzf68PIbZ5yl7FFd74l/E0o9A8= +github.com/ethereum/go-verkle v0.2.2 h1:I2W0WjnrFUIzzVPwm8ykY+7pL2d4VhlsePn4j7cnFk8= +github.com/ethereum/go-verkle v0.2.2/go.mod h1:M3b90YRnzqKyyzBEWJGqj8Qff4IDeXnzFw0P9bFw3uk= +github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= +github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= +github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= 
+github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/ferranbt/fastssz v0.1.4 h1:OCDB+dYDEQDvAgtAGnTSidK1Pe2tW3nFV40XyMkTeDY= +github.com/ferranbt/fastssz v0.1.4/go.mod h1:Ea3+oeoRGGLGm5shYAeDgu6PGUlcvQhE2fILyD9+tGg= +github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= +github.com/flynn/noise v1.1.0 h1:KjPQoQCEFdZDiP03phOvGi11+SVVhBG2wOWAorLsstg= +github.com/flynn/noise v1.1.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag= +github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk= +github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= +github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= +github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= +github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08 h1:f6D9Hr8xV8uYKlyuj8XIruxlh9WjVjdh1gIicAS7ays= +github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww= +github.com/getkin/kin-openapi v0.53.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4= +github.com/getkin/kin-openapi v0.61.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4= +github.com/getsentry/sentry-go v0.27.0 h1:Pv98CIbtB3LkMWmXi4Joa5OOcwbmnX88sF5qbK3r3Ps= +github.com/getsentry/sentry-go 
v0.27.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY= +github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= +github.com/go-chi/chi/v5 v5.0.0/go.mod h1:BBug9lr0cqtdAhsu6R4AAdvufI0/XBzAQSsUqJpoZOs= +github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= +github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= +github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= +github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE= +github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78= +github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= 
+github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-sourcemap/sourcemap v2.1.3+incompatible h1:W1iEw64niKVGogNgBN3ePyLFfuisuzeidWPMPWmECqU= +github.com/go-sourcemap/sourcemap v2.1.3+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= +github.com/go-yaml/yaml v2.1.0+incompatible h1:RYi2hDdss1u4YE7GwixGzWwVo47T8UQwnTLB6vQiq+o= +github.com/go-yaml/yaml v2.1.0+incompatible/go.mod h1:w2MrLa16VYP0jy6N7M5kHaCkaLENm+P+Tv+MfurjSw0= +github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= +github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gofrs/flock v0.12.1 h1:MTLVXXHf8ekldpJk3AKicLij9MdwOWkZ+a/jHHZby9E= +github.com/gofrs/flock v0.12.1/go.mod h1:9zxTsyu5xtJ9DK+1tFZyibEV7y3uwDxPPfbxeeHCoD0= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang-jwt/jwt/v4 v4.5.2 h1:YtQM7lnr8iZ+j5q71MGKkNw9Mn7AjHM68uc9g5fXeUI= +github.com/golang-jwt/jwt/v4 v4.5.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod 
h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v1.0.0 h1:Oy607GVXHs7RtbggtPBnr2RmDArIsAefDwvrdWvRhGs= +github.com/golang/snappy v1.0.0/go.mod 
h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golangci/lint-1 v0.0.0-20181222135242-d2cdd8c08219/go.mod h1:/X8TswGSh1pIozq4ZwCfxS0WA5JGXguxk94ar/4c87Y= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= +github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.1-0.20220503160820-4a35382e8fc8 h1:Ep/joEub9YwcjRY6ND3+Y/w0ncE540RtGatVhtZL0/Q= +github.com/google/gofuzz v1.2.1-0.20220503160820-4a35382e8fc8/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8= +github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/pprof 
v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20230207041349-798e818bf904/go.mod h1:uglQLonpP8qtYCYyzA+8c/9qtqgA3qsXGYqCPKARAFg= +github.com/google/pprof v0.0.0-20241009165004-a3522334989c h1:NDovD0SMpBYXlE1zJmS1q55vWB/fUQBcPAqAboZSccA= +github.com/google/pprof v0.0.0-20241009165004-a3522334989c/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= +github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= +github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/graph-gophers/graphql-go v1.3.0 h1:Eb9x/q6MFpCLz7jBCiP/WTxjSDrYLR1QY41SORZyNJ0= +github.com/graph-gophers/graphql-go v1.3.0/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/grpc-gateway 
v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1 h1:VNqngBF40hVlDloBruUehVYC3ArSgIyScOAyMRqBxRg= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1/go.mod h1:RBRO7fro65R6tjKzYgLAFo0t1QEXY1Dp+i/bvpRiqiQ= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-bexpr v0.1.11 h1:6DqdA/KBjurGby9yTY0bmkathya0lfwF2SeuubCI7dY= +github.com/hashicorp/go-bexpr v0.1.11/go.mod h1:f03lAo0duBlDIUMGCuad8oLcgejw4m7U+N8T+6Kz1AE= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-hclog v1.6.2 h1:NOtoftovWkDheyUM/8JW3QMiXyxJK3uHRK7wV04nD2I= +github.com/hashicorp/go-hclog v1.6.2/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= +github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-metrics v0.5.4 h1:8mmPiIJkTPPEbAiV97IxdAGNdRdaWwVap1BU6elejKY= +github.com/hashicorp/go-metrics v0.5.4/go.mod h1:CG5yz4NZ/AI/aQt9Ucm/vdBnbh7fvmv4lxZ350i+QQI= +github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI= +github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-msgpack/v2 v2.1.2 h1:4Ee8FTp834e+ewB71RDrQ0VKpyFdrKOjvYtnQ/ltVj0= +github.com/hashicorp/go-msgpack/v2 v2.1.2/go.mod h1:upybraOAblm4S7rx0+jeNy+CWWhzywQsSRV5033mMu4= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/go-retryablehttp v0.5.3/go.mod 
h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= +github.com/hashicorp/go-uuid v1.0.0 h1:RS8zrF7PhGwyNPOtxSClXXj9HA8feRnJzgnI1RJCSnM= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru/arc/v2 v2.0.7 h1:QxkVTxwColcduO+LP7eJO56r2hFiG8zEbfAAzRv52KQ= +github.com/hashicorp/golang-lru/arc/v2 v2.0.7/go.mod h1:Pe7gBlGdc8clY5LJ0LpJXMt5AmgmWNH1g+oFFVUHOEc= +github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= +github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= +github.com/hashicorp/raft v1.7.3 h1:DxpEqZJysHN0wK+fviai5mFcSYsCkNpFUl1xpAW8Rbo= +github.com/hashicorp/raft v1.7.3/go.mod h1:DfvCGFxpAUPE0L4Uc8JLlTPtc3GzSbdH0MTJCLgnmJQ= +github.com/hashicorp/raft-boltdb v0.0.0-20231211162105-6c830fa4535e h1:SK4y8oR4ZMHPvwVHryKI88kJPJda4UyWYvG5A6iEQxc= +github.com/hashicorp/raft-boltdb v0.0.0-20231211162105-6c830fa4535e/go.mod h1:EMz/UIuG93P0MBeHh6CbXQAEe8ckVJLZjhD17lBzK5Q= +github.com/hashicorp/raft-boltdb/v2 v2.3.1 h1:ackhdCNPKblmOhjEU9+4lHSJYFkJd6Jqyvj6eW9pwkc= +github.com/hashicorp/raft-boltdb/v2 v2.3.1/go.mod h1:n4S+g43dXF1tqDT+yzcXHhXM6y7MrlUd3TTwGRcUvQE= +github.com/holiman/billy v0.0.0-20250707135307-f2f9b9aae7db h1:IZUYC/xb3giYwBLMnr8d0TGTzPKFGNTCGgGLoyeX330= +github.com/holiman/billy v0.0.0-20250707135307-f2f9b9aae7db/go.mod h1:xTEYN9KCHxuYHs+NmrmzFcnvHMzLLNiGFafCb1n3Mfg= +github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao= +github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA= +github.com/holiman/uint256 v1.3.2 h1:a9EgMPSC1AAaj1SZL5zIQD3WbwTuHrMGOerLjGmM/TA= +github.com/holiman/uint256 v1.3.2/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E= 
+github.com/honeycombio/otel-config-go v1.17.0 h1:3/zig0L3IGnfgiCrEfAwBsM0rF57+TKTyJ/a8yqW2eM= +github.com/honeycombio/otel-config-go v1.17.0/go.mod h1:g2mMdfih4sYKfXBtz2mNGvo3HiQYqX4Up4pdA8JOF2s= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc= +github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20220319035150-800ac71e25c2/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w= +github.com/influxdata/influxdb-client-go/v2 v2.4.0 h1:HGBfZYStlx3Kqvsv1h2pJixbCl/jhnFtxpKFAv9Tu5k= +github.com/influxdata/influxdb-client-go/v2 v2.4.0/go.mod h1:vLNHdxTJkIf2mSLvGrpj8TCcISApPoXkaxP8g9uRlW8= +github.com/influxdata/influxdb1-client v0.0.0-20220302092344-a9ab5670611c h1:qSHzRbhzK8RdXOsAdfDgO49TtqC1oZ+acxPrkfTxcCs= +github.com/influxdata/influxdb1-client v0.0.0-20220302092344-a9ab5670611c/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= +github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839/go.mod h1:xaLFMmpvUxqXtVkUJfg9QmT88cDaCJ3ZKgdZ78oO8Qo= +github.com/influxdata/line-protocol v0.0.0-20210311194329-9aa0e372d097 h1:vilfsDSy7TDxedi9gyBkMvAirat/oRcL0lFdJBf6tdM= +github.com/influxdata/line-protocol v0.0.0-20210311194329-9aa0e372d097/go.mod h1:xaLFMmpvUxqXtVkUJfg9QmT88cDaCJ3ZKgdZ78oO8Qo= +github.com/ipfs/go-cid v0.4.1 h1:A/T3qGvxi4kpKWWcPC/PgbvDA2bjVLO7n4UeVwnbs/s= +github.com/ipfs/go-cid v0.4.1/go.mod h1:uQHwDeX4c6CtyrFwdqyhpNcxVewur1M7l7fNU7LKwZk= +github.com/ipfs/go-datastore v0.5.0/go.mod h1:9zhEApYMTl17C8YDp7JmU7sQZi2/wqiYh73hakZ90Bk= +github.com/ipfs/go-datastore v0.6.0 h1:JKyz+Gvz1QEZw0LsX1IBn+JFCJQH4SJVFtM4uWU0Myk= +github.com/ipfs/go-datastore v0.6.0/go.mod h1:rt5M3nNbSO/8q1t4LNkLyUwRs8HupMeN/8O4Vn9YAT8= 
+github.com/ipfs/go-detect-race v0.0.1 h1:qX/xay2W3E4Q1U7d9lNs1sU9nvguX0a7319XbyQ6cOk= +github.com/ipfs/go-detect-race v0.0.1/go.mod h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46UU0LZ723meps= +github.com/ipfs/go-ds-badger v0.3.0 h1:xREL3V0EH9S219kFFueOYJJTcjgNSZ2HY1iSvN7U1Ro= +github.com/ipfs/go-ds-badger v0.3.0/go.mod h1:1ke6mXNqeV8K3y5Ak2bAA0osoTfmxUdupVCGm4QUIek= +github.com/ipfs/go-ds-leveldb v0.5.0 h1:s++MEBbD3ZKc9/8/njrn4flZLnCuY9I79v94gBUNumo= +github.com/ipfs/go-ds-leveldb v0.5.0/go.mod h1:d3XG9RUDzQ6V4SHi8+Xgj9j1XuEk1z82lquxrVbml/Q= +github.com/ipfs/go-ipfs-delay v0.0.0-20181109222059-70721b86a9a8/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= +github.com/ipfs/go-log/v2 v2.5.1 h1:1XdUzF7048prq4aBjDQQ4SL5RxftpRGdXhNRwKSAlcY= +github.com/ipfs/go-log/v2 v2.5.1/go.mod h1:prSpmC1Gpllc9UYWxDiZDreBYw7zp4Iqp1kOLU9U5UI= +github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus= +github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= +github.com/jbenet/go-cienv v0.1.0/go.mod h1:TqNnHUmJgXau0nCzC7kXWeotg3J9W34CUv5Djy1+FlA= +github.com/jbenet/go-temp-err-catcher v0.1.0 h1:zpb3ZH6wIE8Shj2sKS+khgRvf7T7RABoLk/+KKHggpk= +github.com/jbenet/go-temp-err-catcher v0.1.0/go.mod h1:0kJRvmDZXNMIiJirNPEYfhpPwbGVtZVWC34vc5WLsDk= +github.com/jbenet/goprocess v0.1.4 h1:DRGOFReOMqqDNXwW70QkacFW0YN9QnwLV0Vqk+3oU0o= +github.com/jbenet/goprocess v0.1.4/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4= +github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU= +github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= +github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= 
+github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= +github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= +github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= +github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= +github.com/klauspost/cpuid/v2 v2.2.9 h1:66ze0taIn2H33fBvCkXuv9BmCwDfafmiIVpKV9kKGuY= +github.com/klauspost/cpuid/v2 v2.2.9/go.mod h1:rqkxqrZ1EhYM9G+hXH7YdowN5R5RGN6NK4QwQ3WMXF8= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/koron/go-ssdp v0.0.4 h1:1IDwrghSKYM7yLf7XCzbByg2sJ/JcNOZRXS2jczTwz0= +github.com/koron/go-ssdp v0.0.4/go.mod 
h1:oDXq+E5IL5q0U8uSBcoAXzTzInwy5lEgC91HoKtbmZk= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kurtosis-tech/kurtosis-portal/api/golang v0.0.0-20230818182330-1a86869414d2 h1:izciXrFyFR+ihJ7nLTOkoIX5GzBPIp8gVKlw94gIc98= +github.com/kurtosis-tech/kurtosis-portal/api/golang v0.0.0-20230818182330-1a86869414d2/go.mod h1:bWSMQK3WHVTGHX9CjxPAb/LtzcmfOxID2wdzakSWQxo= +github.com/kurtosis-tech/kurtosis/api/golang v1.8.2-0.20250602144112-2b7d06430e48 h1:iBbwJrQQ+9Erq9FiEJAp/Rk4ZdMBvA8UX+irXleWu+c= +github.com/kurtosis-tech/kurtosis/api/golang v1.8.2-0.20250602144112-2b7d06430e48/go.mod h1:VZXj/IVyUGVSFy27sD6BJp+6dhZgveuOLPT/crpGjxg= +github.com/kurtosis-tech/kurtosis/contexts-config-store v0.0.0-20230818184218-f4e3e773463b h1:hMoIM99QKcYQqsnK4AF7Lovi9ZD9ac6lZLZ5D/jx2x8= +github.com/kurtosis-tech/kurtosis/contexts-config-store v0.0.0-20230818184218-f4e3e773463b/go.mod h1:4pFdrRwDz5R+Fov2ZuTaPhAVgjA2jhGh1Izf832sX7A= +github.com/kurtosis-tech/kurtosis/grpc-file-transfer/golang v0.0.0-20230803130419-099ee7a4e3dc 
h1:7IlEpSehmWcNXOFpNP24Cu5HQI3af7GCBQw//m+LnvQ= +github.com/kurtosis-tech/kurtosis/grpc-file-transfer/golang v0.0.0-20230803130419-099ee7a4e3dc/go.mod h1:TOWMQgvAJH/NiWWERGXg/plT9lS7aFcXFxCa0M5sfHo= +github.com/kurtosis-tech/kurtosis/path-compression v0.0.0-20250108161014-0819b8ca912f h1:kys3RDy0uHk+VwYS1mVh48YnogkRTAxUUCV7kpwMNOQ= +github.com/kurtosis-tech/kurtosis/path-compression v0.0.0-20250108161014-0819b8ca912f/go.mod h1:aDMrPeS7Gii8W6SDKSKyrBNgEQAUYidriyeKGf+Ml3I= +github.com/kurtosis-tech/stacktrace v0.0.0-20211028211901-1c67a77b5409 h1:YQTATifMUwZEtZYb0LVA7DK2pj8s71iY8rzweuUQ5+g= +github.com/kurtosis-tech/stacktrace v0.0.0-20211028211901-1c67a77b5409/go.mod h1:y5weVs5d9wXXHcDA1awRxkIhhHC1xxYJN8a7aXnE6S8= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/labstack/echo/v4 v4.2.1/go.mod h1:AA49e0DZ8kk5jTOOCKNuPR6oTnBS0dYiM4FW1e6jwpg= +github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k= +github.com/leanovate/gopter v0.2.11 h1:vRjThO1EKPb/1NsDXuDrzldR28RLkBflWYcU9CvzWu4= +github.com/leanovate/gopter v0.2.11/go.mod h1:aK3tzZP/C+p1m3SPRE4SYZFGP7jjkuSI4f7Xvpt0S9c= +github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8= +github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg= +github.com/libp2p/go-flow-metrics v0.1.0 h1:0iPhMI8PskQwzh57jB9WxIuIOQ0r+15PChFGkx3Q3WM= +github.com/libp2p/go-flow-metrics v0.1.0/go.mod h1:4Xi8MX8wj5aWNDAZttg6UPmc0ZrnFNsMtpsYUClFtro= +github.com/libp2p/go-libp2p v0.36.2 h1:BbqRkDaGC3/5xfaJakLV/BrpjlAuYqSB0lRvtzL3B/U= +github.com/libp2p/go-libp2p v0.36.2/go.mod h1:XO3joasRE4Eup8yCTTP/+kX+g92mOgRaadk46LmPhHY= +github.com/libp2p/go-libp2p-asn-util v0.4.1 h1:xqL7++IKD9TBFMgnLPZR6/6iYhawHKHl950SO9L6n94= +github.com/libp2p/go-libp2p-asn-util v0.4.1/go.mod 
h1:d/NI6XZ9qxw67b4e+NgpQexCIiFYJjErASrYW4PFDN8= +github.com/libp2p/go-libp2p-mplex v0.9.0 h1:R58pDRAmuBXkYugbSSXR9wrTX3+1pFM1xP2bLuodIq8= +github.com/libp2p/go-libp2p-mplex v0.9.0/go.mod h1:ro1i4kuwiFT+uMPbIDIFkcLs1KRbNp0QwnUXM+P64Og= +github.com/libp2p/go-libp2p-pubsub v0.12.0 h1:PENNZjSfk8KYxANRlpipdS7+BfLmOl3L2E/6vSNjbdI= +github.com/libp2p/go-libp2p-pubsub v0.12.0/go.mod h1:Oi0zw9aw8/Y5GC99zt+Ef2gYAl+0nZlwdJonDyOz/sE= +github.com/libp2p/go-libp2p-testing v0.12.0 h1:EPvBb4kKMWO29qP4mZGyhVzUyR25dvfUIK5WDu6iPUA= +github.com/libp2p/go-libp2p-testing v0.12.0/go.mod h1:KcGDRXyN7sQCllucn1cOOS+Dmm7ujhfEyXQL5lvkcPg= +github.com/libp2p/go-mplex v0.7.0 h1:BDhFZdlk5tbr0oyFq/xv/NPGfjbnrsDam1EvutpBDbY= +github.com/libp2p/go-mplex v0.7.0/go.mod h1:rW8ThnRcYWft/Jb2jeORBmPd6xuG3dGxWN/W168L9EU= +github.com/libp2p/go-msgio v0.3.0 h1:mf3Z8B1xcFN314sWX+2vOTShIE0Mmn2TXn3YCUQGNj0= +github.com/libp2p/go-msgio v0.3.0/go.mod h1:nyRM819GmVaF9LX3l03RMh10QdOroF++NBbxAb0mmDM= +github.com/libp2p/go-nat v0.2.0 h1:Tyz+bUFAYqGyJ/ppPPymMGbIgNRH+WqC5QrT5fKrrGk= +github.com/libp2p/go-nat v0.2.0/go.mod h1:3MJr+GRpRkyT65EpVPBstXLvOlAPzUVlG6Pwg9ohLJk= +github.com/libp2p/go-netroute v0.2.1 h1:V8kVrpD8GK0Riv15/7VN6RbUQ3URNZVosw7H2v9tksU= +github.com/libp2p/go-netroute v0.2.1/go.mod h1:hraioZr0fhBjG0ZRXJJ6Zj2IVEVNx6tDTFQfSmcq7mQ= +github.com/libp2p/go-reuseport v0.4.0 h1:nR5KU7hD0WxXCJbmw7r2rhRYruNRl2koHw8fQscQm2s= +github.com/libp2p/go-reuseport v0.4.0/go.mod h1:ZtI03j/wO5hZVDFo2jKywN6bYKWLOy8Se6DrI2E1cLU= +github.com/libp2p/go-yamux/v4 v4.0.1 h1:FfDR4S1wj6Bw2Pqbc8Uz7pCxeRBPbwsBbEdfwiCypkQ= +github.com/libp2p/go-yamux/v4 v4.0.1/go.mod h1:NWjl8ZTLOGlozrXSOZ/HlfG++39iKNnM5wwmtQP1YB4= +github.com/lmittmann/w3 v0.19.5 h1:WwVRyIwhRLfIahmpB1EglsB3o1XWsgydgrxIUp5upFQ= +github.com/lmittmann/w3 v0.19.5/go.mod h1:pN97sGGYGvsbqOYj/ms3Pd+7k/aiK/9OpNcxMmmzSOI= +github.com/lufia/plan9stats v0.0.0-20240513124658-fba389f38bae h1:dIZY4ULFcto4tAFlj1FYZl8ztUZ13bdq+PLY+NOfbyI= +github.com/lufia/plan9stats 
v0.0.0-20240513124658-fba389f38bae/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k= +github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI= +github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk= +github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU= +github.com/matryer/moq v0.0.0-20190312154309-6cfb0558e1bd/go.mod h1:9ELz6aaclSIGnZBoaSLZ3NAl1VTufbOrXBPvtcy6WiQ= +github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= +github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= +github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= 
+github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc= +github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/mholt/archiver v3.1.1+incompatible h1:1dCVxuqs0dJseYEhi5pl7MYPH9zDa1wBi7mF09cbNkU= +github.com/mholt/archiver v3.1.1+incompatible/go.mod h1:Dh2dOXnSdiLxRiPoVfIr/fI1TwETms9B8CTWfeh7ROU= +github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4= +github.com/miekg/dns v1.1.62 h1:cN8OuEF1/x5Rq6Np+h1epln8OiyPWV+lROx9LxcGgIQ= +github.com/miekg/dns v1.1.62/go.mod h1:mvDlcItzm+br7MToIKqkglaGhlFMHJ9DTNNWONWXbNQ= +github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c h1:bzE/A84HN25pxAuk9Eej1Kz9OUelF97nAc82bDquQI8= +github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c/go.mod h1:0SQS9kMwD2VsyFEB++InYyBJroV/FRmBgcydeSUcJms= +github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b h1:z78hV3sbSMAUoyUMM0I83AUIT6Hu17AWfgjzIbtrYFc= +github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b/go.mod h1:lxPUiZwKoFL8DUUmalo2yJJUCxbPKtm8OKfqr2/FTNU= +github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc h1:PTfri+PuQmWDqERdnNMiD9ZejrlswWrCpBEZgWOiTrc= +github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc/go.mod h1:cGKTAVKx4SxOuR/czcZ/E2RSJ3sfHs8FpHhQ5CWMf9s= +github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ= +github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= +github.com/minio/sha256-simd v1.0.1 
h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM= +github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8= +github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db h1:62I3jR2EmQ4l5rM/4FEfDWcRD+abF5XlKShorW5LRoQ= +github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db/go.mod h1:l0dey0ia/Uv7NcFFVbCLtqEBQbrT4OCwCSKTEv6enCw= +github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/pointerstructure v1.2.1 h1:ZhBBeX8tSlRpu/FFhXH4RC4OJzFlqsQhoHZAz4x7TIw= +github.com/mitchellh/pointerstructure v1.2.1/go.mod h1:BRAsLI5zgXmw97Lf6s25bs8ohIXc3tViBH44KcwB2g4= +github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= +github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ= +github.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFLc= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/mr-tron/base58 v1.2.0 
h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o= +github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE= +github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI= +github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0= +github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4= +github.com/multiformats/go-multiaddr v0.1.1/go.mod h1:aMKBKNEYmzmDmxfX88/vz+J5IU55txyt0p4aiWVohjo= +github.com/multiformats/go-multiaddr v0.14.0 h1:bfrHrJhrRuh/NXH5mCnemjpbGjzRw/b+tJFOD41g2tU= +github.com/multiformats/go-multiaddr v0.14.0/go.mod h1:6EkVAxtznq2yC3QT5CM1UTAwG0GTP3EWAIcjHuzQ+r4= +github.com/multiformats/go-multiaddr-dns v0.4.1 h1:whi/uCLbDS3mSEUMb1MsoT4uzUeZB0N32yzufqS0i5M= +github.com/multiformats/go-multiaddr-dns v0.4.1/go.mod h1:7hfthtB4E4pQwirrz+J0CcDUfbWzTqEzVyYKKIKpgkc= +github.com/multiformats/go-multiaddr-fmt v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E= +github.com/multiformats/go-multiaddr-fmt v0.1.0/go.mod h1:hGtDIW4PU4BqJ50gW2quDuPVjyWNZxToGUh/HwTZYJo= +github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g= +github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk= +github.com/multiformats/go-multicodec v0.9.0 h1:pb/dlPnzee/Sxv/j4PmkDRxCOi3hXTz3IbPKOXWJkmg= +github.com/multiformats/go-multicodec v0.9.0/go.mod h1:L3QTQvMIaVBkXOXXtVmYE+LI16i14xuaojr/H7Ai54k= +github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= +github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U= +github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM= +github.com/multiformats/go-multistream v0.5.0 h1:5htLSLl7lvJk3xx3qT/8Zm9J4K8vEOf/QGkvOGQAyiE= 
+github.com/multiformats/go-multistream v0.5.0/go.mod h1:n6tMZiwiP2wUsR8DgfDWw1dydlEqV3l6N3/GBsX6ILA= +github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8= +github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/naoina/go-stringutil v0.1.0 h1:rCUeRUHjBjGTSHl0VC00jUPLz8/F9dDzYI70Hzifhks= +github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0= +github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416 h1:shk/vn9oCoOTmwcouEdwIeOtOGA/ELRUw/GwvxwfT+0= +github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416/go.mod h1:NBIhNtsFMo3G2szEBne+bO4gS192HuIYRqfvOWb4i1E= +github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= +github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= +github.com/nwaples/rardecode v1.1.3 h1:cWCaZwfM5H7nAD6PyEdcVnczzV8i/JtotnyW/dD9lEc= +github.com/nwaples/rardecode v1.1.3/go.mod h1:5DzqNKiOdpKKBH87u8VlvAnPZMXcGRhxWkRpHbbfGS0= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= +github.com/nxadm/tail v1.4.11 h1:8feyoE3OzPrcshW5/MJ4sGESc5cqmGkGCWlco4l0bqY= +github.com/nxadm/tail v1.4.11/go.mod h1:OTaG3NK980DZzxbRq6lEuzgU+mug70nY11sMd4JXXHc= +github.com/olekukonko/tablewriter v0.0.5 
h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= +github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= +github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= +github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= +github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= +github.com/onsi/ginkgo/v2 v2.20.0 h1:PE84V2mHqoT1sglvHc8ZdQtPcwmvvt29WLEEO3xmdZw= +github.com/onsi/ginkgo/v2 v2.20.0/go.mod h1:lG9ey2Z29hR41WMVthyJBGUBcBhGOtoPF2VFMvBXFCI= +github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= +github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= +github.com/onsi/gomega v1.34.1 h1:EUMJIKUjM8sKjYbtxQI9A4z2o+rruxnzNvpknOXie6k= +github.com/onsi/gomega v1.34.1/go.mod h1:kU1QgUvBDLXBJq618Xvm2LUX6rSAfRaFRTcdOeDLwwY= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= 
+github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM= +github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE7dzrbT927iTk= +github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= +github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= +github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= +github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= +github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0= +github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y= +github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7 h1:oYW+YCJ1pachXTQmzR3rNLYGGz4g/UgFcjb28p/viDM= +github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7/go.mod h1:CRroGNssyjTd/qIG2FyxByd2S8JEAZXBl4qUrZf8GS0= +github.com/pierrec/lz4 v2.6.1+incompatible h1:9UY3+iC23yxF0UfGaYrGplQ+79Rg+h/q9FV9ix19jjM= +github.com/pierrec/lz4 v2.6.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= +github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= +github.com/pion/datachannel v1.5.8 h1:ph1P1NsGkazkjrvyMfhRBUAWMxugJjq2HfQifaOoSNo= +github.com/pion/datachannel v1.5.8/go.mod h1:PgmdpoaNBLX9HNzNClmdki4DYW5JtI7Yibu8QzbL3tI= +github.com/pion/dtls/v2 v2.2.7/go.mod 
h1:8WiMkebSHFD0T+dIU+UeBaoV7kDhOW5oDCzZ7WZ/F9s= +github.com/pion/dtls/v2 v2.2.12 h1:KP7H5/c1EiVAAKUmXyCzPiQe5+bCJrpOeKg/L05dunk= +github.com/pion/dtls/v2 v2.2.12/go.mod h1:d9SYc9fch0CqK90mRk1dC7AkzzpwJj6u2GU3u+9pqFE= +github.com/pion/ice/v2 v2.3.34 h1:Ic1ppYCj4tUOcPAp76U6F3fVrlSw8A9JtRXLqw6BbUM= +github.com/pion/ice/v2 v2.3.34/go.mod h1:mBF7lnigdqgtB+YHkaY/Y6s6tsyRyo4u4rPGRuOjUBQ= +github.com/pion/interceptor v0.1.30 h1:au5rlVHsgmxNi+v/mjOPazbW1SHzfx7/hYOEYQnUcxA= +github.com/pion/interceptor v0.1.30/go.mod h1:RQuKT5HTdkP2Fi0cuOS5G5WNymTjzXaGF75J4k7z2nc= +github.com/pion/logging v0.2.2 h1:M9+AIj/+pxNsDfAT64+MAVgJO0rsyLnoJKCqf//DoeY= +github.com/pion/logging v0.2.2/go.mod h1:k0/tDVsRCX2Mb2ZEmTqNa7CWsQPc+YYCB7Q+5pahoms= +github.com/pion/mdns v0.0.12 h1:CiMYlY+O0azojWDmxdNr7ADGrnZ+V6Ilfner+6mSVK8= +github.com/pion/mdns v0.0.12/go.mod h1:VExJjv8to/6Wqm1FXK+Ii/Z9tsVk/F5sD/N70cnYFbk= +github.com/pion/randutil v0.1.0 h1:CFG1UdESneORglEsnimhUjf33Rwjubwj6xfiOXBa3mA= +github.com/pion/randutil v0.1.0/go.mod h1:XcJrSMMbbMRhASFVOlj/5hQial/Y8oH/HVo7TBZq+j8= +github.com/pion/rtcp v1.2.12/go.mod h1:sn6qjxvnwyAkkPzPULIbVqSKI5Dv54Rv7VG0kNxh9L4= +github.com/pion/rtcp v1.2.14 h1:KCkGV3vJ+4DAJmvP0vaQShsb0xkRfWkO540Gy102KyE= +github.com/pion/rtcp v1.2.14/go.mod h1:sn6qjxvnwyAkkPzPULIbVqSKI5Dv54Rv7VG0kNxh9L4= +github.com/pion/rtp v1.8.3/go.mod h1:pBGHaFt/yW7bf1jjWAoUjpSNoDnw98KTMg+jWWvziqU= +github.com/pion/rtp v1.8.9 h1:E2HX740TZKaqdcPmf4pw6ZZuG8u5RlMMt+l3dxeu6Wk= +github.com/pion/rtp v1.8.9/go.mod h1:pBGHaFt/yW7bf1jjWAoUjpSNoDnw98KTMg+jWWvziqU= +github.com/pion/sctp v1.8.33 h1:dSE4wX6uTJBcNm8+YlMg7lw1wqyKHggsP5uKbdj+NZw= +github.com/pion/sctp v1.8.33/go.mod h1:beTnqSzewI53KWoG3nqB282oDMGrhNxBdb+JZnkCwRM= +github.com/pion/sdp/v3 v3.0.9 h1:pX++dCHoHUwq43kuwf3PyJfHlwIj4hXA7Vrifiq0IJY= +github.com/pion/sdp/v3 v3.0.9/go.mod h1:B5xmvENq5IXJimIO4zfp6LAe1fD9N+kFv+V/1lOdz8M= +github.com/pion/srtp/v2 v2.0.20 h1:HNNny4s+OUmG280ETrCdgFndp4ufx3/uy85EawYEhTk= +github.com/pion/srtp/v2 v2.0.20/go.mod 
h1:0KJQjA99A6/a0DOVTu1PhDSw0CXF2jTkqOoMg3ODqdA= +github.com/pion/stun v0.6.1 h1:8lp6YejULeHBF8NmV8e2787BogQhduZugh5PdhDyyN4= +github.com/pion/stun v0.6.1/go.mod h1:/hO7APkX4hZKu/D0f2lHzNyvdkTGtIy3NDmLR7kSz/8= +github.com/pion/stun/v2 v2.0.0 h1:A5+wXKLAypxQri59+tmQKVs7+l6mMM+3d+eER9ifRU0= +github.com/pion/stun/v2 v2.0.0/go.mod h1:22qRSh08fSEttYUmJZGlriq9+03jtVmXNODgLccj8GQ= +github.com/pion/transport/v2 v2.2.1/go.mod h1:cXXWavvCnFF6McHTft3DWS9iic2Mftcz1Aq29pGcU5g= +github.com/pion/transport/v2 v2.2.3/go.mod h1:q2U/tf9FEfnSBGSW6w5Qp5PFWRLRj3NjLhCCgpRK4p0= +github.com/pion/transport/v2 v2.2.4/go.mod h1:q2U/tf9FEfnSBGSW6w5Qp5PFWRLRj3NjLhCCgpRK4p0= +github.com/pion/transport/v2 v2.2.10 h1:ucLBLE8nuxiHfvkFKnkDQRYWYfp8ejf4YBOPfaQpw6Q= +github.com/pion/transport/v2 v2.2.10/go.mod h1:sq1kSLWs+cHW9E+2fJP95QudkzbK7wscs8yYgQToO5E= +github.com/pion/transport/v3 v3.0.1/go.mod h1:UY7kiITrlMv7/IKgd5eTUcaahZx5oUN3l9SzK5f5xE0= +github.com/pion/transport/v3 v3.0.7 h1:iRbMH05BzSNwhILHoBoAPxoB9xQgOaJk+591KC9P1o0= +github.com/pion/transport/v3 v3.0.7/go.mod h1:YleKiTZ4vqNxVwh77Z0zytYi7rXHl7j6uPLGhhz9rwo= +github.com/pion/turn/v2 v2.1.3/go.mod h1:huEpByKKHix2/b9kmTAM3YoX6MKP+/D//0ClgUYR2fY= +github.com/pion/turn/v2 v2.1.6 h1:Xr2niVsiPTB0FPtt+yAWKFUkU1eotQbGgpTIld4x1Gc= +github.com/pion/turn/v2 v2.1.6/go.mod h1:huEpByKKHix2/b9kmTAM3YoX6MKP+/D//0ClgUYR2fY= +github.com/pion/webrtc/v3 v3.3.0 h1:Rf4u6n6U5t5sUxhYPQk/samzU/oDv7jk6BA5hyO2F9I= +github.com/pion/webrtc/v3 v3.3.0/go.mod h1:hVmrDJvwhEertRWObeb1xzulzHGeVUoPlWvxdGzcfU0= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib 
v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q= +github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= +github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= +github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= 
+github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= +github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= +github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= +github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= +github.com/protolambda/ctxlock v0.1.0 h1:rCUY3+vRdcdZXqT07iXgyr744J2DU2LCBIXowYAjBCE= +github.com/protolambda/ctxlock v0.1.0/go.mod h1:vefhX6rIZH8rsg5ZpOJfEDYQOppZi19SfPiGOFrNnwM= +github.com/prysmaticlabs/gohashtree v0.0.4-beta h1:H/EbCuXPeTV3lpKeXGPpEV9gsUpkqOOVnWapUyeWro4= +github.com/prysmaticlabs/gohashtree v0.0.4-beta/go.mod h1:BFdtALS+Ffhg3lGQIHv9HDWuHS8cTvHZzrHWxwOtGOs= +github.com/quic-go/qpack v0.4.0 h1:Cr9BXA1sQS2SmDUWjSofMPNKmvF6IiIfDRmgU0w1ZCo= +github.com/quic-go/qpack v0.4.0/go.mod h1:UZVnYIfi5GRk+zI9UMaCPsmZ2xKJP7XBUvVyT1Knj9A= +github.com/quic-go/quic-go v0.46.0 h1:uuwLClEEyk1DNvchH8uCByQVjo3yKL9opKulExNDs7Y= +github.com/quic-go/quic-go 
v0.46.0/go.mod h1:1dLehS7TIR64+vxGR70GDcatWTOtMX2PUtnKsjbTurI= +github.com/quic-go/webtransport-go v0.8.0 h1:HxSrwun11U+LlmwpgM1kEqIqH90IT4N8auv/cD7QFJg= +github.com/quic-go/webtransport-go v0.8.0/go.mod h1:N99tjprW432Ut5ONql/aUhSLT0YVSlwHohQsuac9WaM= +github.com/raulk/go-watchdog v1.3.0 h1:oUmdlHxdkXRJlwfG0O9omj8ukerm8MEQavSiDTEtBsk= +github.com/raulk/go-watchdog v1.3.0/go.mod h1:fIvOnLbF0b0ZwkB9YU4mOW9Did//4vPZtDqv66NfsMU= +github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= +github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= +github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= +github.com/rs/cors v1.11.0 h1:0B9GE/r9Bc2UxRMMtymBkHTenPkHDv0CW4Y98GBY+po= +github.com/rs/cors v1.11.0/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/schollz/progressbar/v3 v3.18.0 h1:uXdoHABRFmNIjUfte/Ex7WtuyVslrw2wVPQmCN62HpA= +github.com/schollz/progressbar/v3 v3.18.0/go.mod h1:IsO3lpbaGuzh8zIMzgY3+J8l4C8GjO0Y9S69eFvNsec= +github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= 
+github.com/sethvargo/go-envconfig v1.1.0 h1:cWZiJxeTm7AlCvzGXrEXaSTCNgip5oJepekh/BOQuog= +github.com/sethvargo/go-envconfig v1.1.0/go.mod h1:JLd0KFWQYzyENqnEPWWZ49i4vzZo/6nRidxI8YvGiHw= +github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI= +github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= +github.com/shirou/gopsutil/v4 v4.24.6 h1:9qqCSYF2pgOU+t+NgJtp7Co5+5mHF/HyKBUckySQL64= +github.com/shirou/gopsutil/v4 v4.24.6/go.mod h1:aoebb2vxetJ/yIDZISmduFvVNPHqXQ9SEJwRXxkf0RA= +github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM= +github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= +github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU= +github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k= +github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY= +github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM= +github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0= +github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= +github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= +github.com/shurcooL/gofontwoff v0.0.0-20180329035133-29b52fc0a18d/go.mod h1:05UtEgK5zq39gLST6uB0cf3NEHjETfB4Fgr3Gx5R9Vw= +github.com/shurcooL/gopherjslib v0.0.0-20160914041154-feb6d3990c2c/go.mod h1:8d3azKNyqcHP1GaQE/c6dDgjkgSx2BZ4IoEi4F1reUI= +github.com/shurcooL/highlight_diff v0.0.0-20170515013008-09bb4053de1b/go.mod h1:ZpfEhSmds4ytuByIcDnOLkTHGUI6KNqRNPDLHDk+mUU= +github.com/shurcooL/highlight_go v0.0.0-20181028180052-98c3abbbae20/go.mod 
h1:UDKB5a1T23gOMUJrI+uSuH0VRDStOiUVSjBTRDVBVag= +github.com/shurcooL/home v0.0.0-20181020052607-80b7ffcb30f9/go.mod h1:+rgNQw2P9ARFAs37qieuu7ohDNQ3gds9msbT2yn85sg= +github.com/shurcooL/htmlg v0.0.0-20170918183704-d01228ac9e50/go.mod h1:zPn1wHpTIePGnXSHpsVPWEktKXHr6+SS6x/IKRb7cpw= +github.com/shurcooL/httperror v0.0.0-20170206035902-86b7830d14cc/go.mod h1:aYMfkZ6DWSJPJ6c4Wwz3QtW22G7mf/PEgaB9k/ik5+Y= +github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= +github.com/shurcooL/httpgzip v0.0.0-20180522190206-b1c53ac65af9/go.mod h1:919LwcH0M7/W4fcZ0/jy0qGght1GIhqyS/EgWGH2j5Q= +github.com/shurcooL/issues v0.0.0-20181008053335-6292fdc1e191/go.mod h1:e2qWDig5bLteJ4fwvDAc2NHzqFEthkqn7aOZAOpj+PQ= +github.com/shurcooL/issuesapp v0.0.0-20180602232740-048589ce2241/go.mod h1:NPpHK2TI7iSaM0buivtFUc9offApnI0Alt/K8hcHy0I= +github.com/shurcooL/notifications v0.0.0-20181007000457-627ab5aea122/go.mod h1:b5uSkrEVM1jQUspwbixRBhaIjIzL2xazXp6kntxYle0= +github.com/shurcooL/octicon v0.0.0-20181028054416-fa4f57f9efb2/go.mod h1:eWdoE5JD4R5UVWDucdOPg1g2fqQRq78IQa9zlOV1vpQ= +github.com/shurcooL/reactions v0.0.0-20181006231557-f2e0b4ca5b82/go.mod h1:TCR1lToEk4d2s07G3XGfz2QrgHXg4RJBvjrOozvoWfk= +github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4= +github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= 
+github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE= +github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA= +github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= +github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.12.0 h1:UcOPyRBYczmFn6yvphxkn9ZEOY65cpwGKb5mL36mrqs= +github.com/spf13/afero v1.12.0/go.mod h1:ZTlWwG4/ahT8W7T0WQ5uYmjI9duaLQGy3Q2OAl4sk/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.2/go.mod 
h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/supranational/blst v0.3.16-0.20250831170142-f48500c1fdbe h1:nbdqkIGOGfUAD54q1s2YBcBz/WcsxCO9HUQ4aGV5hUw= +github.com/supranational/blst v0.3.16-0.20250831170142-f48500c1fdbe/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw= +github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ= +github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= +github.com/syndtr/goleveldb v1.0.1-0.20220614013038-64ee5596c38a h1:1ur3QoCqvE5fl+nylMaIr9PVV1w343YRDtsy+Rwu7XI= +github.com/syndtr/goleveldb v1.0.1-0.20220614013038-64ee5596c38a/go.mod h1:RRCYJbIwD5jmqPI9XoAFR0OcDxqUctll6zUj/+B4S48= +github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= +github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZb78yU= +github.com/tklauser/go-sysconf v0.3.14/go.mod h1:1ym4lWMLUOhuBOPGtRcJm7tEGX4SCYNEEEtghGG/8uY= +github.com/tklauser/numcpus v0.8.0 h1:Mx4Wwe/FjZLeQsK/6kt2EOepwwSl7SmJrK5bV/dXYgY= +github.com/tklauser/numcpus v0.8.0/go.mod h1:ZJZlAY+dmR4eut8epnzf0u/VwodKmryxR8txiloSqBE= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= +github.com/ulikunitz/xz v0.5.8/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= +github.com/ulikunitz/xz v0.5.12 
h1:37Nm15o69RwBkXM0J6A5OlE67RZTfzUxTj8fB3dfcsc= +github.com/ulikunitz/xz v0.5.12/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= +github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/urfave/cli/v2 v2.27.6 h1:VdRdS98FNhKZ8/Az8B7MTyGQmpIr36O1EHybx/LaZ4g= +github.com/urfave/cli/v2 v2.27.6/go.mod h1:3Sevf16NykTbInEnD0yKkjDAeZDS0A6bzhBH5hrMvTQ= +github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= +github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= +github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= +github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU= +github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM= +github.com/wlynxg/anet v0.0.3/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA= +github.com/wlynxg/anet v0.0.4 h1:0de1OFQxnNqAu+x2FAKKCVIrnfGKQbs7FQz++tB0+Uw= +github.com/wlynxg/anet v0.0.4/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA= +github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 h1:nIPpBwaJSVYIxUFsDv3M8ofmx9yWTog9BfvIu0q41lo= +github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8/go.mod h1:HUYIGzjTL3rfEspMxjDjgmT5uz5wzYJKVo23qUhYTos= +github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 h1:gEOO8jv9F4OT7lGCjxCBTO/36wtF6j2nSip77qHd4x4= +github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1/go.mod h1:Ohn+xnUBiLI6FVj/9LpzZWtj1/D6lUovWYBkxHVV3aM= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= 
+github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= +go.etcd.io/bbolt v1.3.5 h1:XAzx9gjCb0Rxj7EoqcClPD1d5ZBxZJk0jbuoPHenBt0= +go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= +go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/contrib/detectors/aws/lambda v0.53.0 h1:KG6fOUk3EwSH1dEpsAbsLKFbn3cFwN9xDu8plGu55zI= +go.opentelemetry.io/contrib/detectors/aws/lambda v0.53.0/go.mod h1:bSd579exEkh/P5msRcom8YzVB6NsUxYKyV+D/FYOY7Y= +go.opentelemetry.io/contrib/instrumentation/host v0.53.0 h1:X4r+5n6bSqaQUbPlSO5baoM7tBvipkT0mJFyuPFnPAU= +go.opentelemetry.io/contrib/instrumentation/host v0.53.0/go.mod h1:NTaDj8VCnJxWleEcRQRQaN36+aCZjO9foNIdJunEjUQ= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0 h1:CV7UdSGJt/Ao6Gp4CXckLxVRRsRgDHoI8XjbL3PDl8s= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0/go.mod h1:FRmFuRJfag1IZ2dPkHnEoSFVgTVPUd2qf5Vi69hLb8I= +go.opentelemetry.io/contrib/instrumentation/runtime v0.53.0 h1:nOlJEAJyrcy8hexK65M+dsCHIx7CVVbybcFDNkcTcAc= +go.opentelemetry.io/contrib/instrumentation/runtime v0.53.0/go.mod h1:u79lGGIlkg3Ryw425RbMjEkGYNxSnXRyR286O840+u4= +go.opentelemetry.io/contrib/propagators/b3 v1.28.0 h1:XR6CFQrQ/ttAYmTBX2loUEFGdk1h17pxYI8828dk/1Y= +go.opentelemetry.io/contrib/propagators/b3 v1.28.0/go.mod h1:DWRkzJONLquRz7OJPh2rRbZ7MugQj62rk7g6HRnEqh0= +go.opentelemetry.io/contrib/propagators/ot v1.28.0 h1:rmlG+2pc5k5M7Y7izDrxAHZUIwDERdGMTD9oMV7llMk= +go.opentelemetry.io/contrib/propagators/ot v1.28.0/go.mod h1:MNgXIn+UrMbNGpd7xyckyo2LCHIgCdmdjEE7YNZGG+w= +go.opentelemetry.io/otel v1.34.0 h1:zRLXxLCgL1WyKsPVrgbSdMN4c0FMkDAskSTQP+0hdUY= +go.opentelemetry.io/otel v1.34.0/go.mod h1:OWFPOQ+h4G8xpyjgqo4SxJYdDQ/qmRH+wivy7zzx9oI= 
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.28.0 h1:U2guen0GhqH8o/G2un8f/aG/y++OuW6MyCo6hT9prXk= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.28.0/go.mod h1:yeGZANgEcpdx/WK0IvvRFC+2oLiMS2u4L/0Rj2M2Qr0= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.28.0 h1:aLmmtjRke7LPDQ3lvpFz+kNEH43faFhzW7v8BFIEydg= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.28.0/go.mod h1:TC1pyCt6G9Sjb4bQpShH+P5R53pO6ZuGnHuuln9xMeE= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0 h1:OeNbIYk/2C15ckl7glBlOBp5+WlYsOElzTNmiPW/x60= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0/go.mod h1:7Bept48yIeqxP2OZ9/AqIpYS94h2or0aB4FypJTc8ZM= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 h1:R3X6ZXmNPRR8ul6i3WgFURCHzaXjHdm0karRG/+dj3s= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0/go.mod h1:QWFXnDavXWwMx2EEcZsf3yxgEKAqsxQ+Syjp+seyInw= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.34.0 h1:BEj3SPM81McUZHYjRS5pEgNgnmzGJ5tRpU5krWnV8Bs= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.34.0/go.mod h1:9cKLGBDzI/F3NoHLQGm4ZrYdIHsvGt6ej6hUowxY0J4= +go.opentelemetry.io/otel/metric v1.34.0 h1:+eTR3U0MyfWjRDhmFMxe2SsW64QrZ84AOhvqS7Y+PoQ= +go.opentelemetry.io/otel/metric v1.34.0/go.mod h1:CEDrp0fy2D0MvkXE+dPV7cMi8tWZwX3dmaIhwPOaqHE= +go.opentelemetry.io/otel/sdk v1.34.0 h1:95zS4k/2GOy069d321O8jWgYsW3MzVV+KuSPKp7Wr1A= +go.opentelemetry.io/otel/sdk v1.34.0/go.mod h1:0e/pNiaMAqaykJGKbi+tSjWfNNHMTxoC9qANsCzbyxU= +go.opentelemetry.io/otel/sdk/metric v1.31.0 h1:i9hxxLJF/9kkvfHppyLL55aW7iIJz4JjxTeYusH7zMc= +go.opentelemetry.io/otel/sdk/metric v1.31.0/go.mod h1:CRInTMVvNhUKgSAMbKyTMxqOBC0zgyxzW55lZzX43Y8= +go.opentelemetry.io/otel/trace v1.34.0 h1:+ouXS2V8Rd4hp4580a8q23bg0azF2nI8cqLYnC8mh/k= +go.opentelemetry.io/otel/trace v1.34.0/go.mod h1:Svm7lSjQD7kG7KJ/MUHPVXSDGz2OX4h0M2jHBhmSfRE= +go.opentelemetry.io/proto/otlp 
v1.5.0 h1:xJvq7gMzB31/d406fB8U5CBdyQGw4P399D1aQWU/3i4= +go.opentelemetry.io/proto/otlp v1.5.0/go.mod h1:keN8WnHxOy8PG0rQZjJJ5A2ebUoafqWp0eVQ4yIXvJ4= +go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/dig v1.18.0 h1:imUL1UiY0Mg4bqbFfsRQO5G4CGRBec/ZujWTvSVp3pw= +go.uber.org/dig v1.18.0/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE= +go.uber.org/fx v1.22.2 h1:iPW+OPxv0G8w75OemJ1RAnTUrF55zOJlXlo1TbJ0Buw= +go.uber.org/fx v1.22.2/go.mod h1:o/D9n+2mLP6v1EG+qsdT1O8wKopYAsqZasju97SDFCU= +go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/mock v0.4.0 h1:VcM4ZOtdbR4f6VXfiOpwpVJDL6lCReaZ6mw31wqh7KU= +go.uber.org/mock v0.4.0/go.mod h1:a6FSlNadKUHUa9IP5Vyt1zh4fC7uAwxMutEAscFbkZc= +go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= +golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw= +golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod 
h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE= +golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= +golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= +golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q= +golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod 
h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c h1:7dEasQXItcW1xKJ2+gg5VOiBnqWrJc+rq0DPKyvvdbY= +golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c/go.mod h1:NQtJDoLvd6faHhE7m4T/1IY708gDefGGjR/iUW8yQQ8= +golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA= +golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w= +golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= 
+golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= +golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= +golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY= +golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync 
v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I= +golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= +golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190316082340-a2f829d7f35f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
+golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc= +golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/telemetry v0.0.0-20251008203120-078029d740a8 h1:LvzTn0GQhWuvKH/kVRS3R3bVAsdQWI7hvfLHGgh9+lU= +golang.org/x/telemetry v0.0.0-20251008203120-078029d740a8/go.mod h1:Pi4ztBfryZoJEkyFTI5/Ocsu2jXyDr6iSdgJiYE/uwE= +golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= 
+golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= +golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= +golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU= +golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM= +golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time 
v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0= +golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= +golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod 
h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ= +golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod 
h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg= +google.golang.org/genproto v0.0.0-20190306203927-b5d61aea6440/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto/googleapis/api v0.0.0-20250115164207-1a7da9e5054f h1:gap6+3Gk41EItBuyi4XX/bp4oqJ3UwuIMl25yGinuAA= +google.golang.org/genproto/googleapis/api v0.0.0-20250115164207-1a7da9e5054f/go.mod h1:Ic02D47M+zbarjYYUlK57y316f2MoN0gjAwI3f2S95o= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f h1:OxYkA3wjPsZyBylwymxSHa7ViiW1Sml4ToBrncvFehI= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f/go.mod h1:+2Yz8+CLJbIfL9z73EW45avw8Lmge3xVElCP9zEKi50= +google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= +google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.69.4 h1:MF5TftSMkd8GLw/m0KM6V8CMOCY6NZ1NQDPGFgbTt4A= +google.golang.org/grpc v1.69.4/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod 
h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= +google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc= +gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod 
h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q= +gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA= +grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= +honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +lukechampine.com/blake3 v1.3.0 h1:sJ3XhFINmHSrYCgl958hscfIa3bw8x4DqMP3u1YvoYE= +lukechampine.com/blake3 v1.3.0/go.mod h1:0OFRp7fBtAylGVCO40o87sbupkyIGgbpv1+M1k1LM6k= +sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck= +sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0= diff --git a/rust/op-reth/crates/tests/proofs/contracts/foundry.toml b/rust/op-reth/crates/tests/proofs/contracts/foundry.toml 
new file mode 100644 index 0000000000000..cad32fb0c14b8 --- /dev/null +++ b/rust/op-reth/crates/tests/proofs/contracts/foundry.toml @@ -0,0 +1,3 @@ +[profile.default] +src = "src" +out = "artifacts" diff --git a/rust/op-reth/crates/tests/proofs/contracts/src/MultiStorage.sol b/rust/op-reth/crates/tests/proofs/contracts/src/MultiStorage.sol new file mode 100644 index 0000000000000..fc0bac34e7460 --- /dev/null +++ b/rust/op-reth/crates/tests/proofs/contracts/src/MultiStorage.sol @@ -0,0 +1,17 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.20; + +contract MultiStorage { + uint256 public slotA; + uint256 public slotB; + address public owner; + + constructor() { + owner = msg.sender; + } + + function setValues(uint256 _a, uint256 _b) external { + slotA = _a; + slotB = _b; + } +} \ No newline at end of file diff --git a/rust/op-reth/crates/tests/proofs/contracts/src/SimpleStorage.sol b/rust/op-reth/crates/tests/proofs/contracts/src/SimpleStorage.sol new file mode 100644 index 0000000000000..a975eab781f13 --- /dev/null +++ b/rust/op-reth/crates/tests/proofs/contracts/src/SimpleStorage.sol @@ -0,0 +1,10 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.20; + +contract SimpleStorage { + uint256 public value; + + function setValue(uint256 newValue) external { + value = newValue; + } +} \ No newline at end of file diff --git a/rust/op-reth/crates/tests/proofs/contracts/src/TokenVault.sol b/rust/op-reth/crates/tests/proofs/contracts/src/TokenVault.sol new file mode 100644 index 0000000000000..3ee6f2564133b --- /dev/null +++ b/rust/op-reth/crates/tests/proofs/contracts/src/TokenVault.sol @@ -0,0 +1,55 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +/// @title TokenVault - realistic contract for eth_getProof testing +/// @notice Demonstrates mappings, nested mappings, and dynamic arrays +contract TokenVault { + struct Allowance { + uint256 amount; + bool active; + } + + // Mapping: user => balance + mapping(address => uint256) public balances; 
+ + // Nested Mapping: owner => spender => allowance info + mapping(address => mapping(address => Allowance)) public allowances; + + // Dynamic array: list of all depositors + address[] public depositors; + + constructor() { + // initialize contract with a few entries + address alice = address(0xA11CE); + address bob = address(0xB0B); + + balances[alice] = 1000; + balances[bob] = 2000; + + allowances[alice][bob] = Allowance({amount: 300, active: true}); + allowances[bob][alice] = Allowance({amount: 150, active: false}); + + depositors.push(alice); + depositors.push(bob); + } + + function deposit() external payable { + balances[msg.sender] += msg.value; + depositors.push(msg.sender); + } + + function approve(address spender, uint256 amount) external { + allowances[msg.sender][spender] = Allowance({ + amount: amount, + active: true + }); + } + + function deactivateAllowance(address spender) external { + allowances[msg.sender][spender].active = false; + } + + function getDepositors() external view returns (address[] memory) { + return depositors; + } +} \ No newline at end of file diff --git a/rust/op-reth/crates/tests/proofs/core/account_proofs_test.go b/rust/op-reth/crates/tests/proofs/core/account_proofs_test.go new file mode 100644 index 0000000000000..15772bdafab91 --- /dev/null +++ b/rust/op-reth/crates/tests/proofs/core/account_proofs_test.go @@ -0,0 +1,119 @@ +package core + +import ( + "testing" + + "github.com/ethereum-optimism/optimism/op-devstack/devtest" + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/op-rs/op-geth/proofs/utils" + "github.com/stretchr/testify/require" +) + +// TestL2MultipleTransactionsInDifferentBlocks tests transactions from different accounts +// on L2 across multiple blocks. This verifies account state changes across multiple L2 blocks. 
+// Check if the proof retrieved from geth and reth match for each account at each block height, +// and verify the proofs against the respective block state roots. +func TestL2MultipleTransactionsInDifferentBlocks(gt *testing.T) { + t := devtest.SerialT(gt) + ctx := t.Ctx() + sys := utils.NewMixedOpProofPreset(t) + + const numAccounts = 2 + const initialFunding = 10 + accounts := sys.FunderL2.NewFundedEOAs(numAccounts, eth.Ether(initialFunding)) + + recipient := sys.FunderL2.NewFundedEOA(eth.Ether(1)) + recipientAddr := recipient.Address() + + // Block 1: Send transaction from first account + currentBlock := sys.L2ELSequencerNode().WaitForBlock() + t.Logf("Current L2 block number: %d", currentBlock.Number) + + transferAmount := eth.Ether(1) + tx1 := accounts[0].Transfer(recipientAddr, transferAmount) + t.Logf("Sent transaction from account 0: %s", accounts[0].Address().Hex()) + receipt1, err := tx1.Included.Eval(ctx) + require.NoError(t, err) + require.Equal(t, types.ReceiptStatusSuccessful, receipt1.Status) + t.Logf("Transaction 1 included in block: %d", receipt1.BlockNumber.Uint64()) + + sys.L2ELValidatorNode().WaitForBlockNumber(receipt1.BlockNumber.Uint64()) + utils.FetchAndVerifyProofs(t, sys, accounts[0].Address(), []common.Hash{}, receipt1.BlockNumber.Uint64()) + sys.L2ELSequencerNode().WaitForBlockNumber(currentBlock.Number + 1) + + // Block 2: Send transaction from second account + currentBlock = sys.L2ELSequencerNode().WaitForBlock() + t.Logf("Current L2 block number: %d", currentBlock.Number) + + tx2 := accounts[1].Transfer(recipientAddr, transferAmount) + t.Logf("Sent transaction from account 1: %s", accounts[1].Address().Hex()) + receipt2, err := tx2.Included.Eval(ctx) + require.NoError(t, err) + require.Equal(t, types.ReceiptStatusSuccessful, receipt2.Status) + t.Logf("Transaction 2 included in block: %d", receipt2.BlockNumber.Uint64()) + + sys.L2ELValidatorNode().WaitForBlockNumber(receipt2.BlockNumber.Uint64()) + utils.FetchAndVerifyProofs(t, sys, 
accounts[1].Address(), []common.Hash{}, receipt2.BlockNumber.Uint64()) + + // Also verify we can get proofs for account 0 at block 2 (different block height) + utils.FetchAndVerifyProofs(t, sys, accounts[0].Address(), []common.Hash{}, receipt2.BlockNumber.Uint64()) +} + +// TestL2MultipleTransactionsInSingleBlock tests 2 different accounts sending transactions +// that get included in the same L2 block. +// It verifies that the account proofs for both accounts can be retrieved and verified +// against the same block's state root, and that the proofs from geth and reth match. +func TestL2MultipleTransactionsInSingleBlock(gt *testing.T) { + t := devtest.SerialT(gt) + ctx := t.Ctx() + sys := utils.NewMixedOpProofPreset(t) + + const numAccounts = 2 + const initialFunding = 10 + accounts := sys.FunderL2.NewFundedEOAs(numAccounts, eth.Ether(initialFunding)) + + recipient := sys.FunderL2.NewFundedEOA(eth.Ether(1)) + recipientAddr := recipient.Address() + + transferAmount := eth.Ether(1) + + t.Log("Sending transactions from both accounts") + tx0 := accounts[0].Transfer(recipientAddr, transferAmount) + t.Logf("Sent transaction from account 0: %s", accounts[0].Address().Hex()) + + tx1 := accounts[1].Transfer(recipientAddr, transferAmount) + t.Logf("Sent transaction from account 1: %s", accounts[1].Address().Hex()) + + // Wait for both transactions to be included + receipt0, err := tx0.Included.Eval(ctx) + require.NoError(t, err) + require.Equal(t, types.ReceiptStatusSuccessful, receipt0.Status) + t.Logf("Transaction 0 included in block %d", receipt0.BlockNumber.Uint64()) + + receipt1, err := tx1.Included.Eval(ctx) + require.NoError(t, err) + require.Equal(t, types.ReceiptStatusSuccessful, receipt1.Status) + t.Logf("Transaction 1 included in block %d", receipt1.BlockNumber.Uint64()) + + sys.L2ELValidatorNode().WaitForBlockNumber(receipt1.BlockNumber.Uint64()) + // Txns can land in the same or different blocks depending on timing. 
+ if receipt0.BlockNumber.Uint64() == receipt1.BlockNumber.Uint64() { + t.Logf("Both transactions included in the same L2 block: %d", receipt0.BlockNumber.Uint64()) + + // Verify both proofs against the same block state root + utils.FetchAndVerifyProofs(t, sys, accounts[0].Address(), []common.Hash{}, receipt0.BlockNumber.Uint64()) + utils.FetchAndVerifyProofs(t, sys, accounts[1].Address(), []common.Hash{}, receipt0.BlockNumber.Uint64()) + + } else { + t.Logf("Transactions in different blocks: %d and %d", + receipt0.BlockNumber.Uint64(), receipt1.BlockNumber.Uint64()) + + // Different blocks: verify each proof's merkle root matches its respective block's state root + utils.FetchAndVerifyProofs(t, sys, accounts[0].Address(), []common.Hash{}, receipt0.BlockNumber.Uint64()) + utils.FetchAndVerifyProofs(t, sys, accounts[1].Address(), []common.Hash{}, receipt1.BlockNumber.Uint64()) + } + + t.Logf("Proof for account 0 and 1 verified successfully") +} diff --git a/rust/op-reth/crates/tests/proofs/core/execute_payload_test.go b/rust/op-reth/crates/tests/proofs/core/execute_payload_test.go new file mode 100644 index 0000000000000..d49a82b945116 --- /dev/null +++ b/rust/op-reth/crates/tests/proofs/core/execute_payload_test.go @@ -0,0 +1,118 @@ +package core + +import ( + "testing" + + "github.com/ethereum-optimism/optimism/op-devstack/devtest" + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum-optimism/optimism/op-service/txplan" + "github.com/ethereum/go-ethereum/common" + "github.com/op-rs/op-geth/proofs/utils" +) + +func TestExecutePayloadSuccess(gt *testing.T) { + t := devtest.SerialT(gt) + ctx := t.Ctx() + sys := utils.NewMixedOpProofPreset(t) + user := sys.FunderL2.NewFundedEOA(eth.OneHundredthEther) + opRethELNode := sys.RethWithProofL2ELNode() + + plannedTxOption := user.PlanTransfer(user.Address(), eth.OneWei) + plannedTx := txplan.NewPlannedTx(plannedTxOption) + signedTx, err := plannedTx.Signed.Eval(ctx) + if err != nil { + 
gt.Fatal(err) + } + + raw, err := signedTx.MarshalBinary() + if err != nil { + gt.Fatal(err) + } + + lastBlock, err := opRethELNode.Escape().L2EthClient().InfoByLabel(ctx, eth.Unsafe) + if err != nil { + gt.Fatal(err) + } + + blockTime := lastBlock.Time() + 1 + gasLimit := eth.Uint64Quantity(lastBlock.GasLimit()) + + var prevRandao eth.Bytes32 + copy(prevRandao[:], lastBlock.MixDigest().Bytes()) + + var zero1559 eth.Bytes8 + minBaseFee := uint64(10) + + attrs := eth.PayloadAttributes{ + Timestamp: eth.Uint64Quantity(blockTime), + PrevRandao: prevRandao, + SuggestedFeeRecipient: lastBlock.Coinbase(), + Withdrawals: nil, + ParentBeaconBlockRoot: lastBlock.ParentBeaconRoot(), + Transactions: []eth.Data{eth.Data(raw)}, + NoTxPool: true, + GasLimit: &gasLimit, + EIP1559Params: &zero1559, + MinBaseFee: &minBaseFee, + } + + witness, err := opRethELNode.Escape().L2EthClient().PayloadExecutionWitness(ctx, lastBlock.Hash(), attrs) + if err != nil { + gt.Fatal(err) + } + if witness == nil { + gt.Fatal("empty witness") + } +} + +func TestExecutePayloadWithInvalidParentHash(gt *testing.T) { + t := devtest.SerialT(gt) + ctx := t.Ctx() + sys := utils.NewMixedOpProofPreset(t) + user := sys.FunderL2.NewFundedEOA(eth.OneHundredthEther) + opRethELNode := sys.RethWithProofL2ELNode() + + plannedTxOption := user.PlanTransfer(user.Address(), eth.OneWei) + plannedTx := txplan.NewPlannedTx(plannedTxOption) + signedTx, err := plannedTx.Signed.Eval(ctx) + if err != nil { + gt.Fatal(err) + } + + raw, err := signedTx.MarshalBinary() + if err != nil { + gt.Fatal(err) + } + + lastBlock, err := opRethELNode.Escape().L2EthClient().InfoByLabel(ctx, eth.Unsafe) + if err != nil { + gt.Fatal(err) + } + + blockTime := lastBlock.Time() + 1 + gasLimit := eth.Uint64Quantity(lastBlock.GasLimit()) + + var prevRandao eth.Bytes32 + copy(prevRandao[:], lastBlock.MixDigest().Bytes()) + + var zero1559 eth.Bytes8 + minBaseFee := uint64(10) + + attrs := eth.PayloadAttributes{ + Timestamp: 
eth.Uint64Quantity(blockTime), + PrevRandao: prevRandao, + SuggestedFeeRecipient: lastBlock.Coinbase(), + Withdrawals: nil, + ParentBeaconBlockRoot: lastBlock.ParentBeaconRoot(), + Transactions: []eth.Data{eth.Data(raw)}, + NoTxPool: true, + GasLimit: &gasLimit, + EIP1559Params: &zero1559, + MinBaseFee: &minBaseFee, + } + + _, err = opRethELNode.Escape().L2EthClient().PayloadExecutionWitness(ctx, common.Hash{}, attrs) + if err == nil { + gt.Fatal("expected error") + } +} diff --git a/rust/op-reth/crates/tests/proofs/core/execution_witness_test.go b/rust/op-reth/crates/tests/proofs/core/execution_witness_test.go new file mode 100644 index 0000000000000..eaf0742e1041f --- /dev/null +++ b/rust/op-reth/crates/tests/proofs/core/execution_witness_test.go @@ -0,0 +1,126 @@ +package core + +import ( + "strings" + "testing" + + "github.com/ethereum-optimism/optimism/op-devstack/devtest" + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/rlp" + "github.com/op-rs/op-geth/proofs/utils" + "github.com/stretchr/testify/require" +) + +// ExecutionWitness represents the response from debug_executionWitness +type ExecutionWitness struct { + Keys []hexutil.Bytes `json:"keys"` + Codes []hexutil.Bytes `json:"codes"` + State []hexutil.Bytes `json:"state"` + Headers []hexutil.Bytes `json:"headers"` +} + +// TestDebugExecutionWitness tests the debug_executionWitness RPC method on Reth L2. +// This verifies that the execution witness can be retrieved for a block containing transactions +// and that the response contains valid state, codes, keys, and headers data. 
+func TestDebugExecutionWitness(gt *testing.T) { + t := devtest.SerialT(gt) + sys := utils.NewMixedOpProofPreset(t) + opRethELNode := sys.RethWithProofL2ELNode() + + // Create a funded account and recipient + account := sys.FunderL2.NewFundedEOA(eth.Ether(10)) + recipient := sys.FunderL2.NewFundedEOA(eth.Ether(1)) + recipientAddr := recipient.Address() + + // Wait for current block + currentBlock := sys.L2ELSequencerNode().WaitForBlock() + t.Logf("Current L2 block number: %d", currentBlock.Number) + + // Send a transaction to create some state changes + transferAmount := eth.Ether(1) + tx := account.Transfer(recipientAddr, transferAmount) + t.Logf("Sent transaction from account: %s to recipient: %s", account.Address().Hex(), recipientAddr.Hex()) + + receipt, err := tx.Included.Eval(t.Ctx()) + require.NoError(t, err) + require.Equal(t, types.ReceiptStatusSuccessful, receipt.Status) + t.Logf("Transaction included in block: %d", receipt.BlockNumber.Uint64()) + + sys.L2ELValidatorNode().WaitForBlockNumber(receipt.BlockNumber.Uint64()) + l2RethClient := opRethELNode.Escape().L2EthClient() + + // Get the block to inspect the state changes + block, err := l2RethClient.InfoByNumber(t.Ctx(), receipt.BlockNumber.Uint64()) + require.NoError(t, err) + t.Logf("Block %d has state root: %s", block.NumberU64(), block.Root().Hex()) + + // Call debug_executionWitness via RPC + var witness ExecutionWitness + blockNumberHex := hexutil.EncodeUint64(block.NumberU64()) + + // Use the RPC client's CallContext method directly + err = l2RethClient.RPC().CallContext(t.Ctx(), &witness, "debug_executionWitness", blockNumberHex) + require.NoError(t, err, "debug_executionWitness RPC call should succeed") + + // Verify the witness contains expected data + require.NotEmpty(t, witness.Keys, "Witness should contain keys data") + require.NotEmpty(t, witness.Codes, "Witness should contain codes data") + require.NotEmpty(t, witness.State, "State should not be empty") + require.NotNil(t, 
witness.Headers, "Witness should contain headers data") + + // Verify the parent header is present and decode it + require.NotEmpty(t, witness.Headers, "Headers should contain at least the parent block") + parentHeaderBytes := witness.Headers[len(witness.Headers)-1] + require.NotEmpty(t, parentHeaderBytes, "Parent header should not be empty") + t.Logf("Parent header size: %d bytes", len(parentHeaderBytes)) + + // Decode the parent header to verify it's valid RLP and extract state root + var parentHeader types.Header + err = rlp.DecodeBytes(parentHeaderBytes, &parentHeader) + require.NoError(t, err, "Parent header should be valid RLP-encoded") + + // Verify the parent header matches the expected parent block + expectedParentNumber := block.NumberU64() - 1 + require.Equal(t, expectedParentNumber, parentHeader.Number.Uint64(), + "Parent header should be for block %d", expectedParentNumber) + + // Get the actual parent block from the chain to verify state root + actualParentBlock, err := l2RethClient.InfoByNumber(t.Ctx(), expectedParentNumber) + require.NoError(t, err, "Should be able to fetch parent block from chain") + + // Verify the parent header's state root matches the actual parent block's state root + require.Equal(t, actualParentBlock.Root(), parentHeader.Root, + "Parent header state root in witness should match actual parent block state root") + t.Logf("Verified parent header state root matches chain: %s", parentHeader.Root.Hex()) + + // Verify that the witness contains keys for the accounts involved in the transaction + senderAddrHex := strings.ToLower(account.Address().Hex()) + recipientAddrHex := strings.ToLower(recipientAddr.Hex()) + + // Check if the witness keys contains the accounts + // The witness format may vary, so we check for the presence of either the address or its hash + foundSender := false + foundRecipient := false + + for _, value := range witness.Keys { + keyLower := strings.ToLower(value.String()) + if strings.Contains(keyLower, 
senderAddrHex) { + foundSender = true + } + if strings.Contains(keyLower, recipientAddrHex) { + foundRecipient = true + } + } + + // We should find at least the sender since they initiated the transaction + require.True(t, foundSender, "Witness should contain state data for the transaction sender") + t.Logf("Verified sender account is present in execution witness") + + // The recipient might not always be in the witness depending on the implementation + if foundRecipient { + t.Logf("Verified recipient account is present in execution witness") + } + t.Log("Successfully retrieved and validated execution witness from Reth") +} diff --git a/rust/op-reth/crates/tests/proofs/core/init_test.go b/rust/op-reth/crates/tests/proofs/core/init_test.go new file mode 100644 index 0000000000000..df6fe1ec27d7a --- /dev/null +++ b/rust/op-reth/crates/tests/proofs/core/init_test.go @@ -0,0 +1,14 @@ +package core + +import ( + "testing" + + "github.com/ethereum-optimism/optimism/op-devstack/presets" + "github.com/op-rs/op-geth/proofs/utils" +) + +// TestMain creates the test-setups against the shared backend +func TestMain(m *testing.M) { + // Other setups may be added here, hydrated from the same orchestrator + presets.DoMain(m, utils.WithMixedOpProofPreset()) +} diff --git a/rust/op-reth/crates/tests/proofs/core/resyncing_test.go b/rust/op-reth/crates/tests/proofs/core/resyncing_test.go new file mode 100644 index 0000000000000..1e7722a3055f2 --- /dev/null +++ b/rust/op-reth/crates/tests/proofs/core/resyncing_test.go @@ -0,0 +1,64 @@ +package core + +import ( + "testing" + "time" + + "github.com/ethereum-optimism/optimism/op-devstack/devtest" + "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/wait" + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/op-rs/op-geth/proofs/utils" + "github.com/stretchr/testify/require" +) + +func TestResyncing(gt *testing.T) { + t := 
devtest.SerialT(gt) + ctx := t.Ctx() + + sys := utils.NewMixedOpProofPreset(t) + + alice := sys.FunderL2.NewFundedEOA(eth.OneEther) + bob := sys.FunderL2.NewFundedEOA(eth.OneEther) + + tx := alice.Transfer(bob.Address(), eth.OneHundredthEther) + receipt, err := tx.Included.Eval(ctx) + require.NoError(gt, err) + require.Equal(gt, types.ReceiptStatusSuccessful, receipt.Status) + + t.Logf("Stopping validator L2 CL and EL to simulate downtime") + // According to devnet config, `B` will be the validator node. + sys.L2ELValidatorNode().Stop() + sys.L2CLValidator.Stop() + + var blockNumbers []uint64 + // produce some transactions while the node is down + for i := 0; i < 5; i++ { + tx := alice.Transfer(bob.Address(), eth.OneHundredthEther) + receipt, err := tx.Included.Eval(ctx) + require.NoError(gt, err) + require.Equal(gt, types.ReceiptStatusSuccessful, receipt.Status) + blockNumbers = append(blockNumbers, receipt.BlockNumber.Uint64()) + } + + // restart the node and ensure it can sync the missing blocks + t.Logf("Restarting validator L2 CL and EL to resync") + sys.L2ELValidatorNode().Start() + sys.L2CLValidator.Start() + + time.Sleep(3 * time.Second) + + err = wait.For(t.Ctx(), 2*time.Second, func() (bool, error) { + status := sys.L2CLValidator.SyncStatus() + return status.UnsafeL2.Number > blockNumbers[len(blockNumbers)-1], nil + }) + require.NoError(gt, err, "Validator L2 CL failed to resync to latest block") + + t.Logf("Fetching and verifying proofs for transactions produced while node was down") + // verify the proofs for the transactions produced while the node was down + for _, blockNumber := range blockNumbers { + utils.FetchAndVerifyProofs(t, sys, bob.Address(), []common.Hash{}, blockNumber) + utils.FetchAndVerifyProofs(t, sys, alice.Address(), []common.Hash{}, blockNumber) + } +} diff --git a/rust/op-reth/crates/tests/proofs/core/simple_storage_test.go b/rust/op-reth/crates/tests/proofs/core/simple_storage_test.go new file mode 100644 index 
0000000000000..e2c59f6900081 --- /dev/null +++ b/rust/op-reth/crates/tests/proofs/core/simple_storage_test.go @@ -0,0 +1,172 @@ +package core + +import ( + "math/big" + "testing" + + "github.com/ethereum-optimism/optimism/op-devstack/devtest" + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum/go-ethereum/common" + "github.com/op-rs/op-geth/proofs/utils" +) + +func TestStorageProofUsingSimpleStorageContract(gt *testing.T) { + t := devtest.SerialT(gt) + + sys := utils.NewMixedOpProofPreset(t) + user := sys.FunderL2.NewFundedEOA(eth.OneHundredthEther) + + // deploy contract via helper + contract, receipt := utils.DeploySimpleStorage(t, user) + t.Logf("contract deployed at address %s in L2 block %d", contract.Address().Hex(), receipt.BlockNumber.Uint64()) + + sys.L2ELValidatorNode().WaitForBlockNumber(receipt.BlockNumber.Uint64()) + // fetch and verify initial proof (should be zeroed storage) + utils.FetchAndVerifyProofs(t, sys, contract.Address(), []common.Hash{common.HexToHash("0x0")}, receipt.BlockNumber.Uint64()) + + type caseEntry struct { + Block uint64 + Value *big.Int + } + var cases []caseEntry + for i := 1; i <= 5; i++ { + writeVal := big.NewInt(int64(i * 10)) + callRes := contract.SetValue(user, writeVal) + + cases = append(cases, caseEntry{ + Block: callRes.BlockNumber.Uint64(), + Value: writeVal, + }) + t.Logf("setValue transaction included in L2 block %d", callRes.BlockNumber) + } + + // test reset storage to zero + callRes := contract.SetValue(user, big.NewInt(0)) + cases = append(cases, caseEntry{ + Block: callRes.BlockNumber.Uint64(), + Value: big.NewInt(0), + }) + t.Logf("reset setValue transaction included in L2 block %d", callRes.BlockNumber) + + sys.L2ELValidatorNode().WaitForBlockNumber(callRes.BlockNumber.Uint64()) + // for each case, get proof and verify + for _, c := range cases { + utils.FetchAndVerifyProofs(t, sys, contract.Address(), []common.Hash{common.HexToHash("0x0")}, c.Block) + } + + // test with 
non-existent storage slot + nonExistentSlot := common.HexToHash("0xdeadbeef") + utils.FetchAndVerifyProofs(t, sys, contract.Address(), []common.Hash{nonExistentSlot}, cases[len(cases)-1].Block) +} + +func TestStorageProofUsingMultiStorageContract(gt *testing.T) { + t := devtest.SerialT(gt) + + sys := utils.NewMixedOpProofPreset(t) + user := sys.FunderL2.NewFundedEOA(eth.OneHundredthEther) + + // deploy contract via helper + contract, receipt := utils.DeployMultiStorage(t, user) + t.Logf("contract deployed at address %s in L2 block %d", contract.Address().Hex(), receipt.BlockNumber.Uint64()) + + sys.L2ELValidatorNode().WaitForBlockNumber(receipt.BlockNumber.Uint64()) + // fetch and verify initial proof (should be zeroed storage) + utils.FetchAndVerifyProofs(t, sys, contract.Address(), []common.Hash{common.HexToHash("0x0"), common.HexToHash("0x1")}, receipt.BlockNumber.Uint64()) + + // set multiple storage slots + type caseEntry struct { + Block uint64 + SlotValues map[common.Hash]*big.Int + } + var cases []caseEntry + + for i := 1; i <= 5; i++ { + aVal := big.NewInt(int64(i * 10)) + bVal := big.NewInt(int64(i * 20)) + callRes := contract.SetValues(user, aVal, bVal) + + cases = append(cases, caseEntry{ + Block: callRes.BlockNumber.Uint64(), + SlotValues: map[common.Hash]*big.Int{ + common.HexToHash("0x0"): aVal, + common.HexToHash("0x1"): bVal, + }, + }) + t.Logf("setValues transaction included in L2 block %d", callRes.BlockNumber) + } + + // test reset storage slots to zero + callRes := contract.SetValues(user, big.NewInt(0), big.NewInt(0)) + cases = append(cases, caseEntry{ + Block: callRes.BlockNumber.Uint64(), + SlotValues: map[common.Hash]*big.Int{ + common.HexToHash("0x0"): big.NewInt(0), + common.HexToHash("0x1"): big.NewInt(0), + }, + }) + t.Logf("reset setValues transaction included in L2 block %d", callRes.BlockNumber) + + sys.L2ELValidatorNode().WaitForBlockNumber(callRes.BlockNumber.Uint64()) + // for each case, get proof and verify + for _, c := range 
cases { + var slots []common.Hash + for slot := range c.SlotValues { + slots = append(slots, slot) + } + + utils.FetchAndVerifyProofs(t, sys, contract.Address(), slots, c.Block) + } +} + +func TestTokenVaultStorageProofs(gt *testing.T) { + t := devtest.SerialT(gt) + + sys := utils.NewMixedOpProofPreset(t) + // funder EOA that will deploy / interact + alice := sys.FunderL2.NewFundedEOA(eth.OneEther) + bob := sys.FunderL2.NewFundedEOA(eth.OneEther) + + // deploy contract + contract, deployBlock := utils.DeployTokenVault(t, alice) + t.Logf("TokenVault deployed at %s block=%d", contract.Address().Hex(), deployBlock.BlockNumber.Uint64()) + + userAddr := alice.Address() + + // call deposit (payable) + depositAmount := eth.OneHundredthEther + depRes := contract.Deposit(alice, depositAmount) + depositBlock := depRes.BlockNumber.Uint64() + t.Logf("deposit included in block %d", depositBlock) + + // call approve(spender, amount) - use same user as spender for simplicity, or create another funded EOA + approveAmount := big.NewInt(100) + spenderAddr := bob.Address() + approveRes := contract.Approve(alice, spenderAddr, approveAmount) + approveBlock := approveRes.BlockNumber.Uint64() + t.Logf("approve included in block %d", approveBlock) + + // call deactivateAllowance(spender) + deactRes := contract.DeactivateAllowance(alice, spenderAddr) + deactBlock := deactRes.BlockNumber.Uint64() + t.Logf("deactivateAllowance included in block %d", deactBlock) + + sys.L2ELValidatorNode().WaitForBlockNumber(deactBlock) + + // balance slot for user + balanceSlot := contract.GetBalanceSlot(userAddr) + // nested allowance slot owner=user, spender=spenderAddr + allowanceSlot := contract.GetAllowanceSlot(userAddr, spenderAddr) + // depositors[0] element slot + depositor0Slot := contract.GetDepositorSlot(0) + + // fetch & verify proofs at appropriate blocks + // balance after deposit (depositBlock) + t.Logf("Verifying balance slot %s at deposit block %d", balanceSlot.Hex(), depositBlock) + 
utils.FetchAndVerifyProofs(t, sys, contract.Address(), []common.Hash{balanceSlot, depositor0Slot}, depositBlock) + // allowance after approve (approveBlock) + t.Logf("Verifying allowance slot %s at approve block %d", allowanceSlot.Hex(), approveBlock) + utils.FetchAndVerifyProofs(t, sys, contract.Address(), []common.Hash{allowanceSlot}, approveBlock) + // after deactivation, allowance should be zero at deactBlock + t.Logf("Verifying allowance slot %s at deactivate block %d", allowanceSlot.Hex(), deactBlock) + utils.FetchAndVerifyProofs(t, sys, contract.Address(), []common.Hash{allowanceSlot}, deactBlock) +} diff --git a/rust/op-reth/crates/tests/proofs/prune/init_test.go b/rust/op-reth/crates/tests/proofs/prune/init_test.go new file mode 100644 index 0000000000000..bd9082e259d41 --- /dev/null +++ b/rust/op-reth/crates/tests/proofs/prune/init_test.go @@ -0,0 +1,14 @@ +package prune + +import ( + "testing" + + "github.com/ethereum-optimism/optimism/op-devstack/presets" + "github.com/op-rs/op-geth/proofs/utils" +) + +// TestMain creates the test-setups against the shared backend +func TestMain(m *testing.M) { + // Other setups may be added here, hydrated from the same orchestrator + presets.DoMain(m, utils.WithMixedOpProofPreset()) +} diff --git a/rust/op-reth/crates/tests/proofs/prune/prune_test.go b/rust/op-reth/crates/tests/proofs/prune/prune_test.go new file mode 100644 index 0000000000000..528851a82c3a1 --- /dev/null +++ b/rust/op-reth/crates/tests/proofs/prune/prune_test.go @@ -0,0 +1,138 @@ +package prune + +import ( + "testing" + "time" + + "github.com/ethereum-optimism/optimism/op-devstack/devtest" + "github.com/ethereum-optimism/optimism/op-service/apis" + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/op-rs/op-geth/proofs/utils" + "github.com/stretchr/testify/require" +) + +// Steps: +// 1) Create some tx and validate proof for a block 
(pre-prune): +// 2) Wait for that specific block to be pruned: +// - Ensure the chain advances enough so the pruner *can* move `earliest` past `targetBlock` +// (i.e. latest >= targetBlock + proofWindow). +// - Poll debug_proofsSyncStatus until earliest > targetBlock (meaning targetBlock is now pruned). +// +// 3) Call validate checks for getProof and check everything is consistent for the new earliest block. +func TestPruneProofStorageWithGetProofConsistency(gt *testing.T) { + t := devtest.SerialT(gt) + ctx := t.Ctx() + + sys := utils.NewMixedOpProofPreset(t) + + // Defined in the devnet yaml + var proofWindow = uint64(200) + + // An expected time within the prune should be detected. + var pruneDetectTimeout = 5 * time.Minute + + opRethELNode := sys.RethWithProofL2ELNode() + ethClient := opRethELNode.Escape().EthClient() + + // ----------------------------- + // (1) Create tx + validate proof pre-prune + // ----------------------------- + const numAccounts = 2 + const initialFunding = 10 + + accounts := sys.FunderL2.NewFundedEOAs(numAccounts, eth.Ether(initialFunding)) + recipient := sys.FunderL2.NewFundedEOA(eth.Ether(1)) + recipientAddr := recipient.Address() + transferAmount := eth.Ether(1) + + t.Log("Sending transactions from both accounts (to create state changes)") + tx0 := accounts[0].Transfer(recipientAddr, transferAmount) + tx1 := accounts[1].Transfer(recipientAddr, transferAmount) + + receipt0, err := tx0.Included.Eval(ctx) + require.NoError(t, err) + require.Equal(t, types.ReceiptStatusSuccessful, receipt0.Status) + + receipt1, err := tx1.Included.Eval(ctx) + require.NoError(t, err) + require.Equal(t, types.ReceiptStatusSuccessful, receipt1.Status) + + // Choose a deterministic target block: the later of the two inclusion blocks. 
+ targetBlock := receipt0.BlockNumber.Uint64() + if receipt1.BlockNumber.Uint64() > targetBlock { + targetBlock = receipt1.BlockNumber.Uint64() + } + t.Logf("Target block for proof validation (pre-prune): %d", targetBlock) + + // Make sure validator has the block too (keeps the test stable). + sys.L2ELValidatorNode().WaitForBlockNumber(targetBlock) + + // Pre-prune proof verification at targetBlock. + // This verifies the proof against the block's state root (efficient correctness check). + t.Logf("Pre-prune: verifying getProof proofs at block %d", targetBlock) + utils.FetchAndVerifyProofs(t, sys, accounts[0].Address(), []common.Hash{}, targetBlock) + utils.FetchAndVerifyProofs(t, sys, accounts[1].Address(), []common.Hash{}, targetBlock) + t.Log("Pre-prune: proofs verified successfully") + + // ----------------------------- + // (2) Wait until targetBlock is pruned (earliest > targetBlock) + // ----------------------------- + initialStatus := getProofSyncStatus(t, ethClient) + t.Logf("Initial proofs sync status: earliest=%d latest=%d", initialStatus.Earliest, initialStatus.Latest) + + // Ensure we advance far enough that pruning *can* move earliest past targetBlock. + // If latest < targetBlock + proofWindow, earliest cannot advance beyond targetBlock yet. 
+	requiredLatest := targetBlock + proofWindow
+	if initialStatus.Latest < requiredLatest {
+		t.Logf("Waiting for chain to advance to at least block %d so pruning can pass targetBlock", requiredLatest)
+		opRethELNode.WaitForBlockNumber(requiredLatest)
+	}
+
+	t.Logf("Waiting for pruner to advance earliest past targetBlock=%d ...", targetBlock)
+	waitUntil := time.Now().Add(pruneDetectTimeout)
+
+	var prunedStatus proofSyncStatus
+	for {
+		if time.Now().After(waitUntil) {
+			require.Failf(t, "timed out waiting for prune", "earliest did not advance past targetBlock=%d within %s", targetBlock, pruneDetectTimeout)
+		}
+
+		prunedStatus = getProofSyncStatus(t, ethClient)
+		t.Logf("Polling proofs sync status: earliest=%d latest=%d (target=%d)", prunedStatus.Earliest, prunedStatus.Latest, targetBlock)
+
+		// This is the key condition: the specific block we validated earlier is now out of window.
+		if prunedStatus.Earliest > targetBlock {
+			break
+		}
+
+		time.Sleep(5 * time.Second)
+	}
+
+	currentProofWindow := prunedStatus.Latest - prunedStatus.Earliest
+	require.GreaterOrEqual(t, currentProofWindow, proofWindow, "pruner should maintain at least the configured proof window")
+	t.Logf("Detected prune past targetBlock. 
Now earliest=%d latest=%d window=%d", prunedStatus.Earliest, prunedStatus.Latest, currentProofWindow) + + // ----------------------------- + // (3) Post-prune consistency checks for getProof + // ----------------------------- + t.Logf("Post-prune: expecting getProof verification to succeed at new earliest block=%d", prunedStatus.Earliest) + utils.FetchAndVerifyProofs(t, sys, accounts[0].Address(), []common.Hash{}, prunedStatus.Earliest) + utils.FetchAndVerifyProofs(t, sys, accounts[1].Address(), []common.Hash{}, prunedStatus.Earliest) + t.Log("Post-prune: getProof consistency checks passed") +} + +type proofSyncStatus struct { + Earliest uint64 `json:"earliest"` + Latest uint64 `json:"latest"` +} + +func getProofSyncStatus(t devtest.T, client apis.EthClient) proofSyncStatus { + var result proofSyncStatus + err := client.RPC().CallContext(t.Ctx(), &result, "debug_proofsSyncStatus") + if err != nil { + t.Errorf("debug_proofsSyncStatus call failed: %v", err) + } + return result +} diff --git a/rust/op-reth/crates/tests/proofs/reorg/init_test.go b/rust/op-reth/crates/tests/proofs/reorg/init_test.go new file mode 100644 index 0000000000000..cfcf49cbe55a3 --- /dev/null +++ b/rust/op-reth/crates/tests/proofs/reorg/init_test.go @@ -0,0 +1,14 @@ +package reorg + +import ( + "testing" + + "github.com/ethereum-optimism/optimism/op-devstack/presets" + "github.com/op-rs/op-geth/proofs/utils" +) + +// TestMain creates the test-setups against the shared backend +func TestMain(m *testing.M) { + // Other setups may be added here, hydrated from the same orchestrator + presets.DoMain(m, utils.WithMixedOpProofPreset()) +} diff --git a/rust/op-reth/crates/tests/proofs/reorg/reorg_test.go b/rust/op-reth/crates/tests/proofs/reorg/reorg_test.go new file mode 100644 index 0000000000000..69e1e0718bd24 --- /dev/null +++ b/rust/op-reth/crates/tests/proofs/reorg/reorg_test.go @@ -0,0 +1,199 @@ +package reorg + +import ( + "math/big" + "testing" + "time" + + 
"github.com/ethereum-optimism/optimism/op-devstack/devtest" + "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/wait" + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum-optimism/optimism/op-service/txplan" + "github.com/ethereum-optimism/optimism/op-test-sequencer/sequencer/seqtypes" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/op-rs/op-geth/proofs/utils" + "github.com/stretchr/testify/require" +) + +func TestReorgUsingAccountProof(gt *testing.T) { + t := devtest.SerialT(gt) + ctx := t.Ctx() + + sys := utils.NewMixedOpProofPreset(t) + l := sys.Log + + ia := sys.TestSequencer.Escape().ControlAPI(sys.L2Chain.ChainID()) + + // stop batcher on chain A + sys.L2Batcher.Stop() + + // two EOAs for a sample transfer tx used later in a conflicting block + alice := sys.FunderL2.NewFundedEOA(eth.OneHundredthEther) + bob := sys.FunderL2.NewFundedEOA(eth.OneHundredthEther) + + user := sys.FunderL2.NewFundedEOA(eth.OneEther) + contract, deployBlock := utils.DeploySimpleStorage(t, user) + t.Logf("SimpleStorage deployed at %s block=%d", contract.Address().Hex(), deployBlock.BlockNumber.Uint64()) + + time.Sleep(12 * time.Second) + divergenceHead := sys.L2Chain.WaitForBlock() + // build up some blocks that will be reorged away + + type caseEntry struct { + Block uint64 + addr common.Address + slots []common.Hash + } + var cases []caseEntry + + // deploy another contract in the reorged blocks + { + rContract, rDeployBlock := utils.DeploySimpleStorage(t, user) + t.Logf("Reorg SimpleStorage deployed at %s block=%d", rContract.Address().Hex(), rDeployBlock.BlockNumber.Uint64()) + + cases = append(cases, caseEntry{ + Block: rDeployBlock.BlockNumber.Uint64(), + addr: rContract.Address(), + slots: []common.Hash{common.HexToHash("0x0")}, + }) + } + + for i := 0; i < 3; i++ { + tx := alice.Transfer(bob.Address(), eth.OneGWei) + receipt, err := tx.Included.Eval(ctx) + require.NoError(gt, err) + 
require.Equal(gt, types.ReceiptStatusSuccessful, receipt.Status) + + cases = append(cases, caseEntry{ + Block: receipt.BlockNumber.Uint64(), + addr: alice.Address(), + slots: []common.Hash{}, + }) + cases = append(cases, caseEntry{ + Block: receipt.BlockNumber.Uint64(), + addr: bob.Address(), + slots: []common.Hash{}, + }) + + // also include the contract account in the proofs to verify + val := big.NewInt(int64(i * 10)) + callRes := contract.SetValue(user, val) + + cases = append(cases, caseEntry{ + Block: callRes.BlockNumber.Uint64(), + addr: contract.Address(), + slots: []common.Hash{common.HexToHash("0x0")}, + }) + } + + sys.L2CLSequencer.StopSequencer() + + var divergenceBlockNumber uint64 + var originalRef eth.L2BlockRef + // prepare and sequence a conflicting block for the L2A chain + { + divergenceBlockRef := sys.L2ELSequencerNode().BlockRefByNumber(divergenceHead.Number) + + l.Info("Expect to reorg the chain on block", "number", divergenceBlockRef.Number, "head", divergenceHead, "parent", divergenceBlockRef.ParentID().Hash) + divergenceBlockNumber = divergenceBlockRef.Number + originalRef = divergenceBlockRef + + parentOfDivergenceHead := divergenceBlockRef.ParentID() + + l.Info("Sequencing a conflicting block", "divergenceBlockRef", divergenceBlockRef, "parent", parentOfDivergenceHead) + + // sequence a conflicting block with a simple transfer tx, based on the parent of the parent of the unsafe head + { + err := ia.New(ctx, seqtypes.BuildOpts{ + Parent: parentOfDivergenceHead.Hash, + L1Origin: nil, + }) + require.NoError(t, err, "Expected to be able to create a new block job for sequencing on op-test-sequencer, but got error") + + // include simple transfer tx in opened block + { + to := bob.PlanTransfer(alice.Address(), eth.OneGWei) + opt := txplan.Combine(to) + ptx := txplan.NewPlannedTx(opt) + signed_tx, err := ptx.Signed.Eval(ctx) + require.NoError(t, err, "Expected to be able to evaluate a planned transaction on op-test-sequencer, but got error") + 
txdata, err := signed_tx.MarshalBinary() + require.NoError(t, err, "Expected to be able to marshal a signed transaction on op-test-sequencer, but got error") + + err = ia.IncludeTx(ctx, txdata) + require.NoError(t, err, "Expected to be able to include a signed transaction on op-test-sequencer, but got error") + + cases = append(cases, caseEntry{ + Block: divergenceHead.Number, + addr: alice.Address(), + slots: []common.Hash{}, + }) + cases = append(cases, caseEntry{ + Block: divergenceHead.Number, + addr: bob.Address(), + slots: []common.Hash{}, + }) + } + + err = ia.Next(ctx) + require.NoError(t, err, "Expected to be able to call Next() after New() on op-test-sequencer, but got error") + } + } + + // start batcher on chain A + sys.L2Batcher.Start() + + // sequence a second block with op-test-sequencer (no L1 origin override) + { + l.Info("Sequencing with op-test-sequencer (no L1 origin override)") + err := ia.New(ctx, seqtypes.BuildOpts{ + Parent: sys.L2ELSequencerNode().BlockRefByLabel(eth.Unsafe).Hash, + L1Origin: nil, + }) + require.NoError(t, err, "Expected to be able to create a new block job for sequencing on op-test-sequencer, but got error") + time.Sleep(2 * time.Second) + + err = ia.Next(ctx) + require.NoError(t, err, "Expected to be able to call Next() after New() on op-test-sequencer, but got error") + time.Sleep(2 * time.Second) + } + + // continue sequencing with consensus node (op-node) + sys.L2CLSequencer.StartSequencer() + + for i := 0; i < 3; i++ { + sys.L2Chain.WaitForBlock() + } + + latestBlock := sys.L2Chain.WaitForBlock() + sys.L2ELValidatorNode().WaitForBlockNumber(latestBlock.Number) + + // verify that the L2A validator has reorged and reached the latest block + err := wait.For(t.Ctx(), 2*time.Second, func() (bool, error) { + blockRef, err := sys.L2ELValidatorNode().Escape().EthClient().BlockRefByNumber(ctx, latestBlock.Number) + if err != nil { + // this could happen if the validator is still syncing after reorg + l.Warn("Error fetching 
block reference from validator", "error", err) + return false, nil + } + return blockRef.Hash == latestBlock.Hash, nil + }) + require.NoError(t, err, "Expected block hash to match latest block hash on validator") + + reorgedRef_A, err := sys.L2ELSequencerNode().Escape().EthClient().BlockRefByNumber(ctx, divergenceBlockNumber) + require.NoError(t, err, "Expected to be able to call BlockRefByNumber API, but got error") + + l.Info("Reorged chain on divergence block number (prior the reorg)", "number", divergenceBlockNumber, "head", originalRef.Hash, "parent", originalRef.ParentID().Hash) + l.Info("Reorged chain on divergence block number (after the reorg)", "number", divergenceBlockNumber, "head", reorgedRef_A.Hash, "parent", reorgedRef_A.ParentID().Hash) + require.NotEqual(t, originalRef.Hash, reorgedRef_A.Hash, "Expected to get different heads on divergence block number, but got the same hash, so no reorg happened on chain A") + require.Equal(t, originalRef.ParentID().Hash, reorgedRef_A.ParentHash, "Expected to get same parent hashes on divergence block number, but got different hashes") + + time.Sleep(10 * time.Second) + + // verify that the accounts involved in the conflicting blocks + for i, c := range cases { + l.Info("Verifying proof", "case", i, "addr", c.addr.Hex(), "block", c.Block) + utils.FetchAndVerifyProofs(t, sys, c.addr, c.slots, c.Block) + } +} diff --git a/rust/op-reth/crates/tests/proofs/utils/contract.go b/rust/op-reth/crates/tests/proofs/utils/contract.go new file mode 100644 index 0000000000000..e4e5cffa49141 --- /dev/null +++ b/rust/op-reth/crates/tests/proofs/utils/contract.go @@ -0,0 +1,26 @@ +package utils + +import ( + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/common" +) + +type Contract struct { + address common.Address + parsedABI abi.ABI +} + +func NewContract(address common.Address, parsedABI abi.ABI) *Contract { + return &Contract{ + address: address, + parsedABI: parsedABI, + } +} + +func (c 
*Contract) Address() common.Address {
+	return c.address
+}
+
+func (c *Contract) ABI() abi.ABI {
+	return c.parsedABI
+}
diff --git a/rust/op-reth/crates/tests/proofs/utils/multistorage.go b/rust/op-reth/crates/tests/proofs/utils/multistorage.go
new file mode 100644
index 0000000000000..9c3ae260f1635
--- /dev/null
+++ b/rust/op-reth/crates/tests/proofs/utils/multistorage.go
@@ -0,0 +1,45 @@
+package utils
+
+import (
+	"math/big"
+
+	"github.com/ethereum-optimism/optimism/op-devstack/devtest"
+	"github.com/ethereum-optimism/optimism/op-devstack/dsl"
+	"github.com/ethereum-optimism/optimism/op-service/txplan"
+	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/stretchr/testify/require"
+)
+
+const MultiStorageArtifact = "../contracts/artifacts/MultiStorage.sol/MultiStorage.json"
+
+type MultiStorage struct {
+	*Contract
+	t devtest.T
+}
+
+func (c *MultiStorage) SetValues(user *dsl.EOA, a, b *big.Int) *types.Receipt {
+	ctx := c.t.Ctx()
+	callData, err := c.parsedABI.Pack("setValues", a, b)
+	if err != nil {
+		require.NoError(c.t, err, "failed to pack set call data")
+	}
+
+	callTx := txplan.NewPlannedTx(user.Plan(), txplan.WithTo(&c.Contract.address), txplan.WithData(callData))
+	callRes, err := callTx.Included.Eval(ctx)
+	if err != nil {
+		require.NoError(c.t, err, "failed to create set tx")
+	}
+
+	if callRes.Status != types.ReceiptStatusSuccessful {
+		require.Equal(c.t, types.ReceiptStatusSuccessful, callRes.Status, "set transaction failed")
+	}
+
+	return callRes
+}
+
+func DeployMultiStorage(t devtest.T, user *dsl.EOA) (*MultiStorage, *types.Receipt) {
+	parsedABI, bin := LoadArtifact(t, MultiStorageArtifact)
+	contractAddress, receipt := DeployContract(t, user, bin)
+	contract := NewContract(contractAddress, parsedABI)
+	return &MultiStorage{contract, t}, receipt
+}
diff --git a/rust/op-reth/crates/tests/proofs/utils/preset.go b/rust/op-reth/crates/tests/proofs/utils/preset.go
new file mode 100644
index 0000000000000..166adec2edad2
--- /dev/null
+++ 
b/rust/op-reth/crates/tests/proofs/utils/preset.go @@ -0,0 +1,372 @@ +package utils + +import ( + "os" + "strings" + + "github.com/ethereum/go-ethereum/log" + + "github.com/ethereum-optimism/optimism/op-chain-ops/devkeys" + "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/artifacts" + "github.com/ethereum-optimism/optimism/op-devstack/devtest" + "github.com/ethereum-optimism/optimism/op-devstack/dsl" + "github.com/ethereum-optimism/optimism/op-devstack/presets" + "github.com/ethereum-optimism/optimism/op-devstack/shim" + "github.com/ethereum-optimism/optimism/op-devstack/stack" + "github.com/ethereum-optimism/optimism/op-devstack/stack/match" + "github.com/ethereum-optimism/optimism/op-devstack/sysgo" + "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/intentbuilder" + "github.com/ethereum-optimism/optimism/op-service/eth" +) + +type L2ELClient string + +const ( + L2ELClientGeth L2ELClient = "geth" + L2ELClientReth L2ELClient = "reth" + L2ELClientRethWithProofs L2ELClient = "reth-with-proof" +) + +type L2ELNodeID struct { + stack.L2ELNodeID + Client L2ELClient +} + +type L2ELNode struct { + *dsl.L2ELNode + Client L2ELClient +} + +type MixedOpProofPreset struct { + Log log.Logger + T devtest.T + ControlPlane stack.ControlPlane + + L1Network *dsl.L1Network + L1EL *dsl.L1ELNode + + L2Chain *dsl.L2Network + L2Batcher *dsl.L2Batcher + + L2ELSequencer *L2ELNode + L2CLSequencer *dsl.L2CLNode + + L2ELValidator *L2ELNode + L2CLValidator *dsl.L2CLNode + + Wallet *dsl.HDWallet + + FaucetL1 *dsl.Faucet + FaucetL2 *dsl.Faucet + FunderL1 *dsl.Funder + FunderL2 *dsl.Funder + + TestSequencer *dsl.TestSequencer +} + +func (m *MixedOpProofPreset) L2Network() *dsl.L2Network { + return m.L2Chain +} + +func (m *MixedOpProofPreset) L2ELSequencerNode() *dsl.L2ELNode { + return m.L2ELSequencer.L2ELNode +} + +func (m *MixedOpProofPreset) L2ELValidatorNode() *dsl.L2ELNode { + return m.L2ELValidator.L2ELNode +} + +// GethL2ELNode returns first L2 EL nodes that are running 
op-geth +func (m *MixedOpProofPreset) GethL2ELNode() *dsl.L2ELNode { + if m.L2ELSequencer.Client == L2ELClientGeth { + return m.L2ELSequencer.L2ELNode + } + + if m.L2ELValidator.Client == L2ELClientGeth { + return m.L2ELValidator.L2ELNode + } + + return nil +} + +// RethL2ELNode returns first L2 EL nodes that are running op-reth +func (m *MixedOpProofPreset) RethL2ELNode() *dsl.L2ELNode { + if m.L2ELSequencer.Client == L2ELClientReth { + return m.L2ELSequencer.L2ELNode + } + + if m.L2ELValidator.Client == L2ELClientReth { + return m.L2ELValidator.L2ELNode + } + return nil +} + +// RethWithProofL2ELNode returns first L2 EL nodes that are running op-reth with proof +func (m *MixedOpProofPreset) RethWithProofL2ELNode() *dsl.L2ELNode { + if m.L2ELSequencer.Client == L2ELClientRethWithProofs { + return m.L2ELSequencer.L2ELNode + } + + if m.L2ELValidator.Client == L2ELClientRethWithProofs { + return m.L2ELValidator.L2ELNode + } + return nil +} + +func WithMixedOpProofPreset() stack.CommonOption { + return stack.MakeCommon(DefaultMixedOpProofSystem(&DefaultMixedOpProofSystemIDs{})) +} + +func L2NodeMatcher[ + I interface { + comparable + Key() string + }, E stack.Identifiable[I]](value ...string) stack.Matcher[I, E] { + return match.MatchElemFn[I, E](func(elem E) bool { + for _, v := range value { + if !strings.Contains(elem.ID().Key(), v) { + return false + } + } + return true + }) +} + +func NewMixedOpProofPreset(t devtest.T) *MixedOpProofPreset { + system := shim.NewSystem(t) + orch := presets.Orchestrator() + orch.Hydrate(system) + + t.Gate().Equal(len(system.L2Networks()), 1, "expected exactly one L2 network") + t.Gate().Equal(len(system.L1Networks()), 1, "expected exactly one L1 network") + + l1Net := system.L1Network(match.FirstL1Network) + l2Net := system.L2Network(match.Assume(t, match.L2ChainA)) + + t.Gate().GreaterOrEqual(len(l2Net.L2CLNodes()), 2, "expected at least two L2CL nodes") + + sequencerCL := l2Net.L2CLNode(match.Assume(t, 
match.WithSequencerActive(t.Ctx()))) + sequencerELInner := l2Net.L2ELNode(match.Assume(t, match.EngineFor(sequencerCL))) + var sequencerEL *L2ELNode + if strings.Contains(sequencerELInner.ID().String(), "op-reth-with-proof") { + sequencerEL = &L2ELNode{ + L2ELNode: dsl.NewL2ELNode(sequencerELInner, orch.ControlPlane()), + Client: L2ELClientRethWithProofs, + } + } else if strings.Contains(sequencerELInner.ID().String(), "op-reth") { + sequencerEL = &L2ELNode{ + L2ELNode: dsl.NewL2ELNode(sequencerELInner, orch.ControlPlane()), + Client: L2ELClientReth, + } + } else if strings.Contains(sequencerELInner.ID().String(), "op-geth") { + sequencerEL = &L2ELNode{ + L2ELNode: dsl.NewL2ELNode(sequencerELInner, orch.ControlPlane()), + Client: L2ELClientGeth, + } + } else { + t.Error("unexpected L2EL client for sequencer") + t.FailNow() + } + + verifierCL := l2Net.L2CLNode(match.Assume(t, + match.And( + match.Not(match.WithSequencerActive(t.Ctx())), + match.Not(sequencerCL.ID()), + ))) + verifierELInner := l2Net.L2ELNode(match.Assume(t, + match.And( + match.EngineFor(verifierCL), + match.Not(sequencerEL.ID()), + ))) + var verifierEL *L2ELNode + if strings.Contains(verifierELInner.ID().String(), "op-reth-with-proof") { + verifierEL = &L2ELNode{ + L2ELNode: dsl.NewL2ELNode(verifierELInner, orch.ControlPlane()), + Client: L2ELClientRethWithProofs, + } + } else if strings.Contains(verifierELInner.ID().String(), "op-reth") { + verifierEL = &L2ELNode{ + L2ELNode: dsl.NewL2ELNode(verifierELInner, orch.ControlPlane()), + Client: L2ELClientReth, + } + } else if strings.Contains(verifierELInner.ID().String(), "op-geth") { + verifierEL = &L2ELNode{ + L2ELNode: dsl.NewL2ELNode(verifierELInner, orch.ControlPlane()), + Client: L2ELClientGeth, + } + } else { + t.Error("unexpected L2EL client for verifier") + t.FailNow() + } + + out := &MixedOpProofPreset{ + Log: t.Logger(), + T: t, + ControlPlane: orch.ControlPlane(), + L1Network: dsl.NewL1Network(l1Net), + L1EL: 
dsl.NewL1ELNode(l1Net.L1ELNode(match.Assume(t, match.FirstL1EL))), + L2Chain: dsl.NewL2Network(l2Net, orch.ControlPlane()), + L2Batcher: dsl.NewL2Batcher(l2Net.L2Batcher(match.Assume(t, match.FirstL2Batcher))), + L2ELSequencer: sequencerEL, + L2CLSequencer: dsl.NewL2CLNode(sequencerCL, orch.ControlPlane()), + L2ELValidator: verifierEL, + L2CLValidator: dsl.NewL2CLNode(verifierCL, orch.ControlPlane()), + Wallet: dsl.NewRandomHDWallet(t, 30), // Random for test isolation + FaucetL2: dsl.NewFaucet(l2Net.Faucet(match.Assume(t, match.FirstFaucet))), + + TestSequencer: dsl.NewTestSequencer(system.TestSequencer(match.Assume(t, match.FirstTestSequencer))), + } + out.FaucetL1 = dsl.NewFaucet(out.L1Network.Escape().Faucet(match.Assume(t, match.FirstFaucet))) + out.FunderL1 = dsl.NewFunder(out.Wallet, out.FaucetL1, out.L1EL) + out.FunderL2 = dsl.NewFunder(out.Wallet, out.FaucetL2, out.L2ELSequencer) + return out +} + +type DefaultMixedOpProofSystemIDs struct { + L1 stack.L1NetworkID + L1EL stack.L1ELNodeID + L1CL stack.L1CLNodeID + + L2 stack.L2NetworkID + + L2CLSequencer stack.L2CLNodeID + L2ELSequencer L2ELNodeID + + L2CLValidator stack.L2CLNodeID + L2ELValidator L2ELNodeID + + L2Batcher stack.L2BatcherID + L2Proposer stack.L2ProposerID + L2Challenger stack.L2ChallengerID + + TestSequencer stack.TestSequencerID +} + +func NewDefaultMixedOpProofSystemIDs(l1ID, l2ID eth.ChainID) DefaultMixedOpProofSystemIDs { + ids := DefaultMixedOpProofSystemIDs{ + L1: stack.L1NetworkID(l1ID), + L1EL: stack.NewL1ELNodeID("l1", l1ID), + L1CL: stack.NewL1CLNodeID("l1", l1ID), + L2: stack.L2NetworkID(l2ID), + L2CLSequencer: stack.NewL2CLNodeID("sequencer", l2ID), + L2CLValidator: stack.NewL2CLNodeID("validator", l2ID), + L2Batcher: stack.NewL2BatcherID("main", l2ID), + L2Proposer: stack.NewL2ProposerID("main", l2ID), + L2Challenger: stack.NewL2ChallengerID("main", l2ID), + TestSequencer: "test-sequencer", + } + + // default to op-geth for sequencer and op-reth-with-proof for validator + switch 
os.Getenv("OP_DEVSTACK_PROOF_SEQUENCER_EL") { + case "op-reth-with-proof": + ids.L2ELSequencer = L2ELNodeID{ + L2ELNodeID: stack.NewL2ELNodeID("sequencer-op-reth-with-proof", l2ID), + Client: L2ELClientRethWithProofs, + } + case "op-reth": + ids.L2ELSequencer = L2ELNodeID{ + L2ELNodeID: stack.NewL2ELNodeID("sequencer-op-reth", l2ID), + Client: L2ELClientReth, + } + default: + ids.L2ELSequencer = L2ELNodeID{ + L2ELNodeID: stack.NewL2ELNodeID("sequencer-op-geth", l2ID), + Client: L2ELClientGeth, + } + } + + switch os.Getenv("OP_DEVSTACK_PROOF_VALIDATOR_EL") { + case "op-geth": + ids.L2ELValidator = L2ELNodeID{ + L2ELNodeID: stack.NewL2ELNodeID("validator-op-geth", l2ID), + Client: L2ELClientGeth, + } + case "op-reth": + ids.L2ELValidator = L2ELNodeID{ + L2ELNodeID: stack.NewL2ELNodeID("validator-op-reth", l2ID), + Client: L2ELClientReth, + } + default: + ids.L2ELValidator = L2ELNodeID{ + L2ELNodeID: stack.NewL2ELNodeID("validator-op-reth-with-proof", l2ID), + Client: L2ELClientRethWithProofs, + } + } + + return ids +} + +func DefaultMixedOpProofSystem(dest *DefaultMixedOpProofSystemIDs) stack.Option[*sysgo.Orchestrator] { + ids := NewDefaultMixedOpProofSystemIDs(sysgo.DefaultL1ID, sysgo.DefaultL2AID) + return defaultMixedOpProofSystemOpts(&ids, dest) +} + +func defaultMixedOpProofSystemOpts(src, dest *DefaultMixedOpProofSystemIDs) stack.CombinedOption[*sysgo.Orchestrator] { + opt := stack.Combine[*sysgo.Orchestrator]() + opt.Add(stack.BeforeDeploy(func(o *sysgo.Orchestrator) { + o.P().Logger().Info("Setting up") + })) + + opt.Add(sysgo.WithMnemonicKeys(devkeys.TestMnemonic)) + + // Get artifacts path + artifactsPath := os.Getenv("OP_DEPLOYER_ARTIFACTS") + if artifactsPath == "" { + panic("OP_DEPLOYER_ARTIFACTS is not set") + } + + opt.Add(sysgo.WithDeployer(), + sysgo.WithDeployerPipelineOption( + sysgo.WithDeployerCacheDir(artifactsPath), + ), + sysgo.WithDeployerOptions( + func(_ devtest.P, _ devkeys.Keys, builder intentbuilder.Builder) { + 
builder.WithL1ContractsLocator(artifacts.MustNewFileLocator(artifactsPath)) + builder.WithL2ContractsLocator(artifacts.MustNewFileLocator(artifactsPath)) + }, + sysgo.WithCommons(src.L1.ChainID()), + sysgo.WithPrefundedL2(src.L1.ChainID(), src.L2.ChainID()), + ), + ) + + opt.Add(sysgo.WithL1Nodes(src.L1EL, src.L1CL)) + + // Spawn L2 sequencer nodes + switch src.L2ELSequencer.Client { + case L2ELClientRethWithProofs: + opt.Add(sysgo.WithOpReth(src.L2ELSequencer.L2ELNodeID, sysgo.L2ELWithProofHistory(true))) + case L2ELClientReth: + opt.Add(sysgo.WithOpReth(src.L2ELSequencer.L2ELNodeID)) + case L2ELClientGeth: + opt.Add(sysgo.WithOpGeth(src.L2ELSequencer.L2ELNodeID)) + default: + panic("unknown L2 EL client for sequencer") + } + opt.Add(sysgo.WithL2CLNode(src.L2CLSequencer, src.L1CL, src.L1EL, src.L2ELSequencer.L2ELNodeID, sysgo.L2CLSequencer())) + + // Spawn L2 validator nodes + switch src.L2ELValidator.Client { + case L2ELClientRethWithProofs: + opt.Add(sysgo.WithOpReth(src.L2ELValidator.L2ELNodeID, sysgo.L2ELWithProofHistory(true))) + case L2ELClientReth: + opt.Add(sysgo.WithOpReth(src.L2ELValidator.L2ELNodeID)) + case L2ELClientGeth: + opt.Add(sysgo.WithOpGeth(src.L2ELValidator.L2ELNodeID)) + default: + panic("unknown L2 EL client for validator") + } + opt.Add(sysgo.WithL2CLNode(src.L2CLValidator, src.L1CL, src.L1EL, src.L2ELValidator.L2ELNodeID)) + + opt.Add(sysgo.WithBatcher(src.L2Batcher, src.L1EL, src.L2CLSequencer, src.L2ELSequencer.L2ELNodeID)) + opt.Add(sysgo.WithProposer(src.L2Proposer, src.L1EL, &src.L2CLSequencer, nil)) + + opt.Add(sysgo.WithFaucets([]stack.L1ELNodeID{src.L1EL}, []stack.L2ELNodeID{src.L2ELSequencer.L2ELNodeID})) + + opt.Add(sysgo.WithTestSequencer(src.TestSequencer, src.L1CL, src.L2CLSequencer, src.L1EL, src.L2ELSequencer.L2ELNodeID)) + + opt.Add(stack.Finally(func(orch *sysgo.Orchestrator) { + *dest = *src + })) + + return opt +} diff --git a/rust/op-reth/crates/tests/proofs/utils/proof.go 
b/rust/op-reth/crates/tests/proofs/utils/proof.go new file mode 100644 index 0000000000000..89bf42f2b3821 --- /dev/null +++ b/rust/op-reth/crates/tests/proofs/utils/proof.go @@ -0,0 +1,154 @@ +package utils + +import ( + "bytes" + "fmt" + + "github.com/ethereum-optimism/optimism/op-devstack/devtest" + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/ethdb/memorydb" + "github.com/ethereum/go-ethereum/rlp" + "github.com/ethereum/go-ethereum/trie" + "github.com/stretchr/testify/require" +) + +// NormalizeProofResponse standardizes an AccountResult obtained from eth_getProof +// across different client implementations (e.g., Geth, Reth) so that they can be +// compared meaningfully in tests. +// +// Ethereum clients may encode empty or zeroed data structures differently while +// still representing the same logical state. For example: +// - An empty storage proof may appear as [] (Geth) or ["0x80"] (Reth). +// +// This function normalizes such differences by: +// - Converting single-element proofs containing "0x80" to an empty proof slice. 
+func NormalizeProofResponse(res *eth.AccountResult) { + for i := range res.StorageProof { + if len(res.StorageProof[i].Proof) == 1 && bytes.Equal(res.StorageProof[i].Proof[0], []byte{0x80}) { + res.StorageProof[i].Proof = []hexutil.Bytes{} + } + } + + // Normalize empty CodeHash + // Geth returns 0x0000000000000000000000000000000000000000000000000000000000000000 + // Reth returns 0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470 + if res.CodeHash == (common.Hash{}) { + res.CodeHash = crypto.Keccak256Hash(nil) + } + + // Normalize empty StorageHash + // Geth returns 0x0000000000000000000000000000000000000000000000000000000000000000 + // Reth returns 0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421 + if res.StorageHash == (common.Hash{}) { + res.StorageHash = types.EmptyRootHash + } +} + +// VerifyProof verifies an account and its storage proofs against a given state root. +// +// This function extends the standard behavior of go-ethereum’s AccountResult.Verify() +// by gracefully handling the case where the account’s storage trie root is empty +// (0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421). +func VerifyProof(res *eth.AccountResult, stateRoot common.Hash) error { + // Skip storage proof verification if the storage trie is empty. 
+ if res.StorageHash != types.EmptyRootHash { + for i, entry := range res.StorageProof { + // load all MPT nodes into a DB + db := memorydb.New() + for j, encodedNode := range entry.Proof { + nodeKey := encodedNode + if len(encodedNode) >= 32 { // small MPT nodes are not hashed + nodeKey = crypto.Keccak256(encodedNode) + } + if err := db.Put(nodeKey, encodedNode); err != nil { + return fmt.Errorf("failed to load storage proof node %d of storage value %d into mem db: %w", j, i, err) + } + } + path := crypto.Keccak256(entry.Key) + val, err := trie.VerifyProof(res.StorageHash, path, db) + if err != nil { + return fmt.Errorf("failed to verify storage value %d with key %s (path %x) in storage trie %s: %w", i, entry.Key.String(), path, res.StorageHash, err) + } + if val == nil && entry.Value.ToInt().Cmp(common.Big0) == 0 { // empty storage is zero by default + continue + } + comparison, err := rlp.EncodeToBytes(entry.Value.ToInt().Bytes()) + if err != nil { + return fmt.Errorf("failed to encode storage value %d with key %s (path %x) in storage trie %s: %w", i, entry.Key.String(), path, res.StorageHash, err) + } + if !bytes.Equal(val, comparison) { + return fmt.Errorf("value %d in storage proof does not match proven value at key %s (path %x)", i, entry.Key.String(), path) + } + } + } + + accountClaimed := []any{uint64(res.Nonce), res.Balance.ToInt().Bytes(), res.StorageHash, res.CodeHash} + accountClaimedValue, err := rlp.EncodeToBytes(accountClaimed) + if err != nil { + return fmt.Errorf("failed to encode account from retrieved values: %w", err) + } + + // create a db with all account trie nodes + db := memorydb.New() + for i, encodedNode := range res.AccountProof { + nodeKey := encodedNode + if len(encodedNode) >= 32 { // small MPT nodes are not hashed + nodeKey = crypto.Keccak256(encodedNode) + } + if err := db.Put(nodeKey, encodedNode); err != nil { + return fmt.Errorf("failed to load account proof node %d into mem db: %w", i, err) + } + } + path := 
crypto.Keccak256(res.Address[:]) + accountProofValue, err := trie.VerifyProof(stateRoot, path, db) + if err != nil { + return fmt.Errorf("failed to verify account value with key %s (path %x) in account trie %s: %w", res.Address, path, stateRoot, err) + } + + // If the proof demonstrates non-existence (nil value), we must check if the RPC claimed the account is empty. + if len(accountProofValue) == 0 { + isEmpty := res.Nonce == 0 && + res.Balance.ToInt().Sign() == 0 && + (res.StorageHash == types.EmptyRootHash || res.StorageHash == common.Hash{}) && + (res.CodeHash == crypto.Keccak256Hash(nil) || res.CodeHash == common.Hash{}) + + if isEmpty { + return nil + } + } + + if !bytes.Equal(accountClaimedValue, accountProofValue) { + return fmt.Errorf("L1 RPC is tricking us, account proof does not match provided deserialized values:\n"+ + " claimed: %x\n"+ + " proof: %x", accountClaimedValue, accountProofValue) + } + return nil +} + +// FetchAndVerifyProofs fetches account proofs from both L2EL and L2ELB for the given address +func FetchAndVerifyProofs(t devtest.T, sys *MixedOpProofPreset, address common.Address, slots []common.Hash, block uint64) { + ctx := t.Ctx() + blockInfo, err := sys.L2ELSequencerNode().Escape().L2EthClient().InfoByNumber(ctx, block) + require.NoError(t, err, "failed to get block info for block %d", block) + + seqProofRes, err := sys.L2ELSequencerNode().Escape().L2EthClient().GetProof(ctx, address, slots, hexutil.Uint64(block).String()) + require.NoError(t, err, "failed to get proof from L2EL at block %d", block) + + valProofRes, err := sys.L2ELValidatorNode().Escape().L2EthClient().GetProof(ctx, address, slots, hexutil.Uint64(block).String()) + require.NoError(t, err, "failed to get proof from L2ELB at block %d", block) + + NormalizeProofResponse(seqProofRes) + NormalizeProofResponse(valProofRes) + + require.Equal(t, seqProofRes, valProofRes, "sequencer and validator proofs should match") + + err = VerifyProof(seqProofRes, blockInfo.Root()) + 
require.NoError(t, err, "geth proof verification failed at block %d", block) + + err = VerifyProof(valProofRes, blockInfo.Root()) + require.NoError(t, err, "reth proof verification failed at block %d", block) +} diff --git a/rust/op-reth/crates/tests/proofs/utils/simplestorage.go b/rust/op-reth/crates/tests/proofs/utils/simplestorage.go new file mode 100644 index 0000000000000..2e080ff9ad9ef --- /dev/null +++ b/rust/op-reth/crates/tests/proofs/utils/simplestorage.go @@ -0,0 +1,54 @@ +package utils + +import ( + "math/big" + + "github.com/ethereum-optimism/optimism/op-devstack/devtest" + "github.com/ethereum-optimism/optimism/op-devstack/dsl" + "github.com/ethereum-optimism/optimism/op-service/txplan" + "github.com/ethereum/go-ethereum/core/types" + "github.com/stretchr/testify/require" +) + +const SimpleStorageArtifact = "../contracts/artifacts/SimpleStorage.sol/SimpleStorage.json" + +type SimpleStorage struct { + *Contract + t devtest.T +} + +func (c *SimpleStorage) SetValue(user *dsl.EOA, value *big.Int) *types.Receipt { + ctx := c.t.Ctx() + callData, err := c.parsedABI.Pack("setValue", value) + if err != nil { + require.NoError(c.t, err, "failed to pack set call data") + } + + callTx := txplan.NewPlannedTx(user.Plan(), txplan.WithTo(&c.Contract.address), txplan.WithData(callData)) + callRes, err := callTx.Included.Eval(ctx) + if err != nil { + require.NoError(c.t, err, "failed to create set tx") + } + + if callRes.Status != types.ReceiptStatusSuccessful { + require.NoError(c.t, err, "set transaction failed") + } + return callRes +} + +func (c *SimpleStorage) PlanSetValue(user *dsl.EOA, value *big.Int) *txplan.PlannedTx { + callData, err := c.parsedABI.Pack("setValue", value) + if err != nil { + require.NoError(c.t, err, "failed to pack set call data") + } + + callTx := txplan.NewPlannedTx(user.Plan(), txplan.WithTo(&c.Contract.address), txplan.WithData(callData)) + return callTx +} + +func DeploySimpleStorage(t devtest.T, user *dsl.EOA) (*SimpleStorage, 
*types.Receipt) { + parsedABI, bin := LoadArtifact(t, SimpleStorageArtifact) + contractAddress, receipt := DeployContract(t, user, bin) + contract := NewContract(contractAddress, parsedABI) + return &SimpleStorage{contract, t}, receipt +} diff --git a/rust/op-reth/crates/tests/proofs/utils/tokenvault.go b/rust/op-reth/crates/tests/proofs/utils/tokenvault.go new file mode 100644 index 0000000000000..1c1fbe2d12ae8 --- /dev/null +++ b/rust/op-reth/crates/tests/proofs/utils/tokenvault.go @@ -0,0 +1,106 @@ +package utils + +import ( + "math/big" + + "github.com/ethereum-optimism/optimism/op-devstack/devtest" + "github.com/ethereum-optimism/optimism/op-devstack/dsl" + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum-optimism/optimism/op-service/txplan" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" + "github.com/stretchr/testify/require" +) + +const TokenVaultArtifact = "../contracts/artifacts/TokenVault.sol/TokenVault.json" +const BalanceSlotIndex = 0 +const AllowanceSlotIndex = 1 +const DepositorSlotIndex = 2 + +type TokenVault struct { + *Contract + t devtest.T +} + +func (c *TokenVault) Deposit(user *dsl.EOA, amount eth.ETH) *types.Receipt { + depositCalldata, err := c.Contract.parsedABI.Pack("deposit") + if err != nil { + require.NoError(c.t, err, "failed to pack deposit calldata") + } + depTx := txplan.NewPlannedTx(user.Plan(), txplan.WithTo(&c.Contract.address), txplan.WithData(depositCalldata), txplan.WithValue(amount)) + depRes, err := depTx.Included.Eval(c.t.Ctx()) + if err != nil { + require.NoError(c.t, err, "deposit tx failed") + } + + if depRes.Status != types.ReceiptStatusSuccessful { + require.NoError(c.t, err, "deposit transaction failed") + } + + return depRes +} + +func (c *TokenVault) Approve(user *dsl.EOA, spender common.Address, amount *big.Int) *types.Receipt { + approveCalldata, err := c.Contract.parsedABI.Pack("approve", spender, 
amount) + if err != nil { + require.NoError(c.t, err, "failed to pack approve calldata") + } + + approveTx := txplan.NewPlannedTx(user.Plan(), txplan.WithTo(&c.Contract.address), txplan.WithData(approveCalldata)) + approveRes, err := approveTx.Included.Eval(c.t.Ctx()) + if err != nil { + require.NoError(c.t, err, "approve tx failed") + } + + if approveRes.Status != types.ReceiptStatusSuccessful { + require.NoError(c.t, err, "approve transaction failed") + } + return approveRes +} + +func (c *TokenVault) DeactivateAllowance(user *dsl.EOA, spender common.Address) *types.Receipt { + deactCalldata, err := c.Contract.parsedABI.Pack("deactivateAllowance", spender) + if err != nil { + require.NoError(c.t, err, "failed to pack deactivateAllowance calldata") + } + deactTx := txplan.NewPlannedTx(user.Plan(), txplan.WithTo(&c.Contract.address), txplan.WithData(deactCalldata)) + deactRes, err := deactTx.Included.Eval(c.t.Ctx()) + if err != nil { + require.NoError(c.t, err, "deactivateAllowance tx failed") + } + + if deactRes.Status != types.ReceiptStatusSuccessful { + require.NoError(c.t, err, "deactivateAllowance transaction failed") + } + return deactRes +} + +func (c *TokenVault) GetBalanceSlot(user common.Address) common.Hash { + keyBytes := common.LeftPadBytes(user.Bytes(), 32) + slotBytes := common.LeftPadBytes(new(big.Int).SetUint64(BalanceSlotIndex).Bytes(), 32) + return crypto.Keccak256Hash(append(keyBytes, slotBytes...)) +} + +func (c *TokenVault) GetAllowanceSlot(owner, spender common.Address) common.Hash { + ownerBytes := common.LeftPadBytes(owner.Bytes(), 32) + slotBytes := common.LeftPadBytes(new(big.Int).SetUint64(AllowanceSlotIndex).Bytes(), 32) + inner := crypto.Keccak256(ownerBytes, slotBytes) + spenderBytes := common.LeftPadBytes(spender.Bytes(), 32) + return crypto.Keccak256Hash(append(spenderBytes, inner...)) +} + +func (c *TokenVault) GetDepositorSlot(index uint64) common.Hash { + slotBytes := 
common.LeftPadBytes(new(big.Int).SetUint64(DepositorSlotIndex).Bytes(), 32) + base := crypto.Keccak256(slotBytes) + baseInt := new(big.Int).SetBytes(base) + elem := new(big.Int).Add(baseInt, new(big.Int).SetUint64(index)) + return common.BigToHash(elem) +} + +func DeployTokenVault(t devtest.T, user *dsl.EOA) (*TokenVault, *types.Receipt) { + parsedABI, bin := LoadArtifact(t, TokenVaultArtifact) + contractAddress, receipt := DeployContract(t, user, bin) + contract := NewContract(contractAddress, parsedABI) + return &TokenVault{contract, t}, receipt +} diff --git a/rust/op-reth/crates/tests/proofs/utils/utils.go b/rust/op-reth/crates/tests/proofs/utils/utils.go new file mode 100644 index 0000000000000..e47fdc47e15b8 --- /dev/null +++ b/rust/op-reth/crates/tests/proofs/utils/utils.go @@ -0,0 +1,66 @@ +package utils + +import ( + "encoding/json" + "os" + "strings" + + "github.com/ethereum-optimism/optimism/op-devstack/devtest" + "github.com/ethereum-optimism/optimism/op-devstack/dsl" + "github.com/ethereum-optimism/optimism/op-service/txplan" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/stretchr/testify/require" +) + +// minimal parts of artifact +type Artifact struct { + ABI json.RawMessage `json:"abi"` + Bytecode struct { + Object string `json:"object"` + } `json:"bytecode"` +} + +// LoadArtifact reads the forge artifact JSON at artifactPath and returns the parsed ABI +// and the creation bytecode (as bytes). It prefers bytecode.object (creation) and falls +// back to deployedBytecode.object if needed. 
+func LoadArtifact(t devtest.T, artifactPath string) (abi.ABI, []byte) { + data, err := os.ReadFile(artifactPath) + if err != nil { + require.NoError(t, err, "failed to read artifact file") + } + + var art Artifact + if err := json.Unmarshal(data, &art); err != nil { + require.NoError(t, err, "failed to unmarshal artifact JSON") + } + + parsedABI, err := abi.JSON(strings.NewReader(string(art.ABI))) + if err != nil { + require.NoError(t, err, "failed to parse contract ABI") + } + + binHex := strings.TrimSpace(art.Bytecode.Object) + if binHex == "" { + require.NoError(t, err, "artifact has no bytecode") + } + + return parsedABI, common.FromHex(binHex) +} + +// DeployContract deploys the contract creation bytecode from the given artifact. +// user must provide a Plan() method compatible with txplan.NewPlannedTx (kept generic). +func DeployContract(t devtest.T, user *dsl.EOA, bin []byte) (common.Address, *types.Receipt) { + tx := txplan.NewPlannedTx(user.Plan(), txplan.WithData(bin)) + res, err := tx.Included.Eval(t.Ctx()) + if err != nil { + require.NoError(t, err, "contract deployment tx failed") + } + + if res.Status != types.ReceiptStatusSuccessful { + require.NoError(t, err, "contract deployment transaction failed") + } + + return res.ContractAddress, res +} diff --git a/rust/op-reth/crates/tests/scripts/op-reth-entrypoint.sh b/rust/op-reth/crates/tests/scripts/op-reth-entrypoint.sh new file mode 100644 index 0000000000000..08b9b1385ff32 --- /dev/null +++ b/rust/op-reth/crates/tests/scripts/op-reth-entrypoint.sh @@ -0,0 +1,69 @@ +#!/bin/sh +set -e + +# Variables to extract +DATADIR="" +PROOFS_PATH="" +CHAIN="" + +# Helper: require a value after flag +require_value() { + if [ -z "$2" ] || printf "%s" "$2" | grep -q "^--"; then + echo "ERROR: Missing value for $1" >&2 + exit 1 + fi +} + +# Parse arguments using a prev_flag pattern to avoid eval +prev_flag="" +for arg in "$@"; do + if [ -n "$prev_flag" ]; then + require_value "$prev_flag" "$arg" + case "$prev_flag" 
in + --datadir) DATADIR="$arg" ;; + --proofs-history.storage-path) PROOFS_PATH="$arg" ;; + --chain) CHAIN="$arg" ;; + esac + prev_flag="" + continue + fi + + case "$arg" in + --datadir=*) + DATADIR="${arg#*=}" + ;; + --datadir) + prev_flag="$arg" + ;; + --proofs-history.storage-path=*) + PROOFS_PATH="${arg#*=}" + ;; + --proofs-history.storage-path) + prev_flag="$arg" + ;; + --chain=*) + CHAIN="${arg#*=}" + ;; + --chain) + prev_flag="$arg" + ;; + esac +done + +# Check if a flag was left without a value at the end +if [ -n "$prev_flag" ]; then + echo "ERROR: Missing value for $prev_flag" >&2 + exit 1 +fi + +# Log extracted values +echo "extracted --datadir: ${DATADIR:-}" +echo "extracted --proofs-history.storage-path: ${PROOFS_PATH:-}" +echo "extracted --chain: ${CHAIN:-}" + +echo "Initializing op-reth" +op-reth init --datadir="$DATADIR" --chain="$CHAIN" +echo "Initializing op-reth proofs" +op-reth proofs init --datadir="$DATADIR" --chain="$CHAIN" --proofs-history.storage-path="$PROOFS_PATH" +echo "Starting op-reth with args: $*" +op-reth "$@" diff --git a/rust/op-reth/crates/trie/Cargo.toml b/rust/op-reth/crates/trie/Cargo.toml new file mode 100644 index 0000000000000..3cf894c579b7f --- /dev/null +++ b/rust/op-reth/crates/trie/Cargo.toml @@ -0,0 +1,98 @@ +[package] +name = "reth-optimism-trie" +version = "1.10.2" +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage = "https://paradigmxyz.github.io/reth" +repository = "https://github.com/paradigmxyz/reth" +description = "Trie node storage for serving proofs in FP window fast" + +[lints] +workspace = true + +[dependencies] +# reth +reth-db = { workspace = true, features = ["mdbx"] } +reth-evm.workspace = true +reth-execution-errors.workspace = true +reth-primitives-traits.workspace = true +reth-provider.workspace = true +reth-revm.workspace = true +reth-trie = { workspace = true, features = ["serde"] } +reth-trie-common = { workspace = true, features = ["serde"] } 
reth-tasks.workspace = true

# workaround: reth-trie/serde-bincode-compat activates serde-bincode-compat on
# reth-ethereum-primitives (a transitive dep) without also activating its serde feature,
# breaking compilation. Adding it as an optional dep lets us enable both features together.
reth-ethereum-primitives = { workspace = true, optional = true }

# `metrics` feature
metrics = { workspace = true, optional = true }
reth-metrics = { workspace = true, features = ["common"], optional = true }

# ethereum
alloy-primitives.workspace = true
alloy-eips.workspace = true

# async
tokio = { workspace = true, features = ["sync"] }

# codec
bytes.workspace = true
serde.workspace = true
bincode.workspace = true

# misc
parking_lot.workspace = true
thiserror.workspace = true
auto_impl.workspace = true
eyre = { workspace = true, optional = true }
strum.workspace = true
tracing.workspace = true
derive_more.workspace = true

[dev-dependencies]
reth-codecs = { workspace = true, features = ["test-utils"] }
tempfile.workspace = true
tokio = { workspace = true, features = ["test-util", "rt-multi-thread", "macros"] }
test-case.workspace = true
reth-db = { workspace = true, features = ["test-utils"] }
# workaround for failing doc test
reth-db-api = { workspace = true, features = ["test-utils"] }
reth-trie = { workspace = true, features = ["test-utils"] }
reth-provider = { workspace = true, features = ["test-utils"] }
reth-node-api.workspace = true
alloy-consensus.workspace = true
alloy-genesis.workspace = true
reth-chainspec.workspace = true
reth-db-common.workspace = true
reth-ethereum-primitives.workspace = true
reth-evm-ethereum.workspace = true
reth-storage-errors.workspace = true
secp256k1 = { workspace = true, features = ["rand", "std"] }
mockall.workspace = true
eyre.workspace = true

# misc
serial_test.workspace = true

[features]
serde-bincode-compat = [
    "reth-primitives-traits/serde-bincode-compat",
    "reth-trie-common/serde-bincode-compat",
    "alloy-consensus/serde-bincode-compat",
    "alloy-eips/serde-bincode-compat",
    "reth-trie/serde-bincode-compat",
    "dep:reth-ethereum-primitives",
    "reth-ethereum-primitives?/serde-bincode-compat",
    "reth-ethereum-primitives?/serde",
]
metrics = [
    "reth-trie/metrics",
    "dep:reth-metrics",
    "dep:metrics",
    "dep:eyre",
    "reth-evm/metrics"
]
diff --git a/rust/op-reth/crates/trie/src/api.rs b/rust/op-reth/crates/trie/src/api.rs
new file mode 100644
index 0000000000000..58f943b810806
--- /dev/null
+++ b/rust/op-reth/crates/trie/src/api.rs
@@ -0,0 +1,231 @@
//! Storage API for external storage of intermediary trie nodes.

use crate::{
    OpProofsStorageResult,
    db::{HashedStorageKey, StorageTrieKey},
};
use alloy_eips::{BlockNumHash, eip1898::BlockWithParent};
use alloy_primitives::{B256, U256};
use auto_impl::auto_impl;
use derive_more::{AddAssign, Constructor};
use reth_primitives_traits::Account;
use reth_trie::{
    hashed_cursor::{HashedCursor, HashedStorageCursor},
    trie_cursor::{TrieCursor, TrieStorageCursor},
};
use reth_trie_common::{
    BranchNodeCompact, HashedPostStateSorted, Nibbles, StoredNibbles, updates::TrieUpdatesSorted,
};
use std::{fmt::Debug, time::Duration};

/// Diff of trie updates and post state for a block.
#[derive(Debug, Clone, Default)]
pub struct BlockStateDiff {
    /// Trie updates for branch nodes
    pub sorted_trie_updates: TrieUpdatesSorted,
    /// Post state for leaf nodes (accounts and storage)
    pub sorted_post_state: HashedPostStateSorted,
}

impl BlockStateDiff {
    /// Extends this [`BlockStateDiff`] in place with the entries of a newer
    /// [`BlockStateDiff`], re-sorting the merged collections.
    pub fn extend_ref(&mut self, other: &Self) {
        self.sorted_trie_updates.extend_ref_and_sort(&other.sorted_trie_updates);
        self.sorted_post_state.extend_ref_and_sort(&other.sorted_post_state);
    }
}

/// Counts of trie updates written to storage.
+#[derive(Debug, Clone, Default, AddAssign, Constructor, Eq, PartialEq)] +pub struct WriteCounts { + /// Number of account trie updates written + pub account_trie_updates_written_total: u64, + /// Number of storage trie updates written + pub storage_trie_updates_written_total: u64, + /// Number of hashed accounts written + pub hashed_accounts_written_total: u64, + /// Number of hashed storages written + pub hashed_storages_written_total: u64, +} + +/// Duration metrics for block processing. +#[derive(Debug, Default, Clone)] +pub struct OperationDurations { + /// Total time to process a block (end-to-end) in seconds + pub total_duration_seconds: Duration, + /// Time spent executing the block (EVM) in seconds + pub execution_duration_seconds: Duration, + /// Time spent calculating state root in seconds + pub state_root_duration_seconds: Duration, + /// Time spent writing trie updates to storage in seconds + pub write_duration_seconds: Duration, +} + +/// Trait for reading trie nodes from the database. +/// +/// Only leaf nodes and some branch nodes are stored. The bottom layer of branch nodes +/// are not stored to reduce write amplification. This matches Reth's non-historical trie storage. +#[auto_impl(Arc)] +pub trait OpProofsStore: Send + Sync + Debug { + /// Cursor for iterating over trie branches. + type StorageTrieCursor<'tx>: TrieStorageCursor + 'tx + where + Self: 'tx; + + /// Cursor for iterating over account trie branches. + type AccountTrieCursor<'tx>: TrieCursor + 'tx + where + Self: 'tx; + + /// Cursor for iterating over storage leaves. + type StorageCursor<'tx>: HashedStorageCursor + Send + Sync + 'tx + where + Self: 'tx; + + /// Cursor for iterating over account leaves. + type AccountHashedCursor<'tx>: HashedCursor + Send + Sync + 'tx + where + Self: 'tx; + + /// Get the earliest block number and hash that has been stored + /// + /// This is used to determine the block number of trie nodes with block number 0. 
+ /// All earliest block numbers are stored in 0 to reduce updates required to prune trie nodes. + fn get_earliest_block_number(&self) -> OpProofsStorageResult>; + + /// Get the latest block number and hash that has been stored + fn get_latest_block_number(&self) -> OpProofsStorageResult>; + + /// Get a trie cursor for the storage backend + fn storage_trie_cursor<'tx>( + &self, + hashed_address: B256, + max_block_number: u64, + ) -> OpProofsStorageResult>; + + /// Get a trie cursor for the account backend + fn account_trie_cursor<'tx>( + &self, + max_block_number: u64, + ) -> OpProofsStorageResult>; + + /// Get a storage cursor for the storage backend + fn storage_hashed_cursor<'tx>( + &self, + hashed_address: B256, + max_block_number: u64, + ) -> OpProofsStorageResult>; + + /// Get an account hashed cursor for the storage backend + fn account_hashed_cursor<'tx>( + &self, + max_block_number: u64, + ) -> OpProofsStorageResult>; + + /// Store a batch of trie updates. + /// + /// If wiped is true, the entire storage trie is wiped, but this is unsupported going forward, + /// so should only happen for legacy reasons. + fn store_trie_updates( + &self, + block_ref: BlockWithParent, + block_state_diff: BlockStateDiff, + ) -> OpProofsStorageResult; + + /// Fetch all updates for a given block number. + fn fetch_trie_updates(&self, block_number: u64) -> OpProofsStorageResult; + + /// Applies [`BlockStateDiff`] to the earliest state (updating/deleting nodes) and updates the + /// earliest block number. + fn prune_earliest_state( + &self, + new_earliest_block_ref: BlockWithParent, + ) -> OpProofsStorageResult; + + /// Remove account, storage and trie updates from historical storage for all blocks till + /// the specified block (inclusive). + fn unwind_history(&self, to: BlockWithParent) -> OpProofsStorageResult<()>; + + /// Deletes all updates > `latest_common_block` and replaces them with the new updates. 
+ fn replace_updates( + &self, + latest_common_block: BlockNumHash, + blocks_to_add: Vec<(BlockWithParent, BlockStateDiff)>, + ) -> OpProofsStorageResult<()>; + + /// Set the earliest block number and hash that has been stored + fn set_earliest_block_number(&self, block_number: u64, hash: B256) + -> OpProofsStorageResult<()>; +} + +/// Status of the initial state anchor. +#[derive(Debug, Clone, Copy, Default)] +pub enum InitialStateStatus { + /// Init isn't yet started + #[default] + NotStarted, + /// Init is in progress (some tables may already be populated) + InProgress, + /// Init completed successfully (all tables done + earliest block set) + Completed, +} + +/// Anchor for the initial state. +#[derive(Debug, Clone, Default)] +pub struct InitialStateAnchor { + /// The block for which the initial state is being initialized. None if initialization is not + /// yet started. + pub block: Option, + /// Whether initialization is still running or completed. + pub status: InitialStateStatus, + /// The latest key stored for `AccountTrieHistory`. + pub latest_account_trie_key: Option, + /// The latest key stored for `StorageTrieHistory`. + pub latest_storage_trie_key: Option, + /// The latest key stored for `HashedAccountHistory`. + pub latest_hashed_account_key: Option, + /// The latest key stored for `HashedStorageHistory`. + pub latest_hashed_storage_key: Option, +} + +/// Trait for storing and retrieving the initial state anchor. +#[auto_impl(Arc)] +pub trait OpProofsInitialStateStore: Send + Sync + Debug { + /// Read the current anchor. + fn initial_state_anchor(&self) -> OpProofsStorageResult; + + /// Create the anchor if it doesn't exist. + /// Returns `Err` if an anchor already exists (prevents accidental overwrite). + fn set_initial_state_anchor(&self, anchor: BlockNumHash) -> OpProofsStorageResult<()>; + + /// Store a batch of account trie branches. Used for saving existing state. 
For live state + /// capture, use [store_trie_updates](OpProofsStore::store_trie_updates). + fn store_account_branches( + &self, + account_nodes: Vec<(Nibbles, Option)>, + ) -> OpProofsStorageResult<()>; + + /// Store a batch of storage trie branches. Used for saving existing state. + fn store_storage_branches( + &self, + hashed_address: B256, + storage_nodes: Vec<(Nibbles, Option)>, + ) -> OpProofsStorageResult<()>; + + /// Store a batch of account trie leaf nodes. Used for saving existing state. + fn store_hashed_accounts( + &self, + accounts: Vec<(B256, Option)>, + ) -> OpProofsStorageResult<()>; + + /// Store a batch of storage trie leaf nodes. Used for saving existing state. + fn store_hashed_storages( + &self, + hashed_address: B256, + storages: Vec<(B256, U256)>, + ) -> OpProofsStorageResult<()>; + + /// Commit the initial state - mark the anchor as completed and also set the earliest block + /// number to anchor. + fn commit_initial_state(&self) -> OpProofsStorageResult; +} diff --git a/rust/op-reth/crates/trie/src/cursor.rs b/rust/op-reth/crates/trie/src/cursor.rs new file mode 100644 index 0000000000000..500d6e26ab538 --- /dev/null +++ b/rust/op-reth/crates/trie/src/cursor.rs @@ -0,0 +1,129 @@ +//! Implementation of [`HashedCursor`] and [`TrieCursor`] for +//! [`OpProofsStorage`](crate::OpProofsStorage). + +use alloy_primitives::{B256, U256}; +use derive_more::Constructor; +use reth_db::DatabaseError; +use reth_primitives_traits::Account; +use reth_trie::{ + hashed_cursor::{HashedCursor, HashedStorageCursor}, + trie_cursor::{TrieCursor, TrieStorageCursor}, +}; +use reth_trie_common::{BranchNodeCompact, Nibbles}; + +/// Manages reading storage or account trie nodes from [`TrieCursor`]. 
+#[derive(Debug, Clone, Constructor)] +pub struct OpProofsTrieCursor(pub C); + +impl TrieCursor for OpProofsTrieCursor +where + C: TrieCursor, +{ + #[inline] + fn seek_exact( + &mut self, + key: Nibbles, + ) -> Result, DatabaseError> { + self.0.seek_exact(key) + } + + #[inline] + fn seek( + &mut self, + key: Nibbles, + ) -> Result, DatabaseError> { + self.0.seek(key) + } + + #[inline] + fn next(&mut self) -> Result, DatabaseError> { + self.0.next() + } + + #[inline] + fn current(&mut self) -> Result, DatabaseError> { + self.0.current() + } + + #[inline] + fn reset(&mut self) { + self.0.reset() + } +} + +impl TrieStorageCursor for OpProofsTrieCursor +where + C: TrieStorageCursor, +{ + #[inline] + fn set_hashed_address(&mut self, hashed_address: B256) { + self.0.set_hashed_address(hashed_address) + } +} + +/// Manages reading hashed account nodes from external storage. +#[derive(Debug, Clone, Constructor)] +pub struct OpProofsHashedAccountCursor(pub C); + +impl HashedCursor for OpProofsHashedAccountCursor +where + C: HashedCursor + Send + Sync, +{ + type Value = Account; + + #[inline] + fn seek(&mut self, key: B256) -> Result, DatabaseError> { + self.0.seek(key) + } + + #[inline] + fn next(&mut self) -> Result, DatabaseError> { + self.0.next() + } + + #[inline] + fn reset(&mut self) { + self.0.reset() + } +} + +/// Manages reading hashed storage nodes from external storage. 
+#[derive(Debug, Clone, Constructor)] +pub struct OpProofsHashedStorageCursor(pub C); + +impl HashedCursor for OpProofsHashedStorageCursor +where + C: HashedCursor + Send + Sync, +{ + type Value = U256; + + #[inline] + fn seek(&mut self, key: B256) -> Result, DatabaseError> { + self.0.seek(key) + } + + #[inline] + fn next(&mut self) -> Result, DatabaseError> { + self.0.next() + } + + #[inline] + fn reset(&mut self) { + self.0.reset() + } +} + +impl HashedStorageCursor for OpProofsHashedStorageCursor +where + C: HashedStorageCursor + Send + Sync, +{ + #[inline] + fn is_storage_empty(&mut self) -> Result { + self.0.is_storage_empty() + } + + #[inline] + fn set_hashed_address(&mut self, hashed_address: B256) { + self.0.set_hashed_address(hashed_address) + } +} diff --git a/rust/op-reth/crates/trie/src/cursor_factory.rs b/rust/op-reth/crates/trie/src/cursor_factory.rs new file mode 100644 index 0000000000000..ff47c9137cac7 --- /dev/null +++ b/rust/op-reth/crates/trie/src/cursor_factory.rs @@ -0,0 +1,100 @@ +//! Implements [`TrieCursorFactory`] and [`HashedCursorFactory`] for [`OpProofsStore`] types. + +use crate::{ + OpProofsHashedAccountCursor, OpProofsHashedStorageCursor, OpProofsStorage, OpProofsStore, + OpProofsTrieCursor, +}; +use alloy_primitives::B256; +use reth_db::DatabaseError; +use reth_trie::{hashed_cursor::HashedCursorFactory, trie_cursor::TrieCursorFactory}; +use std::marker::PhantomData; + +/// Factory for creating trie cursors for [`OpProofsStore`]. 
+#[derive(Debug, Clone)] +pub struct OpProofsTrieCursorFactory<'tx, S: OpProofsStore> { + storage: &'tx OpProofsStorage, + block_number: u64, + _marker: PhantomData<&'tx ()>, +} + +impl<'tx, S: OpProofsStore> OpProofsTrieCursorFactory<'tx, S> { + /// Initializes new `OpProofsTrieCursorFactory` + pub const fn new(storage: &'tx OpProofsStorage, block_number: u64) -> Self { + Self { storage, block_number, _marker: PhantomData } + } +} + +impl<'tx, S> TrieCursorFactory for OpProofsTrieCursorFactory<'tx, S> +where + for<'a> S: OpProofsStore + 'tx, +{ + type AccountTrieCursor<'a> + = OpProofsTrieCursor> + where + Self: 'a; + type StorageTrieCursor<'a> + = OpProofsTrieCursor> + where + Self: 'a; + + fn account_trie_cursor(&self) -> Result, DatabaseError> { + Ok(OpProofsTrieCursor::new( + self.storage + .account_trie_cursor(self.block_number) + .map_err(Into::::into)?, + )) + } + + fn storage_trie_cursor( + &self, + hashed_address: B256, + ) -> Result, DatabaseError> { + Ok(OpProofsTrieCursor::new( + self.storage + .storage_trie_cursor(hashed_address, self.block_number) + .map_err(Into::::into)?, + )) + } +} + +/// Factory for creating hashed account cursors for [`OpProofsStore`]. +#[derive(Debug, Clone)] +pub struct OpProofsHashedAccountCursorFactory<'tx, S: OpProofsStore> { + storage: &'tx OpProofsStorage, + block_number: u64, + _marker: PhantomData<&'tx ()>, +} + +impl<'tx, S: OpProofsStore> OpProofsHashedAccountCursorFactory<'tx, S> { + /// Creates a new `OpProofsHashedAccountCursorFactory` instance. 
+ pub const fn new(storage: &'tx OpProofsStorage, block_number: u64) -> Self { + Self { storage, block_number, _marker: PhantomData } + } +} + +impl<'tx, S> HashedCursorFactory for OpProofsHashedAccountCursorFactory<'tx, S> +where + S: OpProofsStore + 'tx, +{ + type AccountCursor<'a> + = OpProofsHashedAccountCursor> + where + Self: 'a; + type StorageCursor<'a> + = OpProofsHashedStorageCursor> + where + Self: 'a; + + fn hashed_account_cursor(&self) -> Result, DatabaseError> { + Ok(OpProofsHashedAccountCursor::new(self.storage.account_hashed_cursor(self.block_number)?)) + } + + fn hashed_storage_cursor( + &self, + hashed_address: B256, + ) -> Result, DatabaseError> { + Ok(OpProofsHashedStorageCursor::new( + self.storage.storage_hashed_cursor(hashed_address, self.block_number)?, + )) + } +} diff --git a/rust/op-reth/crates/trie/src/db/cursor.rs b/rust/op-reth/crates/trie/src/db/cursor.rs new file mode 100644 index 0000000000000..b4551eadf1340 --- /dev/null +++ b/rust/op-reth/crates/trie/src/db/cursor.rs @@ -0,0 +1,1465 @@ +use std::marker::PhantomData; + +use crate::{ + OpProofsStorageResult, + db::{ + AccountTrieHistory, HashedAccountHistory, HashedStorageHistory, HashedStorageKey, + MaybeDeleted, StorageTrieHistory, StorageTrieKey, VersionedValue, + }, +}; +use alloy_primitives::{B256, U256}; +use reth_db::{ + Database, DatabaseEnv, DatabaseError, + cursor::{DbCursorRO, DbDupCursorRO}, + table::{DupSort, Table}, + transaction::DbTx, +}; +use reth_primitives_traits::Account; +use reth_trie::{ + hashed_cursor::{HashedCursor, HashedStorageCursor}, + trie_cursor::{TrieCursor, TrieStorageCursor}, +}; +use reth_trie_common::{BranchNodeCompact, Nibbles, StoredNibbles}; + +/// Generic alias for dup cursor for T +pub(crate) type Dup<'tx, T> = <::TX as DbTx>::DupCursor; + +/// Iterates versioned dup-sorted rows and returns the latest value (<= `max_block_number`), +/// skipping tombstones. 
+#[derive(Debug, Clone)] +pub struct BlockNumberVersionedCursor { + _table: PhantomData, + cursor: Cursor, + max_block_number: u64, +} + +impl BlockNumberVersionedCursor +where + T: Table> + DupSort, + Cursor: DbCursorRO + DbDupCursorRO, +{ + /// Initializes new [`BlockNumberVersionedCursor`]. + pub const fn new(cursor: Cursor, max_block_number: u64) -> Self { + Self { _table: PhantomData, cursor, max_block_number } + } + + /// Check if the cursor is currently positioned at a valid row. + fn is_positioned(&mut self) -> OpProofsStorageResult { + Ok(self.cursor.current()?.is_some()) + } + + /// Resolve the latest version for `key` with `block_number` <= `max_block_number`. + /// Strategy: + /// - `seek_by_key_subkey(key, max)` gives first dup >= max. + /// - if exactly == max → it's our latest + /// - if > max → `prev_dup()` is latest < max (or None) + /// - if no dup >= max: + /// - if key exists → `last_dup()` is latest < max + /// - else → None + fn latest_version_for_key( + &mut self, + key: T::Key, + ) -> OpProofsStorageResult> { + // First dup with subkey >= max_block_number + let seek_res = self.cursor.seek_by_key_subkey(key.clone(), self.max_block_number)?; + + if let Some(vv) = seek_res { + if vv.block_number > self.max_block_number { + // step back to the last dup < max + return Ok(self.cursor.prev_dup()?); + } + // already at the dup = max + return Ok(Some((key, vv))); + } + + // No dup >= max ⇒ either key absent or all dups < max. Check if key exists: + if self.cursor.seek_exact(key.clone())?.is_none() { + return Ok(None); + } + + // Key exists ⇒ take last dup (< max). + if let Some(vv) = self.cursor.last_dup()? { + return Ok(Some((key, vv))); + } + Ok(None) + } + + /// Returns a non-deleted latest version for exactly `key`, if any. + fn seek_exact(&mut self, key: T::Key) -> OpProofsStorageResult> { + if let Some((latest_key, latest_value)) = self.latest_version_for_key(key)? 
&& + let MaybeDeleted(Some(v)) = latest_value.value + { + return Ok(Some((latest_key, v))); + } + Ok(None) + } + + /// Walk forward from `first_key` (inclusive) until we find a *live* latest-≤-max value. + /// `first_key` must already be a *real key* in the table. + fn next_live_from( + &mut self, + mut first_key: T::Key, + ) -> OpProofsStorageResult> { + loop { + // Compute latest version ≤ max for this key + if let Some((k, v)) = self.seek_exact(first_key.clone())? { + return Ok(Some((k, v))); + } + + // Move to next distinct key, or EOF + let Some((next_key, _)) = self.cursor.next_no_dup()? else { + return Ok(None); + }; + + first_key = next_key; + } + } + + /// Seek to the first non-deleted latest version at or after `start_key`. + /// Logic: + /// - Try exact key first (above). If alive, return it. + /// - Otherwise hop to next distinct key and repeat until we find a live version or hit EOF. + fn seek(&mut self, start_key: T::Key) -> OpProofsStorageResult> { + // Position MDBX at first key >= start_key + if let Some((first_key, _)) = self.cursor.seek(start_key)? { + return self.next_live_from(first_key); + } + Ok(None) + } + + /// Advance to the next distinct key from the current MDBX position + /// and return its non-deleted latest version, if any. + /// Next distinct key; if not positioned, start from `T::Key::default()`. + fn next(&mut self) -> OpProofsStorageResult> + where + T::Key: Default, + { + // If not positioned, start from the beginning (default key). + if self.cursor.current()?.is_none() { + let Some((first_key, _)) = self.cursor.seek(T::Key::default())? else { + return Ok(None); + }; + return self.next_live_from(first_key); + } + + // Otherwise advance to next distinct key and resume the walk. + let Some((next_key, _)) = self.cursor.next_no_dup()? else { + return Ok(None); + }; + self.next_live_from(next_key) + } +} + +/// MDBX implementation of [`TrieCursor`]. 
+#[derive(Debug)] +pub struct MdbxTrieCursor { + inner: BlockNumberVersionedCursor, + hashed_address: Option, +} + +impl< + V, + T: Table> + DupSort, + Cursor: DbCursorRO + DbDupCursorRO, +> MdbxTrieCursor +{ + /// Initializes new [`MdbxTrieCursor`]. + pub const fn new(cursor: Cursor, max_block_number: u64, hashed_address: Option) -> Self { + Self { inner: BlockNumberVersionedCursor::new(cursor, max_block_number), hashed_address } + } +} + +impl TrieCursor for MdbxTrieCursor +where + Cursor: DbCursorRO + DbDupCursorRO + Send + Sync, +{ + fn seek_exact( + &mut self, + path: Nibbles, + ) -> Result, DatabaseError> { + Ok(self + .inner + .seek_exact(StoredNibbles(path)) + .map(|opt| opt.map(|(StoredNibbles(n), node)| (n, node)))?) + } + + fn seek( + &mut self, + path: Nibbles, + ) -> Result, DatabaseError> { + Ok(self + .inner + .seek(StoredNibbles(path)) + .map(|opt| opt.map(|(StoredNibbles(n), node)| (n, node)))?) + } + + fn next(&mut self) -> Result, DatabaseError> { + Ok(self.inner.next().map(|opt| opt.map(|(StoredNibbles(n), node)| (n, node)))?) 
+ } + + fn current(&mut self) -> Result, DatabaseError> { + self.inner.cursor.current().map(|opt| opt.map(|(StoredNibbles(n), _)| n)) + } + + fn reset(&mut self) { + // Database cursors are stateless, no reset needed + } +} + +impl TrieCursor for MdbxTrieCursor +where + Cursor: DbCursorRO + DbDupCursorRO + Send + Sync, +{ + fn seek_exact( + &mut self, + path: Nibbles, + ) -> Result, DatabaseError> { + if let Some(address) = self.hashed_address { + let key = StorageTrieKey::new(address, StoredNibbles(path)); + return Ok(self.inner.seek_exact(key).map(|opt| { + opt.and_then(|(k, node)| (k.hashed_address == address).then_some((k.path.0, node))) + })?); + } + Ok(None) + } + + fn seek( + &mut self, + path: Nibbles, + ) -> Result, DatabaseError> { + if let Some(address) = self.hashed_address { + let key = StorageTrieKey::new(address, StoredNibbles(path)); + return Ok(self.inner.seek(key).map(|opt| { + opt.and_then(|(k, node)| (k.hashed_address == address).then_some((k.path.0, node))) + })?); + } + Ok(None) + } + + fn next(&mut self) -> Result, DatabaseError> { + if let Some(address) = self.hashed_address { + // If the cursor is not positioned, we need to seek to the first key for our bound + // address to ensure we start iterating from the correct position in the + // table. This is necessary because BlockNumberVersionedCursor::next() would + // otherwise start from T::Key::default() (the beginning of the entire + // table), which would cause us to miss entries for non-first addresses. + if !self.inner.is_positioned()? 
{ + return self.seek(Nibbles::default()); + } + + return Ok(self.inner.next().map(|opt| { + opt.and_then(|(k, node)| (k.hashed_address == address).then_some((k.path.0, node))) + })?); + } + Ok(None) + } + + fn current(&mut self) -> Result, DatabaseError> { + if let Some(address) = self.hashed_address { + return self.inner.cursor.current().map(|opt| { + opt.and_then(|(k, _)| (k.hashed_address == address).then_some(k.path.0)) + }); + } + Ok(None) + } + + fn reset(&mut self) { + // Database cursors are stateless, no reset needed + } +} + +impl TrieStorageCursor for MdbxTrieCursor +where + Cursor: DbCursorRO + DbDupCursorRO + Send + Sync, +{ + fn set_hashed_address(&mut self, hashed_address: B256) { + self.hashed_address = Some(hashed_address); + } +} + +/// MDBX implementation of [`HashedCursor`] for storage state. +#[derive(Debug)] +pub struct MdbxStorageCursor { + inner: BlockNumberVersionedCursor, + hashed_address: B256, +} + +impl MdbxStorageCursor +where + Cursor: DbCursorRO + DbDupCursorRO + Send + Sync, +{ + /// Initializes new [`MdbxStorageCursor`] + pub const fn new(cursor: Cursor, block_number: u64, hashed_address: B256) -> Self { + Self { inner: BlockNumberVersionedCursor::new(cursor, block_number), hashed_address } + } +} + +impl HashedCursor for MdbxStorageCursor +where + Cursor: DbCursorRO + DbDupCursorRO + Send + Sync, +{ + type Value = U256; + + fn seek(&mut self, key: B256) -> Result, DatabaseError> { + let storage_key = HashedStorageKey::new(self.hashed_address, key); + + // hashed storage values can be zero, which means the storage slot is deleted, so we should + // skip those + let result = self.inner.seek(storage_key).map(|opt| { + opt.and_then(|(k, v)| { + // Only return entries that belong to the bound address + (k.hashed_address == self.hashed_address).then_some((k.hashed_storage_key, v.0)) + }) + })?; + + if let Some((_, v)) = result && + v.is_zero() + { + return self.next(); + } + + Ok(result) + } + + fn next(&mut self) -> Result, 
DatabaseError> { + // If the cursor is not positioned, we need to seek to the first key for our bound address + // to ensure we start iterating from the correct position in the table. + // This is necessary because BlockNumberVersionedCursor::next() would otherwise start + // from T::Key::default() (the beginning of the entire table), which would cause us + // to miss entries for non-first addresses. + if !self.inner.is_positioned()? { + return self.seek(B256::ZERO); + } + + loop { + let result = self.inner.next().map(|opt| { + opt.and_then(|(k, v)| { + // Only return entries that belong to the bound address + (k.hashed_address == self.hashed_address).then_some((k.hashed_storage_key, v.0)) + }) + })?; + + // hashed storage values can be zero, which means the storage slot is deleted, so we + // should skip those + if let Some((_, v)) = result && + v.is_zero() + { + continue; + } + + return Ok(result); + } + } + + fn reset(&mut self) { + // Database cursors are stateless, no reset needed + } +} + +impl HashedStorageCursor for MdbxStorageCursor> { + fn is_storage_empty(&mut self) -> Result { + Ok(self.seek(B256::ZERO)?.is_none()) + } + + fn set_hashed_address(&mut self, hashed_address: B256) { + self.hashed_address = hashed_address + } +} + +/// MDBX implementation of [`HashedCursor`] for account state. +#[derive(Debug)] +pub struct MdbxAccountCursor { + inner: BlockNumberVersionedCursor, +} + +impl MdbxAccountCursor +where + Cursor: DbCursorRO + DbDupCursorRO + Send + Sync, +{ + /// Initializes new `MdbxAccountCursor` + pub const fn new(cursor: Cursor, block_number: u64) -> Self { + Self { inner: BlockNumberVersionedCursor::new(cursor, block_number) } + } +} + +impl HashedCursor for MdbxAccountCursor +where + Cursor: DbCursorRO + DbDupCursorRO + Send + Sync, +{ + type Value = Account; + + fn seek(&mut self, key: B256) -> Result, DatabaseError> { + Ok(self.inner.seek(key)?) + } + + fn next(&mut self) -> Result, DatabaseError> { + Ok(self.inner.next()?) 
+ } + + fn reset(&mut self) { + // Database cursors are stateless, no reset needed + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::db::{StorageValue, models}; + use reth_db::{ + DatabaseEnv, + mdbx::{DatabaseArguments, init_db_for}, + }; + use reth_db_api::{ + Database, + cursor::DbDupCursorRW, + transaction::{DbTx, DbTxMut}, + }; + use reth_trie::{BranchNodeCompact, Nibbles, StoredNibbles}; + use tempfile::TempDir; + + fn setup_db() -> DatabaseEnv { + let tmp = TempDir::new().expect("create tmpdir"); + init_db_for::<_, models::Tables>(tmp, DatabaseArguments::default()).expect("init db") + } + + fn stored(path: Nibbles) -> StoredNibbles { + StoredNibbles(path) + } + + fn node() -> BranchNodeCompact { + BranchNodeCompact::default() + } + + fn append_account_trie( + wtx: &::TXMut, + key: StoredNibbles, + block: u64, + val: Option, + ) { + let mut c = wtx.cursor_dup_write::().expect("dup write cursor"); + let vv = VersionedValue { block_number: block, value: MaybeDeleted(val) }; + c.append_dup(key, vv).expect("append dup"); + } + + fn append_storage_trie( + wtx: &::TXMut, + address: B256, + path: Nibbles, + block: u64, + val: Option, + ) { + let mut c = wtx.cursor_dup_write::().expect("dup write cursor"); + let key = StorageTrieKey::new(address, StoredNibbles(path)); + let vv = VersionedValue { block_number: block, value: MaybeDeleted(val) }; + c.append_dup(key, vv).expect("append dup"); + } + + fn append_hashed_storage( + wtx: &::TXMut, + addr: B256, + slot: B256, + block: u64, + val: Option, + ) { + let mut c = wtx.cursor_dup_write::().expect("dup write"); + let key = HashedStorageKey::new(addr, slot); + let vv = VersionedValue { block_number: block, value: MaybeDeleted(val.map(StorageValue)) }; + c.append_dup(key, vv).expect("append dup"); + } + + fn append_hashed_account( + wtx: &::TXMut, + key: B256, + block: u64, + val: Option, + ) { + let mut c = wtx.cursor_dup_write::().expect("dup write"); + let vv = VersionedValue { block_number: block, 
value: MaybeDeleted(val) }; + c.append_dup(key, vv).expect("append dup"); + } + + // Open a dup-RO cursor and wrap it in a BlockNumberVersionedCursor with a given bound. + fn version_cursor( + tx: &::TX, + max_block: u64, + ) -> BlockNumberVersionedCursor> { + let cur = tx.cursor_dup_read::().expect("dup ro cursor"); + BlockNumberVersionedCursor::new(cur, max_block) + } + + fn account_trie_cursor( + tx: &'_ ::TX, + max_block: u64, + ) -> MdbxTrieCursor> { + let c = tx.cursor_dup_read::().expect("dup ro cursor"); + // For account trie the address is not used; pass None. + MdbxTrieCursor::new(c, max_block, None) + } + + // Helper: build a Storage trie cursor bound to an address + fn storage_trie_cursor( + tx: &'_ ::TX, + max_block: u64, + address: B256, + ) -> MdbxTrieCursor> { + let c = tx.cursor_dup_read::().expect("dup ro cursor"); + MdbxTrieCursor::new(c, max_block, Some(address)) + } + + fn storage_cursor( + tx: &'_ ::TX, + max_block: u64, + address: B256, + ) -> MdbxStorageCursor> { + let c = tx.cursor_dup_read::().expect("dup ro cursor"); + MdbxStorageCursor::new(c, max_block, address) + } + + fn account_cursor( + tx: &'_ ::TX, + max_block: u64, + ) -> MdbxAccountCursor> { + let c = tx.cursor_dup_read::().expect("dup ro cursor"); + MdbxAccountCursor::new(c, max_block) + } + + // Assert helper: ensure the chosen VersionedValue has the expected block and deletion flag. + fn assert_block( + got: Option<(StoredNibbles, VersionedValue)>, + expected_block: u64, + expect_deleted: bool, + ) { + let (_, vv) = got.expect("expected Some(..)"); + assert_eq!(vv.block_number, expected_block, "wrong block chosen"); + let is_deleted = matches!(vv.value, MaybeDeleted(None)); + assert_eq!(is_deleted, expect_deleted, "tombstone mismatch"); + } + + /// No entry for key → None. 
+ #[test] + fn latest_version_for_key_none_when_key_absent() { + let db = setup_db(); + let tx = db.tx().expect("ro tx"); + let mut cursor = version_cursor(&tx, 100); + + let out = cursor + .latest_version_for_key(stored(Nibbles::default())) + .expect("should not return error"); + assert!(out.is_none(), "absent key must return None"); + } + + /// Exact match at max (live) → pick it. + #[test] + fn latest_version_for_key_picks_value_at_max_if_present() { + let db = setup_db(); + let k = stored(Nibbles::from_nibbles([0x0A])); + { + let wtx = db.tx_mut().expect("rw tx"); + append_account_trie(&wtx, k.clone(), 10, Some(node())); + append_account_trie(&wtx, k.clone(), 50, Some(node())); // == max + wtx.commit().expect("commit"); + } + + let tx = db.tx().expect("ro tx"); + let mut core = version_cursor(&tx, 50); + + let out = core.latest_version_for_key(k).expect("ok"); + assert_block(out, 50, false); + } + + /// When `seek_by_key_subkey` points to the subkey > max - fallback to the prev. + #[test] + fn latest_version_for_key_picks_latest_below_max_when_next_is_above() { + let db = setup_db(); + let k = stored(Nibbles::from_nibbles([0x0A])); + { + let wtx = db.tx_mut().expect("rw tx"); + append_account_trie(&wtx, k.clone(), 10, Some(node())); + append_account_trie(&wtx, k.clone(), 30, Some(node())); // expected + append_account_trie(&wtx, k.clone(), 70, Some(node())); // > max + wtx.commit().expect("commit"); + } + + let tx = db.tx().expect("ro tx"); + let mut core = version_cursor(&tx, 50); + + let out = core.latest_version_for_key(k).expect("ok"); + assert_block(out, 30, false); + } + + /// No ≥ max but key exists → use last < max. 
+ #[test] + fn latest_version_for_key_picks_last_below_max_when_none_at_or_above() { + let db = setup_db(); + let k = stored(Nibbles::from_nibbles([0x0A])); + { + let wtx = db.tx_mut().expect("rw tx"); + append_account_trie(&wtx, k.clone(), 10, Some(node())); + append_account_trie(&wtx, k.clone(), 40, Some(node())); // expected (max=100) + wtx.commit().expect("commit"); + } + + let tx = db.tx().expect("ro tx"); + let mut core = version_cursor(&tx, 100); + + let out = core.latest_version_for_key(k).expect("ok"); + assert_block(out, 40, false); + } + + /// All entries are > max → None. + #[test] + fn latest_version_for_key_none_when_everything_is_above_max() { + let db = setup_db(); + let k1 = stored(Nibbles::from_nibbles([0x0A])); + let k2 = stored(Nibbles::from_nibbles([0x0B])); + + { + let wtx = db.tx_mut().expect("rw tx"); + append_account_trie(&wtx, k1.clone(), 60, Some(node())); + append_account_trie(&wtx, k1.clone(), 70, Some(node())); + append_account_trie(&wtx, k2, 40, Some(node())); + wtx.commit().expect("commit"); + } + + let tx = db.tx().expect("ro tx"); + let mut core = version_cursor(&tx, 50); + + let out = core.latest_version_for_key(k1).expect("ok"); + assert!(out.is_none(), "no dup ≤ max ⇒ None"); + } + + /// Single dup < max → pick it. + #[test] + fn latest_version_for_key_picks_single_below_max() { + let db = setup_db(); + let k = stored(Nibbles::from_nibbles([0x0A])); + { + let wtx = db.tx_mut().expect("rw tx"); + append_account_trie(&wtx, k.clone(), 25, Some(node())); // < max + wtx.commit().expect("commit"); + } + + let tx = db.tx().expect("ro tx"); + let mut core = version_cursor(&tx, 50); + + let out = core.latest_version_for_key(k).expect("ok"); + assert_block(out, 25, false); + } + + /// Single dup == max → pick it. 
+ #[test] + fn latest_version_for_key_picks_single_at_max() { + let db = setup_db(); + let k = stored(Nibbles::from_nibbles([0x0A])); + { + let wtx = db.tx_mut().expect("rw tx"); + append_account_trie(&wtx, k.clone(), 50, Some(node())); // == max + wtx.commit().expect("commit"); + } + + let tx = db.tx().expect("ro tx"); + let mut core = version_cursor(&tx, 50); + + let out = core.latest_version_for_key(k).expect("ok"); + assert_block(out, 50, false); + } + + /// Latest ≤ max is a tombstone → return it (this API doesn't filter). + #[test] + fn latest_version_for_key_returns_tombstone_if_latest_is_deleted() { + let db = setup_db(); + let k = stored(Nibbles::from_nibbles([0x0A])); + { + let wtx = db.tx_mut().expect("rw tx"); + append_account_trie(&wtx, k.clone(), 10, Some(node())); + append_account_trie(&wtx, k.clone(), 90, None); // latest ≤ max, but deleted + wtx.commit().expect("commit"); + } + + let tx = db.tx().expect("ro tx"); + let mut core = version_cursor(&tx, 100); + + let out = core.latest_version_for_key(k).expect("ok"); + assert_block(out, 90, true); + } + + /// Should skip tombstones and return None when the latest ≤ max is deleted. + #[test] + fn seek_exact_skips_tombstone_returns_none() { + let db = setup_db(); + let k = stored(Nibbles::from_nibbles([0x0A])); + { + let wtx = db.tx_mut().expect("rw tx"); + append_account_trie(&wtx, k.clone(), 10, Some(node())); + append_account_trie(&wtx, k.clone(), 90, None); // latest ≤ max is tombstoned + wtx.commit().expect("commit"); + } + + let tx = db.tx().expect("ro tx"); + let mut core = version_cursor(&tx, 100); + + let out = core.seek_exact(k).expect("ok"); + assert!(out.is_none(), "seek_exact must filter out deleted latest value"); + } + + /// Empty table → None. 
+ #[test] + fn seek_empty_returns_none() { + let db = setup_db(); + let tx = db.tx().expect("ro tx"); + let mut cur = version_cursor(&tx, 100); + + let out = cur.seek(stored(Nibbles::from_nibbles([0x0A]))).expect("ok"); + assert!(out.is_none()); + } + + /// Start at an existing key whose latest ≤ max is live → returns that key. + #[test] + fn seek_at_live_key_returns_it() { + let db = setup_db(); + let k = stored(Nibbles::from_nibbles([0x0A])); + { + let wtx = db.tx_mut().expect("rw tx"); + append_account_trie(&wtx, k.clone(), 10, Some(node())); + append_account_trie(&wtx, k.clone(), 20, Some(node())); // latest ≤ max + wtx.commit().expect("commit"); + } + let tx = db.tx().expect("ro tx"); + let mut cur = version_cursor(&tx, 50); + + let out = cur.seek(k.clone()).expect("ok").expect("some"); + assert_eq!(out.0, k); + } + + /// Start at an existing key whose latest ≤ max is tombstoned → skip to next key with live + /// value. + #[test] + fn seek_skips_tombstoned_key_to_next_live_key() { + let db = setup_db(); + let k1 = stored(Nibbles::from_nibbles([0x0A])); + let k2 = stored(Nibbles::from_nibbles([0x0B])); + + { + let wtx = db.tx_mut().expect("rw tx"); + // Key 0x10 latest ≤ max is deleted + append_account_trie(&wtx, k1.clone(), 10, Some(node())); + append_account_trie(&wtx, k1.clone(), 20, None); // tombstone at latest ≤ max + // Next key has live + append_account_trie(&wtx, k2.clone(), 5, Some(node())); + wtx.commit().expect("commit"); + } + let tx = db.tx().expect("ro tx"); + let mut cur = version_cursor(&tx, 50); + + let out = cur.seek(k1).expect("ok").expect("some"); + assert_eq!(out.0, k2); + } + + /// Start between keys → returns the next key’s live latest ≤ max. 
+ #[test] + fn seek_between_keys_returns_next_key() { + let db = setup_db(); + let k1 = stored(Nibbles::from_nibbles([0x0A])); + let k2 = stored(Nibbles::from_nibbles([0x0C])); + let k3 = stored(Nibbles::from_nibbles([0x0B])); + + { + let wtx = db.tx_mut().expect("rw tx"); + append_account_trie(&wtx, k1, 10, Some(node())); + append_account_trie(&wtx, k2.clone(), 10, Some(node())); + wtx.commit().expect("commit"); + } + let tx = db.tx().expect("ro tx"); + let mut cur = version_cursor(&tx, 100); + + // Start at 0x15 (between 0x10 and 0x20) + + let out = cur.seek(k3).expect("ok").expect("some"); + assert_eq!(out.0, k2); + } + + /// Start after the last key → None. + #[test] + fn seek_after_last_returns_none() { + let db = setup_db(); + let k1 = stored(Nibbles::from_nibbles([0x0A])); + let k2 = stored(Nibbles::from_nibbles([0x0B])); + let k3 = stored(Nibbles::from_nibbles([0x0C])); + + { + let wtx = db.tx_mut().expect("rw tx"); + append_account_trie(&wtx, k1, 10, Some(node())); + append_account_trie(&wtx, k2, 10, Some(node())); + wtx.commit().expect("commit"); + } + let tx = db.tx().expect("ro tx"); + let mut cur = version_cursor(&tx, 100); + + let out = cur.seek(k3).expect("ok"); + assert!(out.is_none()); + } + + /// If the first key at-or-after has only versions > max, it is effectively not visible → skip + /// to next. + #[test] + fn seek_skips_keys_with_only_versions_above_max() { + let db = setup_db(); + let k1 = stored(Nibbles::from_nibbles([0x0A])); + let k2 = stored(Nibbles::from_nibbles([0x0B])); + + { + let wtx = db.tx_mut().expect("rw tx"); + append_account_trie(&wtx, k1.clone(), 60, Some(node())); + append_account_trie(&wtx, k2.clone(), 40, Some(node())); + wtx.commit().expect("commit"); + } + let tx = db.tx().expect("ro tx"); + let mut cur = version_cursor(&tx, 50); + + let out = cur.seek(k1).expect("ok").expect("some"); + assert_eq!(out.0, k2); + } + + /// Start at a key with mixed versions; latest ≤ max is tombstone → skip to next key with live. 
+ #[test] + fn seek_mixed_versions_tombstone_latest_skips_to_next_key() { + let db = setup_db(); + let k1 = stored(Nibbles::from_nibbles([0x0A])); + let k2 = stored(Nibbles::from_nibbles([0x0B])); + + { + let wtx = db.tx_mut().expect("rw tx"); + append_account_trie(&wtx, k1.clone(), 10, Some(node())); + append_account_trie(&wtx, k1.clone(), 30, None); + append_account_trie(&wtx, k2.clone(), 5, Some(node())); + wtx.commit().expect("commit"); + } + let tx = db.tx().expect("ro tx"); + let mut cur = version_cursor(&tx, 30); + + let out = cur.seek(k1).expect("ok").expect("some"); + assert_eq!(out.0, k2); + } + + /// When not positioned should start from default key and return the first live key. + #[test] + fn next_unpositioned_starts_from_default_returns_first_live() { + let db = setup_db(); + let k1 = stored(Nibbles::from_nibbles([0x0A])); + let k2 = stored(Nibbles::from_nibbles([0x0B])); + + { + let wtx = db.tx_mut().expect("rw tx"); + append_account_trie(&wtx, k1.clone(), 10, Some(node())); // first live + append_account_trie(&wtx, k2, 10, Some(node())); + wtx.commit().expect("commit"); + } + + let tx = db.tx().expect("ro tx"); + // Unpositioned cursor + let mut cur = version_cursor(&tx, 100); + + let out = cur.next().expect("ok").expect("some"); + assert_eq!(out.0, k1); + } + + /// After positioning on a live key via `seek()`, `next()` should advance to the next live key. 
+ #[test] + fn next_advances_from_current_live_to_next_live() { + let db = setup_db(); + let k1 = stored(Nibbles::from_nibbles([0x0A])); + let k2 = stored(Nibbles::from_nibbles([0x0B])); + + { + let wtx = db.tx_mut().expect("rw tx"); + append_account_trie(&wtx, k1.clone(), 10, Some(node())); // live + append_account_trie(&wtx, k2.clone(), 10, Some(node())); // next live + wtx.commit().expect("commit"); + } + + let tx = db.tx().expect("ro tx"); + let mut cur = version_cursor(&tx, 100); + + // Position at k1 + let _ = cur.seek(k1).expect("ok").expect("some"); + // Next should yield k2 + let out = cur.next().expect("ok").expect("some"); + assert_eq!(out.0, k2); + } + + /// If the next key's latest ≤ max is tombstone, `next()` should skip to the next live key. + #[test] + fn next_skips_tombstoned_key_to_next_live() { + let db = setup_db(); + let k1 = stored(Nibbles::from_nibbles([0x0A])); + let k2 = stored(Nibbles::from_nibbles([0x0B])); // will be tombstoned at latest ≤ max + let k3 = stored(Nibbles::from_nibbles([0x0C])); // next live + + { + let wtx = db.tx_mut().expect("rw tx"); + // k1 live + append_account_trie(&wtx, k1.clone(), 10, Some(node())); + // k2: latest ≤ max is tombstone + append_account_trie(&wtx, k2.clone(), 10, Some(node())); + append_account_trie(&wtx, k2, 20, None); + // k3 live + append_account_trie(&wtx, k3.clone(), 10, Some(node())); + wtx.commit().expect("commit"); + } + + let tx = db.tx().expect("ro tx"); + let mut cur = version_cursor(&tx, 50); + + // Position at k1 + let _ = cur.seek(k1).expect("ok").expect("some"); + // next should skip k2 (tombstoned latest) and return k3 + let out = cur.next().expect("ok").expect("some"); + assert_eq!(out.0, k3); + } + + /// If positioned on the last live key, `next()` should return None (EOF). 
+ #[test] + fn next_returns_none_at_eof() { + let db = setup_db(); + let k1 = stored(Nibbles::from_nibbles([0x0A])); + let k2 = stored(Nibbles::from_nibbles([0x0B])); // last key + + { + let wtx = db.tx_mut().expect("rw tx"); + append_account_trie(&wtx, k1, 10, Some(node())); + append_account_trie(&wtx, k2.clone(), 10, Some(node())); // last live + wtx.commit().expect("commit"); + } + + let tx = db.tx().expect("ro tx"); + let mut cur = version_cursor(&tx, 100); + + // Position at the last key k2 + let _ = cur.seek(k2).expect("ok").expect("some"); + // `next()` should hit EOF + let out = cur.next().expect("ok"); + assert!(out.is_none()); + } + + /// If the first key has only versions > max, `next()` should skip it and return the next live + /// key. + #[test] + fn next_skips_keys_with_only_versions_above_max() { + let db = setup_db(); + let k1 = stored(Nibbles::from_nibbles([0x0A])); // only > max + let k2 = stored(Nibbles::from_nibbles([0x0B])); // ≤ max live + + { + let wtx = db.tx_mut().expect("rw tx"); + // k1 only above max (max=50) + append_account_trie(&wtx, k1, 60, Some(node())); + // k2 within max + append_account_trie(&wtx, k2.clone(), 40, Some(node())); + wtx.commit().expect("commit"); + } + + let tx = db.tx().expect("ro tx"); + // Unpositioned; `next()` will start from default and walk + let mut cur = version_cursor(&tx, 50); + + let out = cur.next().expect("ok").expect("some"); + assert_eq!(out.0, k2); + } + + /// Empty table: `next()` should return None. 
+ #[test] + fn next_on_empty_returns_none() { + let db = setup_db(); + let tx = db.tx().expect("ro tx"); + let mut cur = version_cursor(&tx, 100); + + let out = cur.next().expect("ok"); + assert!(out.is_none()); + } + + // ----------------- Account trie cursor thin-wrapper checks ----------------- + + #[test] + fn account_seek_exact_live_maps_key_and_value() { + let db = setup_db(); + let k = Nibbles::from_nibbles([0x0A]); + + { + let wtx = db.tx_mut().expect("rw tx"); + append_account_trie(&wtx, StoredNibbles(k), 10, Some(node())); + wtx.commit().expect("commit"); + } + + let tx = db.tx().expect("ro tx"); + + // Build wrapper + let mut cur = account_trie_cursor(&tx, 100); + + // Wrapper should return (Nibbles, BranchNodeCompact) + let out = TrieCursor::seek_exact(&mut cur, k).expect("ok").expect("some"); + assert_eq!(out.0, k); + } + + #[test] + fn account_seek_exact_filters_tombstone() { + let db = setup_db(); + let k = Nibbles::from_nibbles([0x0B]); + + { + let wtx = db.tx_mut().expect("rw tx"); + append_account_trie(&wtx, StoredNibbles(k), 5, Some(node())); + append_account_trie(&wtx, StoredNibbles(k), 9, None); // latest ≤ max tombstone + wtx.commit().expect("commit"); + } + + let tx = db.tx().expect("ro tx"); + let mut cur = account_trie_cursor(&tx, 10); + + let out = TrieCursor::seek_exact(&mut cur, k).expect("ok"); + assert!(out.is_none(), "account seek_exact must filter tombstone"); + } + + #[test] + fn account_seek_and_next_and_current_roundtrip() { + let db = setup_db(); + let k1 = Nibbles::from_nibbles([0x01]); + let k2 = Nibbles::from_nibbles([0x02]); + + { + let wtx = db.tx_mut().expect("rw tx"); + append_account_trie(&wtx, StoredNibbles(k1), 10, Some(node())); + append_account_trie(&wtx, StoredNibbles(k2), 10, Some(node())); + wtx.commit().expect("commit"); + } + + let tx = db.tx().expect("ro tx"); + let mut cur = account_trie_cursor(&tx, 100); + + // seek at k1 + let out1 = TrieCursor::seek(&mut cur, k1).expect("ok").expect("some"); + 
assert_eq!(out1.0, k1); + + // current should be k1 + let cur_k = TrieCursor::current(&mut cur).expect("ok").expect("some"); + assert_eq!(cur_k, k1); + + // next should move to k2 + let out2 = TrieCursor::next(&mut cur).expect("ok").expect("some"); + assert_eq!(out2.0, k2); + } + + // ----------------- Storage trie cursor thin-wrapper checks ----------------- + + #[test] + fn storage_seek_exact_respects_address_filter() { + let db = setup_db(); + + let addr_a = B256::from([0xAA; 32]); + let addr_b = B256::from([0xBB; 32]); + + let path = Nibbles::from_nibbles([0x0D]); + + { + let wtx = db.tx_mut().expect("rw tx"); + // insert only under B + append_storage_trie(&wtx, addr_b, path, 10, Some(node())); + wtx.commit().expect("commit"); + } + + let tx = db.tx().expect("ro tx"); + + // Cursor bound to A must not see B’s data + let mut cur_a = storage_trie_cursor(&tx, 100, addr_a); + let out_a = TrieCursor::seek_exact(&mut cur_a, path).expect("ok"); + assert!(out_a.is_none(), "no data for addr A"); + + // Cursor bound to B should see it + let mut cur_b = storage_trie_cursor(&tx, 100, addr_b); + let out_b = TrieCursor::seek_exact(&mut cur_b, path).expect("ok").expect("some"); + assert_eq!(out_b.0, path); + } + + #[test] + fn storage_seek_returns_first_key_for_bound_address() { + let db = setup_db(); + + let addr_a = B256::from([0x11; 32]); + let addr_b = B256::from([0x22; 32]); + + let p1 = Nibbles::from_nibbles([0x01]); + let p2 = Nibbles::from_nibbles([0x02]); + let p3 = Nibbles::from_nibbles([0x03]); + + { + let wtx = db.tx_mut().expect("rw tx"); + // For A: only p2 + append_storage_trie(&wtx, addr_a, p2, 10, Some(node())); + // For B: p1 + append_storage_trie(&wtx, addr_b, p1, 10, Some(node())); + wtx.commit().expect("commit"); + } + + // test seek behaviour + { + let tx = db.tx().expect("ro tx"); + let mut cur_a = storage_trie_cursor(&tx, 100, addr_a); + + // seek at p1: for A there is no p1; the next key >= p1 under A is p2 + let out = TrieCursor::seek(&mut cur_a, 
p1).expect("ok").expect("some"); + assert_eq!(out.0, p2); + + // seek at p2: exact match + let out = TrieCursor::seek(&mut cur_a, p2).expect("ok").expect("some"); + assert_eq!(out.0, p2); + + // seek at p3: no p3 under A; no next key ≥ p3 under A → None + let out = TrieCursor::seek(&mut cur_a, p3).expect("ok"); + assert!(out.is_none(), "no key ≥ p3 under A"); + } + + // test next behaviour + { + let tx = db.tx().expect("ro tx"); + let mut cur_a = storage_trie_cursor(&tx, 100, addr_a); + + let out = TrieCursor::next(&mut cur_a).expect("ok").expect("some"); + assert_eq!(out.0, p2); + + // next should yield None as there is no further key under A + let out = TrieCursor::next(&mut cur_a).expect("ok"); + assert!(out.is_none(), "no more keys under A"); + + // current should return None + let out = TrieCursor::current(&mut cur_a).expect("ok"); + assert!(out.is_none(), "no current key after EOF"); + } + + // test seek_exact behaviour + { + let tx = db.tx().expect("ro tx"); + let mut cur_a = storage_trie_cursor(&tx, 100, addr_a); + + // seek_exact at p1: no exact match + let out = TrieCursor::seek_exact(&mut cur_a, p1).expect("ok"); + assert!(out.is_none(), "no exact p1 under A"); + + // seek_exact at p2: exact match + let out = TrieCursor::seek_exact(&mut cur_a, p2).expect("ok").expect("some"); + assert_eq!(out.0, p2); + + // seek_exact at p3: no exact match + let out = TrieCursor::seek_exact(&mut cur_a, p3).expect("ok"); + assert!(out.is_none(), "no exact p3 under A"); + } + } + + #[test] + fn storage_next_stops_at_address_boundary() { + let db = setup_db(); + + let addr_a = B256::from([0x33; 32]); + let addr_b = B256::from([0x44; 32]); + + let p1 = Nibbles::from_nibbles([0x05]); // under A + let p2 = Nibbles::from_nibbles([0x06]); // under B (next key overall) + + { + let wtx = db.tx_mut().expect("rw tx"); + append_storage_trie(&wtx, addr_a, p1, 10, Some(node())); + append_storage_trie(&wtx, addr_b, p2, 10, Some(node())); + wtx.commit().expect("commit"); + } + + let tx = 
db.tx().expect("ro tx"); + let mut cur_a = storage_trie_cursor(&tx, 100, addr_a); + + // position at p1 (A) + let _ = TrieCursor::seek_exact(&mut cur_a, p1).expect("ok").expect("some"); + + // next should reach boundary; impl filters different address and returns None + let out = TrieCursor::next(&mut cur_a).expect("ok"); + assert!(out.is_none(), "next() should stop when next key is a different address"); + } + + #[test] + fn storage_current_maps_key() { + let db = setup_db(); + + let addr = B256::from([0x55; 32]); + let p = Nibbles::from_nibbles([0x09]); + + { + let wtx = db.tx_mut().expect("rw tx"); + append_storage_trie(&wtx, addr, p, 10, Some(node())); + wtx.commit().expect("commit"); + } + + let tx = db.tx().expect("ro tx"); + let mut cur = storage_trie_cursor(&tx, 100, addr); + + let _ = TrieCursor::seek_exact(&mut cur, p).expect("ok").expect("some"); + + let now = TrieCursor::current(&mut cur).expect("ok").expect("some"); + assert_eq!(now, p); + } + + #[test] + fn hashed_storage_seek_maps_slot_and_value() { + let db = setup_db(); + let addr = B256::from([0xAA; 32]); + let slot = B256::from([0x10; 32]); + + { + let wtx = db.tx_mut().expect("rw"); + append_hashed_storage(&wtx, addr, slot, 10, Some(U256::from(7))); + wtx.commit().expect("commit"); + } + + let tx = db.tx().expect("ro"); + let mut cur = storage_cursor(&tx, 100, addr); + + let (got_slot, got_val) = cur.seek(slot).expect("ok").expect("some"); + assert_eq!(got_slot, slot); + assert_eq!(got_val, U256::from(7)); + } + + #[test] + fn hashed_storage_seek_filters_tombstone() { + let db = setup_db(); + let addr = B256::from([0xAB; 32]); + let slot = B256::from([0x11; 32]); + + { + let wtx = db.tx_mut().expect("rw"); + append_hashed_storage(&wtx, addr, slot, 5, Some(U256::from(1))); + append_hashed_storage(&wtx, addr, slot, 9, None); // latest ≤ max is tombstone + wtx.commit().expect("commit"); + } + + let tx = db.tx().expect("ro"); + let mut cur = storage_cursor(&tx, 10, addr); + + let out = 
cur.seek(slot).expect("ok"); + assert!(out.is_none(), "wrapper must filter tombstoned latest"); + } + + #[test] + fn hashed_storage_seek_and_next_roundtrip() { + let db = setup_db(); + let addr = B256::from([0xAC; 32]); + let s1 = B256::from([0x01; 32]); + let s2 = B256::from([0x02; 32]); + + { + let wtx = db.tx_mut().expect("rw"); + append_hashed_storage(&wtx, addr, s1, 10, Some(U256::from(11))); + append_hashed_storage(&wtx, addr, s2, 10, Some(U256::from(22))); + wtx.commit().expect("commit"); + } + + let tx = db.tx().expect("ro"); + let mut cur = storage_cursor(&tx, 100, addr); + + let (k1, v1) = cur.seek(s1).expect("ok").expect("some"); + assert_eq!((k1, v1), (s1, U256::from(11))); + + let (k2, v2) = cur.next().expect("ok").expect("some"); + assert_eq!((k2, v2), (s2, U256::from(22))); + } + + #[test] + fn hashed_storage_address_boundary() { + let db = setup_db(); + let addr1 = B256::from([0xAC; 32]); + let addr2 = B256::from([0xAD; 32]); + let s1 = B256::from([0x01; 32]); + let s2 = B256::from([0x02; 32]); + let s3 = B256::from([0x03; 32]); + + { + let wtx = db.tx_mut().expect("rw"); + append_hashed_storage(&wtx, addr1, s1, 10, Some(U256::from(11))); + append_hashed_storage(&wtx, addr1, s2, 10, Some(U256::from(22))); + wtx.commit().expect("commit"); + } + + { + let wtx = db.tx_mut().expect("rw"); + append_hashed_storage(&wtx, addr2, s1, 10, Some(U256::from(33))); + append_hashed_storage(&wtx, addr2, s2, 10, Some(U256::from(44))); + wtx.commit().expect("commit"); + } + + let tx = db.tx().expect("ro"); + let mut cur = storage_cursor(&tx, 100, addr1); + + let (k1, v1) = cur.next().expect("ok").expect("some"); + assert_eq!((k1, v1), (s1, U256::from(11))); + + let (k2, v2) = cur.next().expect("ok").expect("some"); + assert_eq!((k2, v2), (s2, U256::from(22))); + + let out = cur.next().expect("ok"); + assert!(out.is_none(), "should stop at address boundary"); + + let (k1, v1) = cur.seek(s1).expect("ok").expect("some"); + assert_eq!((k1, v1), (s1, U256::from(11))); + + 
let (k2, v2) = cur.seek(s2).expect("ok").expect("some"); + assert_eq!((k2, v2), (s2, U256::from(22))); + + let out = cur.seek(s3).expect("ok"); + assert!(out.is_none(), "should not see keys from other address"); + } + + #[test] + fn hashed_account_seek_maps_key_and_value() { + let db = setup_db(); + let key = B256::from([0x20; 32]); + + { + let wtx = db.tx_mut().expect("rw"); + append_hashed_account(&wtx, key, 10, Some(Account::default())); + wtx.commit().expect("commit"); + } + + let tx = db.tx().expect("ro"); + let mut cur = account_cursor(&tx, 100); + + let (got_key, _acc) = cur.seek(key).expect("ok").expect("some"); + assert_eq!(got_key, key); + } + + #[test] + fn hashed_account_seek_filters_tombstone() { + let db = setup_db(); + let key = B256::from([0x21; 32]); + + { + let wtx = db.tx_mut().expect("rw"); + append_hashed_account(&wtx, key, 5, Some(Account::default())); + append_hashed_account(&wtx, key, 9, None); // latest ≤ max is tombstone + wtx.commit().expect("commit"); + } + + let tx = db.tx().expect("ro"); + let mut cur = account_cursor(&tx, 10); + + let out = cur.seek(key).expect("ok"); + assert!(out.is_none(), "wrapper must filter tombstoned latest"); + } + + #[test] + fn hashed_account_seek_and_next_roundtrip() { + let db = setup_db(); + let k1 = B256::from([0x01; 32]); + let k2 = B256::from([0x02; 32]); + + { + let wtx = db.tx_mut().expect("rw"); + append_hashed_account(&wtx, k1, 10, Some(Account::default())); + append_hashed_account(&wtx, k2, 10, Some(Account::default())); + wtx.commit().expect("commit"); + } + + let tx = db.tx().expect("ro"); + let mut cur = account_cursor(&tx, 100); + + let (got1, _) = cur.seek(k1).expect("ok").expect("some"); + assert_eq!(got1, k1); + + let (got2, _) = cur.next().expect("ok").expect("some"); + assert_eq!(got2, k2); + } + + /// Regression test: `MdbxStorageCursor` `next()` should work without explicit `seek()` + /// when cursor is constructed for a non-first key. 
+ /// + /// Bug: When a storage cursor is created for a specific address (e.g., 0x02), + /// calling `next()` without first calling `seek()` returns None instead of the first + /// slot for that address. This only manifests when the address is not the first + /// in the table. + #[test] + fn storage_cursor_next_without_seek_for_non_first_address() { + let db = setup_db(); + let addr1 = B256::from([0x01; 32]); // First address + let addr2 = B256::from([0x02; 32]); // Second address (non-first) + let slot1 = B256::from([0x11; 32]); + let slot2 = B256::from([0x12; 32]); + + { + let wtx = db.tx_mut().expect("rw"); + // Add storage for first address + append_hashed_storage(&wtx, addr1, slot1, 10, Some(U256::from(100))); + + // Add storage for second address + append_hashed_storage(&wtx, addr2, slot2, 10, Some(U256::from(200))); + wtx.commit().expect("commit"); + } + + let tx = db.tx().expect("ro"); + + // Test with addr1 (first address) - this typically works + let mut cur1 = storage_cursor(&tx, 100, addr1); + let result1 = cur1.next().expect("ok"); + assert!(result1.is_some(), "next() should return data for first address without seek()"); + if let Some((key, val)) = result1 { + assert_eq!(key, slot1); + assert_eq!(val, U256::from(100)); + } + + // Test with addr2 (non-first address) - this demonstrates the bug fix + let mut cur2 = storage_cursor(&tx, 100, addr2); + let result2_without_seek = cur2.next().expect("ok"); + + assert!( + result2_without_seek.is_some(), + "next() should return data for non-first address without seek()" + ); + if let Some((key, val)) = result2_without_seek { + assert_eq!(key, slot2); + assert_eq!(val, U256::from(200)); + } + + // Verify that seek() works correctly + let mut cur3 = storage_cursor(&tx, 100, addr2); + let result3_with_seek = cur3.seek(slot2).expect("ok"); + assert!(result3_with_seek.is_some(), "seek() should find the slot for addr2"); + if let Some((key, val)) = result3_with_seek { + assert_eq!(key, slot2); + assert_eq!(val, 
U256::from(200)); + } + } + + /// Regression test: `MdbxTrieCursor` `next()` should work without `seek()` + /// for non-first addresses. + #[test] + fn storage_trie_cursor_next_without_seek_for_non_first_address() { + let db = setup_db(); + let addr1 = B256::from([0x01; 32]); + let addr2 = B256::from([0x02; 32]); + let path1 = Nibbles::from_nibbles([0x0A]); + let path2 = Nibbles::from_nibbles([0x0B]); + + { + let wtx = db.tx_mut().expect("rw"); + append_storage_trie(&wtx, addr1, path1, 10, Some(node())); + append_storage_trie(&wtx, addr2, path2, 10, Some(node())); + wtx.commit().expect("commit"); + } + + let tx = db.tx().expect("ro"); + + // Test addr1 (first) - works + let mut cur1 = storage_trie_cursor(&tx, 100, addr1); + let result1 = TrieCursor::next(&mut cur1).expect("ok"); + assert!(result1.is_some()); + assert_eq!(result1.unwrap().0, path1); + + // Test addr2 (non-first) - should also work now + let mut cur2 = storage_trie_cursor(&tx, 100, addr2); + let result2 = TrieCursor::next(&mut cur2).expect("ok"); + assert!(result2.is_some(), "next() should work for non-first address without seek()"); + assert_eq!(result2.unwrap().0, path2); + } +} diff --git a/rust/op-reth/crates/trie/src/db/mod.rs b/rust/op-reth/crates/trie/src/db/mod.rs new file mode 100644 index 0000000000000..042b682074f6e --- /dev/null +++ b/rust/op-reth/crates/trie/src/db/mod.rs @@ -0,0 +1,17 @@ +//! MDBX implementation of [`OpProofsStore`](crate::OpProofsStore). +//! +//! This module provides a complete MDBX implementation of the +//! [`OpProofsStore`](crate::OpProofsStore) trait. It uses the [`reth_db`] +//! crate for database interactions and defines the necessary tables and models for storing trie +//! branches, accounts, and storage leaves. 
+ +mod models; +pub use models::*; + +mod store; +pub use store::MdbxProofsStorage; + +mod cursor; +pub use cursor::{ + BlockNumberVersionedCursor, MdbxAccountCursor, MdbxStorageCursor, MdbxTrieCursor, +}; diff --git a/rust/op-reth/crates/trie/src/db/models/block.rs b/rust/op-reth/crates/trie/src/db/models/block.rs new file mode 100644 index 0000000000000..c2066dad1f6ca --- /dev/null +++ b/rust/op-reth/crates/trie/src/db/models/block.rs @@ -0,0 +1,76 @@ +use alloy_eips::BlockNumHash; +use alloy_primitives::B256; +use bytes::BufMut; +use derive_more::{From, Into}; +use reth_db::{ + DatabaseError, + table::{Compress, Decompress}, +}; +use serde::{Deserialize, Serialize}; + +/// Wrapper for block number and block hash tuple to implement [`Compress`]/[`Decompress`]. +/// +/// Used for storing block metadata (number + hash). +#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize, From, Into)] +pub struct BlockNumberHash(BlockNumHash); + +impl Compress for BlockNumberHash { + type Compressed = Vec; + + fn compress_to_buf>(&self, buf: &mut B) { + // Encode block number (8 bytes, big-endian) + hash (32 bytes) = 40 bytes total + buf.put_u64(self.0.number); + buf.put_slice(self.0.hash.as_slice()); + } +} + +impl Decompress for BlockNumberHash { + fn decompress(value: &[u8]) -> Result { + if value.len() != 40 { + return Err(DatabaseError::Decode); + } + + let number = u64::from_be_bytes(value[..8].try_into().map_err(|_| DatabaseError::Decode)?); + let hash = B256::from_slice(&value[8..40]); + + Ok(Self(BlockNumHash { number, hash })) + } +} + +impl BlockNumberHash { + /// Create new instance. + pub const fn new(number: u64, hash: B256) -> Self { + Self(BlockNumHash { number, hash }) + } + + /// Get the block number. + pub const fn number(&self) -> u64 { + self.0.number + } + + /// Get the block hash. 
+ pub const fn hash(&self) -> &B256 { + &self.0.hash + } +} + +#[cfg(test)] +mod tests { + use super::*; + use alloy_primitives::B256; + + #[test] + fn test_block_number_hash_roundtrip() { + let test_cases = vec![ + BlockNumberHash::new(0, B256::ZERO), + BlockNumberHash::new(42, B256::repeat_byte(0xaa)), + BlockNumberHash::new(u64::MAX, B256::repeat_byte(0xff)), + ]; + + for original in test_cases { + let compressed = original.compress(); + let decompressed = BlockNumberHash::decompress(&compressed).unwrap(); + assert_eq!(original, decompressed); + } + } +} diff --git a/rust/op-reth/crates/trie/src/db/models/change_set.rs b/rust/op-reth/crates/trie/src/db/models/change_set.rs new file mode 100644 index 0000000000000..dbfdc815e434e --- /dev/null +++ b/rust/op-reth/crates/trie/src/db/models/change_set.rs @@ -0,0 +1,128 @@ +use crate::db::{HashedStorageKey, StorageTrieKey}; +use alloy_primitives::B256; +use reth_db::{ + DatabaseError, + table::{self, Decode, Encode}, +}; +use reth_trie_common::StoredNibbles; +use serde::{Deserialize, Serialize}; + +/// The keys of the entries in the history tables. +#[derive(Debug, Default, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] +pub struct ChangeSet { + /// Keys changed in [`AccountTrieHistory`](super::AccountTrieHistory) table. + pub account_trie_keys: Vec, + /// Keys changed in [`StorageTrieHistory`](super::StorageTrieHistory) table. + pub storage_trie_keys: Vec, + /// Keys changed in [`HashedAccountHistory`](super::HashedAccountHistory) table. + pub hashed_account_keys: Vec, + /// Keys changed in [`HashedStorageHistory`](super::HashedStorageHistory) table. 
+ pub hashed_storage_keys: Vec, +} + +impl table::Encode for ChangeSet { + type Encoded = Vec; + + fn encode(self) -> Self::Encoded { + bincode::serde::encode_to_vec(&self, bincode::config::standard()) + .expect("ChangeSet serialization should not fail") + } +} + +impl table::Decode for ChangeSet { + fn decode(value: &[u8]) -> Result { + bincode::serde::decode_from_slice(value, bincode::config::standard()) + .map(|(v, _)| v) + .map_err(|_| DatabaseError::Decode) + } +} + +impl table::Compress for ChangeSet { + type Compressed = Vec; + + fn compress_to_buf>(&self, buf: &mut B) { + let encoded = self.clone().encode(); + buf.put_slice(&encoded); + } +} + +impl table::Decompress for ChangeSet { + fn decompress(value: &[u8]) -> Result { + Self::decode(value) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use alloy_primitives::B256; + use reth_db::table::{Compress, Decompress}; + + #[test] + fn test_encode_decode_empty_change_set() { + let change_set = ChangeSet { + account_trie_keys: vec![], + storage_trie_keys: vec![], + hashed_account_keys: vec![], + hashed_storage_keys: vec![], + }; + + let encoded = change_set.clone().encode(); + let decoded = ChangeSet::decode(&encoded).expect("Failed to decode"); + assert_eq!(change_set, decoded); + } + + #[test] + fn test_encode_decode_populated_change_set() { + let account_key = StoredNibbles::from(vec![1, 2, 3, 4]); + let storage_key = StorageTrieKey { + hashed_address: B256::repeat_byte(0x11), + path: StoredNibbles::from(vec![5, 6, 7, 8]), + }; + let hashed_storage_key = HashedStorageKey { + hashed_address: B256::repeat_byte(0x22), + hashed_storage_key: B256::repeat_byte(0x33), + }; + + let change_set = ChangeSet { + account_trie_keys: vec![account_key], + storage_trie_keys: vec![storage_key], + hashed_account_keys: vec![B256::repeat_byte(0x44)], + hashed_storage_keys: vec![hashed_storage_key], + }; + + let encoded = change_set.clone().encode(); + let decoded = ChangeSet::decode(&encoded).expect("Failed to decode"); + 
assert_eq!(change_set, decoded); + } + + #[test] + fn test_decode_invalid_data() { + let invalid_data = vec![0xFF; 32]; + let result = ChangeSet::decode(&invalid_data); + assert!(result.is_err()); + assert!(matches!(result.unwrap_err(), DatabaseError::Decode)); + } + + #[test] + fn test_compress_decompress() { + let change_set = ChangeSet { + account_trie_keys: vec![StoredNibbles::from(vec![1, 2, 3])], + storage_trie_keys: vec![StorageTrieKey { + hashed_address: B256::ZERO, + path: StoredNibbles::from(vec![4, 5, 6]), + }], + hashed_account_keys: vec![B256::ZERO], + hashed_storage_keys: vec![HashedStorageKey { + hashed_address: B256::ZERO, + hashed_storage_key: B256::repeat_byte(0x42), + }], + }; + + let mut buf = Vec::new(); + change_set.compress_to_buf(&mut buf); + + let decompressed = ChangeSet::decompress(&buf).expect("Failed to decompress"); + assert_eq!(change_set, decompressed); + } +} diff --git a/rust/op-reth/crates/trie/src/db/models/kv.rs b/rust/op-reth/crates/trie/src/db/models/kv.rs new file mode 100644 index 0000000000000..5585336f4bba1 --- /dev/null +++ b/rust/op-reth/crates/trie/src/db/models/kv.rs @@ -0,0 +1,66 @@ +use crate::db::{ + AccountTrieHistory, HashedAccountHistory, HashedStorageHistory, HashedStorageKey, MaybeDeleted, + StorageTrieHistory, StorageTrieKey, StorageValue, VersionedValue, +}; +use alloy_primitives::B256; +use reth_db::table::{DupSort, Table}; +use reth_primitives_traits::Account; +use reth_trie_common::{BranchNodeCompact, Nibbles, StoredNibbles}; + +/// Helper to convert inputs into a table key or kv pair. +pub trait IntoKV { + /// Convert `self` into the table key. + fn into_key(self) -> Tab::Key; + + /// Convert `self` into kv for the given `block_number`. 
+ fn into_kv(self, block_number: u64) -> (Tab::Key, Tab::Value); +} + +impl IntoKV for (Nibbles, Option) { + fn into_key(self) -> StoredNibbles { + StoredNibbles::from(self.0) + } + + fn into_kv(self, block_number: u64) -> (StoredNibbles, VersionedValue) { + let (path, node) = self; + (StoredNibbles::from(path), VersionedValue { block_number, value: MaybeDeleted(node) }) + } +} + +impl IntoKV for (B256, Nibbles, Option) { + fn into_key(self) -> StorageTrieKey { + let (hashed_address, path, _) = self; + StorageTrieKey::new(hashed_address, StoredNibbles::from(path)) + } + fn into_kv(self, block_number: u64) -> (StorageTrieKey, VersionedValue) { + let (hashed_address, path, node) = self; + ( + StorageTrieKey::new(hashed_address, StoredNibbles::from(path)), + VersionedValue { block_number, value: MaybeDeleted(node) }, + ) + } +} + +impl IntoKV for (B256, Option) { + fn into_key(self) -> B256 { + self.0 + } + fn into_kv(self, block_number: u64) -> (B256, VersionedValue) { + let (hashed_address, account) = self; + (hashed_address, VersionedValue { block_number, value: MaybeDeleted(account) }) + } +} + +impl IntoKV for (B256, B256, Option) { + fn into_key(self) -> HashedStorageKey { + let (hashed_address, hashed_storage_key, _) = self; + HashedStorageKey::new(hashed_address, hashed_storage_key) + } + fn into_kv(self, block_number: u64) -> (HashedStorageKey, VersionedValue) { + let (hashed_address, hashed_storage_key, value) = self; + ( + HashedStorageKey::new(hashed_address, hashed_storage_key), + VersionedValue { block_number, value: MaybeDeleted(value) }, + ) + } +} diff --git a/rust/op-reth/crates/trie/src/db/models/mod.rs b/rust/op-reth/crates/trie/src/db/models/mod.rs new file mode 100644 index 0000000000000..b6d524528c6f9 --- /dev/null +++ b/rust/op-reth/crates/trie/src/db/models/mod.rs @@ -0,0 +1,85 @@ +//! MDBX implementation of [`OpProofsStore`](crate::OpProofsStore). +//! +//! This module provides a complete MDBX implementation of the +//! 
[`OpProofsStore`](crate::OpProofsStore) trait. It uses the [`reth_db`] crate for +//! database interactions and defines the necessary tables and models for storing trie branches, +//! accounts, and storage leaves. + +mod block; +pub use block::*; +mod version; +pub use version::*; +mod storage; +pub use storage::*; +mod change_set; +pub(crate) mod kv; +pub use change_set::*; +pub use kv::*; + +use alloy_primitives::B256; +use reth_db::{ + TableSet, TableType, TableViewer, + table::{DupSort, TableInfo}, + tables, +}; +use reth_primitives_traits::Account; +use reth_trie_common::{BranchNodeCompact, StoredNibbles}; +use std::fmt; + +tables! { + /// Stores historical branch nodes for the account state trie. + /// + /// Each entry maps a compact-encoded trie path (`StoredNibbles`) to its versioned branch node. + /// Multiple versions of the same node are stored using the block number as a subkey. + table AccountTrieHistory { + type Key = StoredNibbles; + type Value = VersionedValue; + type SubKey = u64; // block number + } + + /// Stores historical branch nodes for the storage trie of each account. + /// + /// Each entry is identified by a composite key combining the account’s hashed address and the + /// compact-encoded trie path. Versions are tracked using block numbers as subkeys. + table StorageTrieHistory { + type Key = StorageTrieKey; + type Value = VersionedValue; + type SubKey = u64; // block number + } + + /// Stores versioned account state across block history. + /// + /// Each entry maps a hashed account address to its serialized account data (balance, nonce, + /// code hash, storage root). + table HashedAccountHistory { + type Key = B256; + type Value = VersionedValue; + type SubKey = u64; // block number + } + + /// Stores versioned storage state across block history. + /// + /// Each entry maps a composite key of (hashed address, storage key) to its stored value. + /// Used for reconstructing contract storage at any historical block height. 
+ table HashedStorageHistory { + type Key = HashedStorageKey; + type Value = VersionedValue; + type SubKey = u64; // block number + } + + /// Tracks the active proof window in the external historical storage. + /// + /// Stores the earliest and latest block numbers (and corresponding hashes) + /// for which historical trie data is retained. + table ProofWindow { + type Key = ProofWindowKey; + type Value = BlockNumberHash; + } + + /// A reverse mapping of block numbers to a keys of the tables. + /// This is used for efficiently locating data by block number. + table BlockChangeSet { + type Key = u64; // Block number + type Value = ChangeSet; + } +} diff --git a/rust/op-reth/crates/trie/src/db/models/storage.rs b/rust/op-reth/crates/trie/src/db/models/storage.rs new file mode 100644 index 0000000000000..922d252627a82 --- /dev/null +++ b/rust/op-reth/crates/trie/src/db/models/storage.rs @@ -0,0 +1,256 @@ +use alloy_primitives::{B256, U256}; +use derive_more::{Constructor, From, Into}; +use reth_db::{ + DatabaseError, + table::{Compress, Decode, Decompress, Encode}, +}; +use reth_trie_common::StoredNibbles; +use serde::{Deserialize, Serialize}; + +/// Composite key: `(hashed-address, path)` for storage trie branches +/// +/// Used to efficiently index storage branches by both account address and trie path. +/// The encoding ensures lexicographic ordering: first by address, then by path. 
+#[derive(Default, Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] +pub struct StorageTrieKey { + /// Hashed account address + pub hashed_address: B256, + /// Trie path as nibbles + pub path: StoredNibbles, +} + +impl StorageTrieKey { + /// Create a new storage branch key + pub const fn new(hashed_address: B256, path: StoredNibbles) -> Self { + Self { hashed_address, path } + } +} + +impl Encode for StorageTrieKey { + type Encoded = Vec; + + fn encode(self) -> Self::Encoded { + let mut buf = Vec::with_capacity(32 + self.path.0.len()); + // First encode the address (32 bytes) + buf.extend_from_slice(self.hashed_address.as_slice()); + // Then encode the path + buf.extend_from_slice(&self.path.encode()); + buf + } +} + +impl Decode for StorageTrieKey { + fn decode(value: &[u8]) -> Result { + if value.len() < 32 { + return Err(DatabaseError::Decode); + } + + // First 32 bytes are the address + let hashed_address = B256::from_slice(&value[..32]); + + // Remaining bytes are the path + let path = StoredNibbles::decode(&value[32..])?; + + Ok(Self { hashed_address, path }) + } +} + +/// Composite key: (`hashed_address`, `hashed_storage_key`) for hashed storage values +/// +/// Used to efficiently index storage values by both account address and storage key. +/// The encoding ensures lexicographic ordering: first by address, then by storage key. 
+#[derive(Debug, Default, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] +pub struct HashedStorageKey { + /// Hashed account address + pub hashed_address: B256, + /// Hashed storage key + pub hashed_storage_key: B256, +} + +impl HashedStorageKey { + /// Create a new hashed storage key + pub const fn new(hashed_address: B256, hashed_storage_key: B256) -> Self { + Self { hashed_address, hashed_storage_key } + } +} + +impl Encode for HashedStorageKey { + type Encoded = [u8; 64]; + + fn encode(self) -> Self::Encoded { + let mut buf = [0u8; 64]; + // First 32 bytes: address + buf[..32].copy_from_slice(self.hashed_address.as_slice()); + // Next 32 bytes: storage key + buf[32..].copy_from_slice(self.hashed_storage_key.as_slice()); + buf + } +} + +impl Decode for HashedStorageKey { + fn decode(value: &[u8]) -> Result { + if value.len() != 64 { + return Err(DatabaseError::Decode); + } + + let hashed_address = B256::from_slice(&value[..32]); + let hashed_storage_key = B256::from_slice(&value[32..64]); + + Ok(Self { hashed_address, hashed_storage_key }) + } +} + +/// Storage value wrapper for U256 values +#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize, From, Into, Constructor)] +pub struct StorageValue(pub U256); + +impl Compress for StorageValue { + type Compressed = Vec; + + fn compress_to_buf>(&self, buf: &mut B) { + let be: [u8; 32] = self.0.to_be_bytes::<32>(); + buf.put_slice(&be); + } +} + +impl Decompress for StorageValue { + fn decompress(value: &[u8]) -> Result { + if value.len() != 32 { + return Err(DatabaseError::Decode); + } + let bytes: [u8; 32] = value.try_into().map_err(|_| DatabaseError::Decode)?; + Ok(Self(U256::from_be_bytes(bytes))) + } +} + +/// Proof Window key for tracking active proof window bounds +/// +/// Used to store earliest and latest block numbers in the external storage. 
+#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] +#[repr(u8)] +pub enum ProofWindowKey { + /// Earliest block number stored in external storage + EarliestBlock = 0, + /// Latest block number stored in external storage + LatestBlock = 1, + /// Anchor block from where the initial state initialization started + InitialStateAnchor = 2, +} + +impl Encode for ProofWindowKey { + type Encoded = [u8; 1]; + + fn encode(self) -> Self::Encoded { + [self as u8] + } +} + +impl Decode for ProofWindowKey { + fn decode(value: &[u8]) -> Result { + match value.first() { + Some(&0) => Ok(Self::EarliestBlock), + Some(&1) => Ok(Self::LatestBlock), + Some(&2) => Ok(Self::InitialStateAnchor), + _ => Err(DatabaseError::Decode), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use reth_trie::Nibbles; + + #[test] + fn test_storage_branch_subkey_encode_decode() { + let addr = B256::from([1u8; 32]); + let path = StoredNibbles(Nibbles::from_nibbles_unchecked([1, 2, 3, 4])); + let key = StorageTrieKey::new(addr, path.clone()); + + let encoded = key.clone().encode(); + let decoded = StorageTrieKey::decode(&encoded).unwrap(); + + assert_eq!(key, decoded); + assert_eq!(decoded.hashed_address, addr); + assert_eq!(decoded.path, path); + } + + #[test] + fn test_storage_branch_subkey_ordering() { + let addr1 = B256::from([1u8; 32]); + let addr2 = B256::from([2u8; 32]); + let path1 = StoredNibbles(Nibbles::from_nibbles_unchecked([1, 2])); + let path2 = StoredNibbles(Nibbles::from_nibbles_unchecked([1, 3])); + + let key1 = StorageTrieKey::new(addr1, path1.clone()); + let key2 = StorageTrieKey::new(addr1, path2); + let key3 = StorageTrieKey::new(addr2, path1); + + // Encoded bytes should be sortable: first by address, then by path + let enc1 = key1.encode(); + let enc2 = key2.encode(); + let enc3 = key3.encode(); + + assert!(enc1 < enc2, "Same address, path1 < path2"); + assert!(enc1 < enc3, "addr1 < addr2"); + assert!(enc2 < enc3, "addr1 < addr2 (even 
with larger path)"); + } + + #[test] + fn test_hashed_storage_subkey_encode_decode() { + let addr = B256::from([1u8; 32]); + let storage_key = B256::from([2u8; 32]); + let key = HashedStorageKey::new(addr, storage_key); + + let encoded = key.clone().encode(); + let decoded = HashedStorageKey::decode(&encoded).unwrap(); + + assert_eq!(key, decoded); + assert_eq!(decoded.hashed_address, addr); + assert_eq!(decoded.hashed_storage_key, storage_key); + } + + #[test] + fn test_hashed_storage_subkey_ordering() { + let addr1 = B256::from([1u8; 32]); + let addr2 = B256::from([2u8; 32]); + let storage1 = B256::from([10u8; 32]); + let storage2 = B256::from([20u8; 32]); + + let key1 = HashedStorageKey::new(addr1, storage1); + let key2 = HashedStorageKey::new(addr1, storage2); + let key3 = HashedStorageKey::new(addr2, storage1); + + // Encoded bytes should be sortable: first by address, then by storage key + let enc1 = key1.encode(); + let enc2 = key2.encode(); + let enc3 = key3.encode(); + + assert!(enc1 < enc2, "Same address, storage1 < storage2"); + assert!(enc1 < enc3, "addr1 < addr2"); + assert!(enc2 < enc3, "addr1 < addr2 (even with larger storage key)"); + } + + #[test] + fn test_hashed_storage_subkey_size() { + let addr = B256::from([1u8; 32]); + let storage_key = B256::from([2u8; 32]); + let key = HashedStorageKey::new(addr, storage_key); + + let encoded = key.encode(); + assert_eq!(encoded.len(), 64, "Encoded size should be exactly 64 bytes"); + } + + #[test] + fn test_metadata_key_encode_decode() { + let key = ProofWindowKey::EarliestBlock; + let encoded = key.encode(); + let decoded = ProofWindowKey::decode(&encoded).unwrap(); + assert_eq!(key, decoded); + + let key = ProofWindowKey::LatestBlock; + let encoded = key.encode(); + let decoded = ProofWindowKey::decode(&encoded).unwrap(); + assert_eq!(key, decoded); + } +} diff --git a/rust/op-reth/crates/trie/src/db/models/version.rs b/rust/op-reth/crates/trie/src/db/models/version.rs new file mode 100644 index 
0000000000000..26f985c9674f6 --- /dev/null +++ b/rust/op-reth/crates/trie/src/db/models/version.rs @@ -0,0 +1,191 @@ +use bytes::{Buf, BufMut}; +use reth_db::{ + DatabaseError, + table::{Compress, Decompress}, +}; +use reth_primitives_traits::ValueWithSubKey; +use serde::{Deserialize, Serialize}; + +/// Wrapper type for `Option` that implements [`Compress`] and [`Decompress`] +/// +/// Encoding: +/// - `None` => empty byte array (length 0) +/// - `Some(value)` => compressed bytes of value (length > 0) +/// +/// This assumes the inner type `T` always compresses to non-empty bytes when it exists. +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct MaybeDeleted(pub Option); + +impl From> for MaybeDeleted { + fn from(opt: Option) -> Self { + Self(opt) + } +} + +impl From> for Option { + fn from(maybe: MaybeDeleted) -> Self { + maybe.0 + } +} + +impl Compress for MaybeDeleted { + type Compressed = Vec; + + fn compress_to_buf>(&self, buf: &mut B) { + match &self.0 { + None => { + // Empty = deleted, write nothing + } + Some(value) => { + // Compress the inner value to the buffer + value.compress_to_buf(buf); + } + } + } +} + +impl Decompress for MaybeDeleted { + fn decompress(value: &[u8]) -> Result { + if value.is_empty() { + // Empty = deleted + Ok(Self(None)) + } else { + // Non-empty = present + let inner = T::decompress(value)?; + Ok(Self(Some(inner))) + } + } +} + +/// Versioned value wrapper for [`DupSort`] tables +/// +/// For [`DupSort`] tables in MDBX, the Value type must contain the [`DupSort::SubKey`] as a field. +/// This wrapper combines a [`block_number`] (the [`DupSort::SubKey`]) with +/// the actual value. 
+/// +/// [`DupSort`]: reth_db::table::DupSort +/// [`DupSort::SubKey`]: reth_db::table::DupSort::SubKey +/// [`block_number`]: Self::block_number +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct VersionedValue { + /// Block number ([`DupSort::SubKey`] for [`DupSort`]) + /// + /// [`DupSort`]: reth_db::table::DupSort + /// [`DupSort::SubKey`]: reth_db::table::DupSort::SubKey + pub block_number: u64, + /// The actual value (may be deleted) + pub value: MaybeDeleted, +} + +impl VersionedValue { + /// Create a new versioned value + pub const fn new(block_number: u64, value: MaybeDeleted) -> Self { + Self { block_number, value } + } +} + +impl Compress for VersionedValue { + type Compressed = Vec; + + fn compress_to_buf>(&self, buf: &mut B) { + // Encode block number first (8 bytes, big-endian) + buf.put_u64(self.block_number); + // Then encode the value + self.value.compress_to_buf(buf); + } +} + +impl Decompress for VersionedValue { + fn decompress(value: &[u8]) -> Result { + if value.len() < 8 { + return Err(DatabaseError::Decode); + } + + let mut buf: &[u8] = value; + let block_number = buf.get_u64(); + let value = MaybeDeleted::::decompress(&value[8..])?; + + Ok(Self { block_number, value }) + } +} + +impl ValueWithSubKey for VersionedValue { + type SubKey = u64; + + fn get_subkey(&self) -> Self::SubKey { + self.block_number + } +} + +#[cfg(test)] +mod tests { + use super::*; + use reth_primitives_traits::Account; + use reth_trie::BranchNodeCompact; + + #[test] + fn test_maybe_deleted_none() { + let none: MaybeDeleted = MaybeDeleted(None); + let compressed = none.compress(); + assert!(compressed.is_empty(), "None should compress to empty bytes"); + + let decompressed = MaybeDeleted::::decompress(&compressed).unwrap(); + assert_eq!(decompressed.0, None); + } + + #[test] + fn test_maybe_deleted_some_account() { + let account = Account { + nonce: 42, + balance: alloy_primitives::U256::from(1000u64), + bytecode_hash: None, + }; + let some = 
MaybeDeleted(Some(account)); + let compressed = some.compress(); + assert!(!compressed.is_empty(), "Some should compress to non-empty bytes"); + + let decompressed = MaybeDeleted::::decompress(&compressed).unwrap(); + assert_eq!(decompressed.0, Some(account)); + } + + #[test] + fn test_maybe_deleted_some_branch() { + // Create a simple valid BranchNodeCompact (empty is valid) + let branch = BranchNodeCompact::new( + 0, // state_mask + 0, // tree_mask + 0, // hash_mask + vec![], // hashes + None, // root_hash + ); + let some = MaybeDeleted(Some(branch.clone())); + let compressed = some.compress(); + assert!(!compressed.is_empty(), "Some should compress to non-empty bytes"); + + let decompressed = MaybeDeleted::::decompress(&compressed).unwrap(); + assert_eq!(decompressed.0, Some(branch)); + } + + #[test] + fn test_maybe_deleted_roundtrip() { + let test_cases = vec![ + MaybeDeleted(None), + MaybeDeleted(Some(Account { + nonce: 0, + balance: alloy_primitives::U256::ZERO, + bytecode_hash: None, + })), + MaybeDeleted(Some(Account { + nonce: 999, + balance: alloy_primitives::U256::MAX, + bytecode_hash: Some([0xff; 32].into()), + })), + ]; + + for original in test_cases { + let compressed = original.clone().compress(); + let decompressed = MaybeDeleted::::decompress(&compressed).unwrap(); + assert_eq!(original, decompressed); + } + } +} diff --git a/rust/op-reth/crates/trie/src/db/schema.md b/rust/op-reth/crates/trie/src/db/schema.md new file mode 100644 index 0000000000000..4c2b04fd6965b --- /dev/null +++ b/rust/op-reth/crates/trie/src/db/schema.md @@ -0,0 +1,366 @@ +# Proof History Database Schema + +> Location: `crates/optimism/trie/src/db` +> Backend: **MDBX** (via `reth-db`) +> Purpose: Serve **historical `eth_getProof`** by storing versioned trie data in a bounded window. + +--- + +## Design Overview + +This database is a **versioned, append-only history store** for Ethereum state tries. 
+ +Each logical key is stored with **multiple historical versions**, each tagged by a **block number**. Reads select the latest version whose block number is **≤ the requested block**. + +### Core principles + +* History tables are **DupSort** tables +* Each entry is versioned by `block_number` +* Deletions are encoded as **tombstones** +* A reverse index (`BlockChangeSet`) enables **range pruning** +* Proof window bounds are tracked explicitly + +--- + +## Version Encoding + +All historical values are wrapped in `VersionedValue`. + +### `VersionedValue` + +| Field | Type | Encoding | +| -------------- | ----------------- | -------------------- | +| `block_number` | `u64` | big-endian (8 bytes) | +| `value` | `MaybeDeleted` | see below | + +``` +VersionedValue := block_number || maybe_deleted_value +``` + +--- + +### `MaybeDeleted` + +Encodes value presence or deletion: + +| Logical value | Encoding | +| ------------- | ---------------------------- | +| `Some(T)` | `T::compress()` | +| `None` | empty byte slice (`len = 0`) | + +An empty value represents **deletion at that block**. + +--- + +## Tables + +--- + +## 1. `AccountTrieHistory` (DupSort) + +Historical **branch nodes** of the **account trie**. + +### Purpose + +Reconstruct account trie structure at any historical block. + +### Schema + +| Component | Type | +| --------- | ----------------------------------- | +| Key | `StoredNibbles` | +| SubKey | `u64` (block number) | +| Value | `VersionedValue` | + +### Key encoding + +* `StoredNibbles` = compact-encoded trie path + +### Semantics + +For a given trie path: + +* Multiple versions may exist +* Reader selects highest `block_number ≤ target_block` + +--- + +## 2. `StorageTrieHistory` (DupSort) + +Historical **branch nodes** of **per-account storage tries**. 
+ +### Schema + +| Component | Type | +| --------- | ----------------------------------- | +| Key | `StorageTrieKey` | +| SubKey | `u64` | +| Value | `VersionedValue` | + +### `StorageTrieKey` encoding + +``` +StorageTrieKey := + hashed_address (32 bytes) + || StoredNibbles::encode(path) +``` + +Ordering: + +1. `hashed_address` +2. trie path bytes + +--- + +## 3. `HashedAccountHistory` (DupSort) + +Historical **account leaf values**. + +### Schema + +| Component | Type | +| --------- | ------------------------- | +| Key | `B256` (hashed address) | +| SubKey | `u64` | +| Value | `VersionedValue` | + +### Semantics + +Stores nonce, balance, code hash, and storage root per account per block. + +--- + +## 4. `HashedStorageHistory` (DupSort) + +Historical **storage slot values**. + +### Schema + +| Component | Type | +| --------- | ------------------------------ | +| Key | `HashedStorageKey` | +| SubKey | `u64` | +| Value | `VersionedValue` | + +### `HashedStorageKey` encoding + +Fixed 64 bytes: + +``` +hashed_address (32 bytes) || hashed_storage_key (32 bytes) +``` + +### `StorageValue` encoding + +* Wraps `U256` +* Encoded as **32-byte big-endian** + +--- + +## 5. `BlockChangeSet` + +Reverse index of **which keys were modified in a block**. + +### Purpose + +Efficient pruning by block range. + +### Schema + +| Component | Type | +| --------- | -------------------- | +| Key | `u64` (block number) | +| Value | `ChangeSet` | + +### `ChangeSet` structure + +```rust +pub struct ChangeSet { + pub account_trie_keys: Vec, + pub storage_trie_keys: Vec, + pub hashed_account_keys: Vec, + pub hashed_storage_keys: Vec, +} +``` + +### Encoding + +* Serialized using **bincode** + +--- + +## 6. `ProofWindow` + +Tracks active proof window bounds. 
+ +### Schema + +| Component | Type | +| --------- | ----------------- | +| Key | `ProofWindowKey` | +| Value | `BlockNumberHash` | + +### `ProofWindowKey` + +| Variant | Encoding | +| --------------- | -------- | +| `EarliestBlock` | `0u8` | +| `LatestBlock` | `1u8` | + +### `BlockNumberHash` encoding + +``` +block_number (u64 BE, 8 bytes) +|| block_hash (B256, 32 bytes) +``` + +Total size: **40 bytes** + +--- +Here is a **short, clean, professional** version suitable for `SCHEMA.md`: + +--- + +## Reads: Hashed & Trie Cursors + +Historical reads are performed using **hashed cursors** and **trie cursors**, both operating on versioned history tables. + +All reads follow the same rule: + +> Select the newest entry whose block number is **≤ the requested block**. + +--- + +### Hashed Cursors + +Hashed cursors read **leaf values** from: + +* `HashedAccountHistory` +* `HashedStorageHistory` + +They answer: + +> *What was the value of this account or storage slot at block B?* + +For a given key, the cursor scans historical versions and returns the latest valid value. Tombstones indicate deletion and are treated as non-existence. + +--- + +### Trie Cursors + +Trie cursors read **trie branch nodes** from: + +* `AccountTrieHistory` +* `StorageTrieHistory` + +They answer: + +> *Which trie nodes existed at this path at block B?* + +These cursors enable reconstruction of Merkle paths required for proof generation. + +--- + +### Combined Usage + +When serving `eth_getProof`: + +* Trie cursors reconstruct the Merkle path +* Hashed cursors supply the leaf values + +Both are evaluated at the same target explained block to produce deterministic historical proofs. + + +--- +## Writes: `store_trie_updates` (Append-Only) + +`store_trie_updates` persists all state changes introduced by a block using a strictly **append-only** write model. + + +### Purpose + +The function records **historical trie updates** so that state and proofs can be reconstructed at any later block. 
+ +--- + +### What is written + +For a processed block `B`, the following data is appended: + +* Account trie branch nodes → `AccountTrieHistory` +* Storage trie branch nodes → `StorageTrieHistory` +* Account leaf updates → `HashedAccountHistory` +* Storage slot updates → `HashedStorageHistory` +* Modified keys → `BlockChangeSet[B]` + +All entries are tagged with the same `block_number`. + +--- + +### How writes work + +For each updated item: + +1. A `VersionedValue` is created with: + + * `block_number = B` + * the encoded node or value + +2. The entry is appended to the corresponding history table. + +No existing entries are modified or replaced. + + +--- +Here is a **concise, professional, SCHEMA.md-style** explanation aligned exactly with your clarification: + +--- + +## Initial State Backfill + +### Source database (Reth) + +The initial state is sourced from **Reth’s main execution database**, which only contains data for the **current canonical state**. + +The backfill reads from the following Reth tables: + +* `HashedAccounts` – current account leaf values +* `HashedStorages` – current storage slot values +* `AccountsTrie` – current account trie branch nodes +* `StoragesTrie` – current storage trie branch nodes + +These tables do **not** contain historical versions; they represent a single finalized state snapshot. + +--- + +### Destination database (Proofs storage) + +The data is copied into the **proofs history database** (`OpProofsStore`), which is a **versioned, append-only** store designed for historical proof generation. + +--- + +### How the initial state is created + +During backfill: + +1. The current state is fully scanned from Reth tables using read-only cursors. +2. All entries are written into the proofs storage as versioned records. +3. This creates a complete **baseline state** inside the proofs DB. + +The backfill runs only once and is skipped if the proofs DB already has an `earliest_block` set. 
+ +--- + +### Why block `0` is used + +Since Reth tables only represent the **current state**, the copied data must be assigned a synthetic version. + +Block **`0`** is used as the baseline version because: + +* It is ≤ any real block number +* It establishes a stable initial version for all keys +* Later block updates naturally override it using higher block numbers + +This makes block `0` the canonical **initial state anchor** for versioned reads. + +--- diff --git a/rust/op-reth/crates/trie/src/db/store.rs b/rust/op-reth/crates/trie/src/db/store.rs new file mode 100644 index 0000000000000..62d801ffc7c2c --- /dev/null +++ b/rust/op-reth/crates/trie/src/db/store.rs @@ -0,0 +1,3675 @@ +use super::{BlockNumberHash, ProofWindow, ProofWindowKey, Tables}; +use crate::{ + BlockStateDiff, OpProofsStorageError, + OpProofsStorageError::NoBlocksFound, + OpProofsStorageResult, OpProofsStore, + api::{InitialStateAnchor, InitialStateStatus, OpProofsInitialStateStore, WriteCounts}, + db::{ + MdbxAccountCursor, MdbxStorageCursor, MdbxTrieCursor, + cursor::Dup, + models::{ + AccountTrieHistory, BlockChangeSet, ChangeSet, HashedAccountHistory, + HashedStorageHistory, HashedStorageKey, MaybeDeleted, StorageTrieHistory, + StorageTrieKey, StorageValue, VersionedValue, kv::IntoKV, + }, + }, +}; +use alloy_eips::{BlockNumHash, NumHash, eip1898::BlockWithParent}; +use alloy_primitives::{B256, U256, map::HashMap}; +#[cfg(feature = "metrics")] +use metrics::{Label, gauge}; +use reth_db::{ + Database, DatabaseEnv, DatabaseError, + cursor::{DbCursorRO, DbCursorRW, DbDupCursorRO, DbDupCursorRW}, + mdbx::{DatabaseArguments, init_db_for}, + table::{DupSort, Table}, + transaction::{DbTx, DbTxMut}, +}; +use reth_primitives_traits::Account; +use reth_trie::{hashed_cursor::HashedCursor, trie_cursor::TrieCursor}; +use reth_trie_common::{ + BranchNodeCompact, HashedPostState, Nibbles, StoredNibbles, + updates::{StorageTrieUpdates, TrieUpdates}, +}; +use std::{ops::RangeBounds, path::Path}; + +/// 
MDBX implementation of [`OpProofsStore`]. +#[derive(Debug)] +pub struct MdbxProofsStorage { + env: DatabaseEnv, +} + +struct ProofWindowValue { + earliest: NumHash, + latest: NumHash, +} + +/// Preprocessed prune plan for a target block number +#[derive(Debug, Clone)] +struct PrunePlan { + earliest_block: u64, + acc_survivors: Vec<(StoredNibbles, u64)>, + storage_survivors: Vec<(StorageTrieKey, u64)>, + hashed_acc_survivors: Vec<(B256, u64)>, + hashed_storage_survivors: Vec<(HashedStorageKey, u64)>, +} + +/// Preprocessed delete work for a prune range +#[derive(Debug, Default, Clone)] +struct HistoryDeleteBatch { + account_trie: Vec<(::Key, u64)>, + storage_trie: Vec<(::Key, u64)>, + hashed_account: Vec<(::Key, u64)>, + hashed_storage: Vec<(::Key, u64)>, +} + +impl MdbxProofsStorage { + /// Creates a new [`MdbxProofsStorage`] instance with the given path. + pub fn new(path: &Path) -> Result { + let env = init_db_for::<_, Tables>(path, DatabaseArguments::default()) + .map_err(|e| DatabaseError::Other(format!("Failed to open database: {e}")))?; + Ok(Self { env }) + } + + fn inner_get_latest_block_number_hash( + &self, + tx: &impl DbTx, + ) -> OpProofsStorageResult> { + let block = self.inner_get_block_number_hash(tx, ProofWindowKey::LatestBlock)?; + if block.is_some() { + return Ok(block); + } + + self.inner_get_block_number_hash(tx, ProofWindowKey::EarliestBlock) + } + + fn inner_get_block_number_hash( + &self, + tx: &impl DbTx, + key: ProofWindowKey, + ) -> OpProofsStorageResult> { + let mut cursor = tx.cursor_read::()?; + let value = cursor.seek_exact(key)?; + Ok(value.map(|(_, val)| (val.number(), *val.hash()))) + } + + fn inner_get_proof_window( + &self, + tx: &impl DbTx, + ) -> OpProofsStorageResult> { + let mut cursor = tx.cursor_read::()?; + + let earliest = match cursor.seek_exact(ProofWindowKey::EarliestBlock)? 
{ + Some((_, val)) => NumHash::new(val.number(), *val.hash()), + None => return Ok(None), + }; + + let latest = match cursor.seek_exact(ProofWindowKey::LatestBlock)? { + Some((_, val)) => NumHash::new(val.number(), *val.hash()), + None => earliest, + }; + + Ok(Some(ProofWindowValue { earliest, latest })) + } + + fn set_earliest_block_number_hash( + &self, + block_number: u64, + hash: B256, + ) -> OpProofsStorageResult<()> { + let _ = self.env.update(|tx| { + Self::inner_set_earliest_block_number(tx, block_number, hash)?; + Ok::<(), DatabaseError>(()) + })?; + Ok(()) + } + + /// Internal helper to set earliest block number hash within an existing transaction + fn inner_set_earliest_block_number( + tx: &(impl DbTxMut + DbTx), + block_number: u64, + hash: B256, + ) -> OpProofsStorageResult<()> { + let mut cursor = tx.cursor_write::()?; + cursor.upsert(ProofWindowKey::EarliestBlock, &BlockNumberHash::new(block_number, hash))?; + Ok(()) + } + + /// Internal helper to set latest block number hash within an existing transaction + fn inner_set_latest_block_number( + tx: &(impl DbTxMut + DbTx), + block_number: u64, + hash: B256, + ) -> OpProofsStorageResult<()> { + let mut cursor = tx.cursor_write::()?; + cursor.upsert(ProofWindowKey::LatestBlock, &BlockNumberHash::new(block_number, hash))?; + Ok(()) + } + + /// Persist a batch of versioned history entries to a dup-sorted table. + /// + /// # Parameters + /// - `block_number`: Target block number for versioning entries + /// - `items`: **Must be sorted** - iterator of entries to persist + /// - `append_mode`: Mode selector for write strategy: + /// - `true` (Append): Appends all entries including tombstones for forward progress + /// - `false` (Prune): Removes tombstones, writes non-tombstones to block 0 + /// + /// The cost of pruning is the cost of (append + deleting tombstones + deleting old block 0). + /// The tombstones deletion is expensive as it requires a seek for each (key + subkey). 
+ /// + /// Uses [`reth_db::mdbx::cursor::Cursor::upsert`] for upsert operation. + fn persist_history_batch( + &self, + tx: &(impl DbTxMut + DbTx), + block_number: T::SubKey, + items: I, + append_mode: bool, + ) -> OpProofsStorageResult> + where + T: Table> + DupSort, + T::Key: Clone, + I: IntoIterator, + I::Item: IntoKV, + { + let mut cur = tx.cursor_dup_write::()?; + let mut keys = Vec::::new(); + + // Materialize iterator to enable partitioning and collect keys + let mut pairs: Vec<(T::Key, T::Value)> = Vec::new(); + for it in items { + let (k, vv) = it.into_kv(block_number); + pairs.push((k.clone(), vv)); + keys.push(k) + } + + if append_mode { + // Append all entries (including tombstones) to preserve full history + for (k, vv) in pairs { + cur.append_dup(k.clone(), vv)?; + } + return Ok(keys); + } + + // Drop current cursor to start clean for Phase 1 + drop(cur); + + // Phase 1: Batch Delete (Sequential) + // Remove all existing state at Block 0 for these keys. + { + let mut del_cur = tx.cursor_dup_write::()?; + for (k, _) in &pairs { + // Seek to (Key, Block 0) + if let Some(vv) = del_cur.seek_by_key_subkey(k.clone(), 0)? && + vv.block_number == 0 + { + del_cur.delete_current()?; + } + } + } + + // Phase 2: Batch Write (Sequential) + // Write new values (skipping tombstones). + { + let mut write_cur = tx.cursor_dup_write::()?; + for (k, vv) in pairs { + if vv.value.0.is_some() { + write_cur.upsert(k, &vv)?; + } + } + } + + Ok(keys) + } + + /// Delete entries for `items` at exactly `block_number` in a dup-sorted table. + /// Seeks (key, block) and deletes current if the subkey matches. + fn delete_dup_sorted( + &self, + tx: &(impl DbTxMut + DbTx), + items: I, + ) -> OpProofsStorageResult<()> + where + T: Table> + DupSort, + T::Key: Clone, + T::SubKey: PartialEq + Clone, + I: IntoIterator, + { + let mut cur = tx.cursor_dup_write::()?; + for (key, subkey) in items { + if let Some(vv) = cur.seek_by_key_subkey(key, subkey)? 
{ + // ensure we didn't land on a >subkey + if vv.block_number == subkey { + cur.delete_current()?; + } + } + } + Ok(()) + } + + /// Phase 1 of pruning: Calculate survivors. + /// Scans change sets to find the LATEST update for every key in the range. + fn calculate_prune_plan(&self, target_block: u64) -> OpProofsStorageResult> { + self.env.view(|tx| { + let Some((earliest, _)) = + self.inner_get_block_number_hash(tx, ProofWindowKey::EarliestBlock)? + else { + return Ok(None); + }; + + if earliest >= target_block { + return Ok(None); + } + + // 1. Accumulate latest block per key using HashMap for O(1) deduplication + // This is memory-efficient for high-churn scenarios (many updates to same keys). + let mut acc_candidates: HashMap = HashMap::default(); + let mut storage_candidates: HashMap = HashMap::default(); + let mut hashed_acc_candidates: HashMap = HashMap::default(); + let mut hashed_storage_candidates: HashMap = HashMap::default(); + + let range = (earliest + 1)..=target_block; + let mut cs_cursor = tx.cursor_read::()?; + let mut walker = cs_cursor.walk_range(range)?; + + while let Some(Ok((block_number, cs))) = walker.next() { + for k in cs.account_trie_keys { + acc_candidates + .entry(k) + .and_modify(|curr| *curr = (*curr).max(block_number)) + .or_insert(block_number); + } + for k in cs.storage_trie_keys { + storage_candidates + .entry(k) + .and_modify(|curr| *curr = (*curr).max(block_number)) + .or_insert(block_number); + } + for k in cs.hashed_account_keys { + hashed_acc_candidates + .entry(k) + .and_modify(|curr| *curr = (*curr).max(block_number)) + .or_insert(block_number); + } + for k in cs.hashed_storage_keys { + hashed_storage_candidates + .entry(k) + .and_modify(|curr| *curr = (*curr).max(block_number)) + .or_insert(block_number); + } + } + + // 2. 
Convert map to sorted survivors list for efficient sequential db write + Ok(Some(PrunePlan { + earliest_block: earliest, + acc_survivors: Self::flatten_and_sort(acc_candidates), + storage_survivors: Self::flatten_and_sort(storage_candidates), + hashed_acc_survivors: Self::flatten_and_sort(hashed_acc_candidates), + hashed_storage_survivors: Self::flatten_and_sort(hashed_storage_candidates), + })) + })? + } + + /// Helper to flatten `HashMap` into a sorted Vector of survivors. + /// Sorting is required to ensure optimal sequential seek performance in MDBX. + fn flatten_and_sort(map: HashMap) -> Vec<(K, u64)> { + let mut v: Vec<_> = map.into_iter().collect(); + v.sort_unstable_by(|a, b| a.0.cmp(&b.0)); + v + } + + /// Delete history versions for `items` that are strictly older than the provided block number. + /// `items` is a list of (Key, `SurvivorBlock`). Everything strictly older than `SurvivorBlock` + /// is deleted. Returns the number of entries deleted. + fn prune_history_preceding( + &self, + tx: &(impl DbTxMut + DbTx), + cutoff_items: Vec<(T::Key, u64)>, + ) -> OpProofsStorageResult + where + T: Table> + DupSort, + T::Key: Clone + Ord, + { + if cutoff_items.is_empty() { + return Ok(0); + } + + let mut deleted_count = 0; + let mut cur = tx.cursor_dup_write::()?; + for (key, survivor_block) in cutoff_items { + // Seek to the start of history for this key (Block 0) + if let Some(mut entry) = cur.seek_by_key_subkey(key.clone(), 0)? { + loop { + if entry.block_number >= survivor_block { + // Reached the survivor version (or newer). Stop deleting for this key. + + // If the survivor is a tombstone (None), delete it too. + // Since we just deleted all older history, a tombstone at the start of + // history is redundant (it implies "does not + // exist"). + if entry.block_number == survivor_block && entry.value.0.is_none() { + cur.delete_current()?; + deleted_count += 1; + } + + break; + } + + // Entry is strictly older than survivor. Delete it. 
+ cur.delete_current()?; + deleted_count += 1; + + // MDBX delete_current() automatically advances the cursor to the next item. + // We check if the next item is still the same key. + match cur.current() { + Ok(Some((k, v))) => { + if k != key { + break; // Moved past the key + } + entry = v; + } + _ => break, // End of table or error + } + } + } + } + Ok(deleted_count) + } + + /// Append deletion tombstones for all existing storage items of `hashed_address` at + /// `block_number`. Iterates via `next()` from a RO cursor and writes MaybeDeleted(None) + /// rows. + fn wipe_storage( + &self, + tx: &(impl DbTxMut + DbTx), + block_number: u64, + hashed_address: B256, + mut next: Next, + ) -> OpProofsStorageResult> + where + T: Table> + DupSort, + Next: FnMut() -> OpProofsStorageResult>, + (B256, K, Option): IntoKV, + T::Key: Clone, + { + let mut cur = tx.cursor_dup_write::()?; + let mut keys: Vec = Vec::new(); + + while let Some((k, _vv)) = next()? { + let key: T::Key = (hashed_address, k, Option::::None).into_key(); + let del: T::Value = VersionedValue { block_number, value: MaybeDeleted(None) }; + cur.append_dup(key.clone(), del)?; + keys.push(key); + } + + Ok(keys) + } + + /// Collect versioned history over `block_range` using `BlockChangeSet`. 
+ fn collect_history_ranged( + &self, + tx: &impl DbTx, + block_range: impl RangeBounds, + ) -> OpProofsStorageResult { + let mut history = HistoryDeleteBatch::default(); + let mut change_set_cursor = tx.cursor_read::()?; + let mut walker = change_set_cursor.walk_range(block_range)?; + + while let Some(Ok((block_number, change_set))) = walker.next() { + // Push (key, subkey=block_number) pairs + history + .account_trie + .extend(change_set.account_trie_keys.into_iter().map(|k| (k, block_number))); + history + .storage_trie + .extend(change_set.storage_trie_keys.into_iter().map(|k| (k, block_number))); + history + .hashed_account + .extend(change_set.hashed_account_keys.into_iter().map(|k| (k, block_number))); + history + .hashed_storage + .extend(change_set.hashed_storage_keys.into_iter().map(|k| (k, block_number))); + } + + // Sorting by tuple sorts by key first, then by block_number. + history.account_trie.sort_by(|(k1, b1), (k2, b2)| k1.cmp(k2).then_with(|| b1.cmp(b2))); + history.storage_trie.sort_by(|(k1, b1), (k2, b2)| k1.cmp(k2).then_with(|| b1.cmp(b2))); + history.hashed_account.sort_by(|(k1, b1), (k2, b2)| k1.cmp(k2).then_with(|| b1.cmp(b2))); + history.hashed_storage.sort_by(|(k1, b1), (k2, b2)| k1.cmp(k2).then_with(|| b1.cmp(b2))); + + Ok(history) + } + + /// Delete versioned history over `block_range` using history batch. 
+ fn delete_history_ranged( + &self, + tx: &(impl DbTxMut + DbTx), + block_range: impl RangeBounds, + history: HistoryDeleteBatch, + ) -> OpProofsStorageResult { + let mut change_set_cursor = tx.cursor_write::()?; + let mut walker = change_set_cursor.walk_range(block_range)?; + + while let Some(Ok((_, _))) = walker.next() { + walker.delete_current()?; + } + + // Delete using the simplified API: iterator of (key, subkey) + self.delete_dup_sorted::(tx, history.clone().account_trie)?; + self.delete_dup_sorted::(tx, history.clone().storage_trie)?; + self.delete_dup_sorted::(tx, history.clone().hashed_account)?; + self.delete_dup_sorted::(tx, history.clone().hashed_storage)?; + + Ok(WriteCounts { + account_trie_updates_written_total: history.account_trie.len() as u64, + storage_trie_updates_written_total: history.storage_trie.len() as u64, + hashed_accounts_written_total: history.hashed_account.len() as u64, + hashed_storages_written_total: history.hashed_storage.len() as u64, + }) + } + + /// Write trie/state history for `block_number` from `block_state_diff`. 
+ fn store_trie_updates_for_block( + &self, + tx: &::TXMut, + block_number: u64, + block_state_diff: BlockStateDiff, + append_mode: bool, + ) -> OpProofsStorageResult { + let BlockStateDiff { sorted_trie_updates, sorted_post_state } = block_state_diff; + + let storage_trie_len = sorted_trie_updates.storage_tries_ref().len(); + let hashed_storage_len = sorted_post_state.storages.len(); + + let account_trie_keys = self.persist_history_batch( + tx, + block_number, + sorted_trie_updates.account_nodes_ref().iter().cloned(), + append_mode, + )?; + let hashed_account_keys = self.persist_history_batch( + tx, + block_number, + sorted_post_state.accounts.iter().copied(), + append_mode, + )?; + + let mut storage_trie_keys = Vec::::with_capacity(storage_trie_len); + for (hashed_address, nodes) in sorted_trie_updates.storage_tries_ref() { + // Handle wiped - mark all storage trie as deleted at the current block number + if nodes.is_deleted && append_mode { + // Yet to have any update for the current block number - So just using up to + // previous block number + let mut ro = self.storage_trie_cursor(*hashed_address, block_number - 1)?; + let keys = + self.wipe_storage(tx, block_number, *hashed_address, || Ok(ro.next()?))?; + + storage_trie_keys.extend(keys); + + // Skip any further processing for this hashed_address + continue; + } + + let keys = self.persist_history_batch( + tx, + block_number, + nodes + .storage_nodes_ref() + .iter() + .cloned() + .map(|(path, node)| (*hashed_address, path, node)), + append_mode, + )?; + storage_trie_keys.extend(keys); + } + + let mut hashed_storage_keys = Vec::::with_capacity(hashed_storage_len); + for (hashed_address, storage) in sorted_post_state.storages { + // Handle wiped - mark all storage slots as deleted at the current block number + if append_mode && storage.is_wiped() { + // Yet to have any update for the current block number - So just using up to + // previous block number + let mut ro = self.storage_hashed_cursor(hashed_address, 
block_number - 1)?; + let keys = + self.wipe_storage(tx, block_number, hashed_address, || Ok(ro.next()?))?; + hashed_storage_keys.extend(keys); + // Skip any further processing for this hashed_address + continue; + } + let keys = self.persist_history_batch( + tx, + block_number, + storage + .storage_slots_ref() + .iter() + .map(|(key, val)| (hashed_address, *key, Some(StorageValue(*val)))), + append_mode, + )?; + hashed_storage_keys.extend(keys); + } + + Ok(ChangeSet { + account_trie_keys, + storage_trie_keys, + hashed_account_keys, + hashed_storage_keys, + }) + } + + /// Append-only writer for a block: validates parent, persists diff (soft-delete=true), + /// records a `BlockChangeSet`, and advances `ProofWindow::LatestBlock`. + fn store_trie_updates_append_only( + &self, + tx: &::TXMut, + block_ref: BlockWithParent, + block_state_diff: BlockStateDiff, + ) -> OpProofsStorageResult { + let block_number = block_ref.block.number; + + // Check the latest stored block is the parent of the incoming block + let latest_block_hash = + self.inner_get_latest_block_number_hash(tx)?.map_or(B256::ZERO, |(_num, hash)| hash); + + if latest_block_hash != block_ref.parent { + return Err(OpProofsStorageError::OutOfOrder { + block_number, + parent_block_hash: block_ref.parent, + latest_block_hash, + }); + } + + let change_set = + &self.store_trie_updates_for_block(tx, block_number, block_state_diff, true)?; + + // Cursor for recording all changes made in this block for all history tables + let mut change_set_cursor = tx.new_cursor::()?; + change_set_cursor.append(block_number, change_set)?; + + // Update proof window's latest block + Self::inner_set_latest_block_number(tx, block_number, block_ref.block.hash)?; + + Ok(WriteCounts { + account_trie_updates_written_total: change_set.account_trie_keys.len() as u64, + storage_trie_updates_written_total: change_set.storage_trie_keys.len() as u64, + hashed_accounts_written_total: change_set.hashed_account_keys.len() as u64, + 
hashed_storages_written_total: change_set.hashed_storage_keys.len() as u64, + }) + } + + /// Return `BlockNumHash` for the initial state anchor. + fn get_initial_state_anchor(&self) -> OpProofsStorageResult> { + self.env.view(|tx| { + let mut cur = tx.cursor_read::()?; + Ok(cur.seek_exact(ProofWindowKey::InitialStateAnchor)?.map(|(_k, v)| v.into())) + })? + } + + /// Return latest key for a table + fn get_latest_key(&self) -> OpProofsStorageResult> + where + T: Table, + { + self.env.view(|tx| { + let mut cursor = tx.cursor_read::()?; + Ok(cursor.last()?.map(|(k, _)| k)) + })? + } +} + +impl OpProofsStore for MdbxProofsStorage { + type StorageTrieCursor<'tx> + = MdbxTrieCursor> + where + Self: 'tx; + type AccountTrieCursor<'tx> + = MdbxTrieCursor> + where + Self: 'tx; + type StorageCursor<'tx> + = MdbxStorageCursor> + where + Self: 'tx; + type AccountHashedCursor<'tx> + = MdbxAccountCursor> + where + Self: 'tx; + + fn get_earliest_block_number(&self) -> OpProofsStorageResult> { + self.env.view(|tx| self.inner_get_block_number_hash(tx, ProofWindowKey::EarliestBlock))? + } + + fn get_latest_block_number(&self) -> OpProofsStorageResult> { + self.env.view(|tx| self.inner_get_latest_block_number_hash(tx))? 
+ } + + fn storage_trie_cursor<'tx>( + &self, + hashed_address: B256, + max_block_number: u64, + ) -> OpProofsStorageResult> { + let tx = self.env.tx()?; + let cursor = tx.cursor_dup_read::()?; + + Ok(MdbxTrieCursor::new(cursor, max_block_number, Some(hashed_address))) + } + + fn account_trie_cursor<'tx>( + &self, + max_block_number: u64, + ) -> OpProofsStorageResult> { + let tx = self.env.tx()?; + let cursor = tx.cursor_dup_read::()?; + + Ok(MdbxTrieCursor::new(cursor, max_block_number, None)) + } + + fn storage_hashed_cursor<'tx>( + &self, + hashed_address: B256, + max_block_number: u64, + ) -> OpProofsStorageResult> { + let tx = self.env.tx()?; + let cursor = tx.cursor_dup_read::()?; + + Ok(MdbxStorageCursor::new(cursor, max_block_number, hashed_address)) + } + + fn account_hashed_cursor<'tx>( + &self, + max_block_number: u64, + ) -> OpProofsStorageResult> { + let tx = self.env.tx()?; + let cursor = tx.cursor_dup_read::()?; + + Ok(MdbxAccountCursor::new(cursor, max_block_number)) + } + + fn store_trie_updates( + &self, + block_ref: BlockWithParent, + block_state_diff: BlockStateDiff, + ) -> OpProofsStorageResult { + self.env + .update(|tx| self.store_trie_updates_append_only(tx, block_ref, block_state_diff))? + } + + fn fetch_trie_updates(&self, block_number: u64) -> OpProofsStorageResult { + self.env.view(|tx| { + let mut change_set_cursor = tx.cursor_read::()?; + let (_, change_set) = change_set_cursor + .seek_exact(block_number)? + .ok_or(OpProofsStorageError::NoChangeSetForBlock(block_number))?; + + let mut account_trie_cursor = tx.new_cursor::()?; + let mut storage_trie_cursor = tx.new_cursor::()?; + let mut hashed_account_cursor = tx.new_cursor::()?; + let mut hashed_storage_cursor = tx.new_cursor::()?; + + let mut trie_updates = TrieUpdates::default(); + for key in change_set.account_trie_keys { + let entry = + match account_trie_cursor.seek_by_key_subkey(key.clone(), block_number)? 
{ + Some(v) if v.block_number == block_number => v.value.0, + _ => { + return Err(OpProofsStorageError::MissingAccountTrieHistory( + key.0, + block_number, + )); + } + }; + + if let Some(value) = entry { + trie_updates.account_nodes.insert(key.0, value); + } else { + trie_updates.removed_nodes.insert(key.0); + } + } + + for key in change_set.storage_trie_keys { + let entry = + match storage_trie_cursor.seek_by_key_subkey(key.clone(), block_number)? { + Some(v) if v.block_number == block_number => v.value.0, + _ => { + return Err(OpProofsStorageError::MissingStorageTrieHistory( + key.hashed_address, + key.path.0, + block_number, + )); + } + }; + + let stu = trie_updates + .storage_tries + .entry(key.hashed_address) + .or_insert_with(StorageTrieUpdates::default); + + // handle is_deleted scenario + // Issue: https://github.com/op-rs/op-reth/issues/323 + if let Some(value) = entry { + stu.storage_nodes.insert(key.path.0, value); + } else { + stu.removed_nodes.insert(key.path.0); + } + } + + let mut post_state = + HashedPostState::with_capacity(change_set.hashed_account_keys.len()); + for key in change_set.hashed_account_keys { + let entry = match hashed_account_cursor.seek_by_key_subkey(key, block_number)? { + Some(v) if v.block_number == block_number => v.value.0, + _ => { + return Err(OpProofsStorageError::MissingHashedAccountHistory( + key, + block_number, + )); + } + }; + + post_state.accounts.insert(key, entry); + } + + for key in change_set.hashed_storage_keys { + let entry = + match hashed_storage_cursor.seek_by_key_subkey(key.clone(), block_number)? 
{ + Some(v) if v.block_number == block_number => v.value.0, + _ => { + return Err(OpProofsStorageError::MissingHashedStorageHistory { + hashed_address: key.hashed_address, + hashed_storage_key: key.hashed_storage_key, + block_number, + }); + } + }; + + let hs = post_state.storages.entry(key.hashed_address).or_default(); + + // handle wiped storage scenario + // Issue: https://github.com/op-rs/op-reth/issues/323 + if let Some(value) = entry { + hs.storage.insert(key.hashed_storage_key, value.0); + } else { + hs.storage.insert(key.hashed_storage_key, U256::ZERO); + } + } + + Ok(BlockStateDiff { + sorted_trie_updates: trie_updates.into_sorted(), + sorted_post_state: post_state.into_sorted(), + }) + })? + } + + /// Update the initial state with the provided diff. + /// Prune all historical trie data till `new_earliest_block_number` (inclusive) using + /// the [`BlockChangeSet`] index. + /// + /// Arguments: + /// - `new_earliest_block_ref`: The new earliest block reference (with parent hash). + /// - `diff`: The state diff to apply to the initial state (block 0). This diff represents all + /// the changes from the old earliest block to the new earliest block (inclusive). + fn prune_earliest_state( + &self, + new_earliest_block_ref: BlockWithParent, + ) -> OpProofsStorageResult { + let target_block = new_earliest_block_ref.block.number; + + // --- PHASE 1: READ (Calculate Deletions) --- + let plan = self.calculate_prune_plan(target_block)?; + let Some(plan) = plan else { + return Ok(WriteCounts::default()); + }; + + // --- PHASE 2: WRITE (Execute Deletions) --- + self.env.update(|tx| { + // 1. 
Execute Sparse Deletions and track actual deleted rows + let acc_deleted = + self.prune_history_preceding::(tx, plan.acc_survivors)?; + + let st_deleted = + self.prune_history_preceding::(tx, plan.storage_survivors)?; + + let ha_deleted = self.prune_history_preceding::( + tx, + plan.hashed_acc_survivors, + )?; + + let hs_deleted = self.prune_history_preceding::( + tx, + plan.hashed_storage_survivors, + )?; + + let counts = WriteCounts { + account_trie_updates_written_total: acc_deleted, + storage_trie_updates_written_total: st_deleted, + hashed_accounts_written_total: ha_deleted, + hashed_storages_written_total: hs_deleted, + }; + + // 2. Delete ChangeSets + let range = (plan.earliest_block + 1)..=target_block; + let mut cs_cursor = tx.cursor_write::()?; + let mut walker = cs_cursor.walk_range(range)?; + while walker.next().is_some() { + walker.delete_current()?; + } + + // 3. Update Earliest Pointer + Self::inner_set_earliest_block_number( + tx, + target_block, + new_earliest_block_ref.block.hash, + )?; + + Ok(counts) + })? + } + + /// Unwind the historical state to `unwind_upto_block` (inclusive), deleting all history + /// starting from provided block. Also updates the `ProofWindow::LatestBlock` to parent of + /// `unwind_upto_block`. + fn unwind_history(&self, to: BlockWithParent) -> OpProofsStorageResult<()> { + let history_to_delete = + self.env.view(|tx| self.collect_history_ranged(tx, to.block.number..))??; + + self.env.update(|tx| { + let proof_window = match self.inner_get_proof_window(tx)? 
{ + Some(pw) => pw, + None => return Ok(()), // Nothing to unwind + }; + + if to.block.number > proof_window.latest.number { + return Ok(()); // Nothing to unwind + } + + if to.block.number <= proof_window.earliest.number { + return Err(OpProofsStorageError::UnwindBeyondEarliest { + unwind_block_number: to.block.number, + earliest_block_number: proof_window.earliest.number, + }); + } + + self.delete_history_ranged(tx, to.block.number.., history_to_delete)?; + + let new_latest_block = + BlockNumberHash::new(to.block.number.saturating_sub(1), to.parent); + + // Update proof window's Latest block + Self::inner_set_latest_block_number( + tx, + new_latest_block.number(), + *new_latest_block.hash(), + )?; + + Ok(()) + })? + } + + fn replace_updates( + &self, + latest_common_block: BlockNumHash, + mut blocks_to_add: Vec<(BlockWithParent, BlockStateDiff)>, + ) -> OpProofsStorageResult<()> { + // Sort the vec list by block number + blocks_to_add.sort_unstable_by_key(|(bwp, _)| bwp.block.number); + + let history_to_delete = self + .env + .view(|tx| self.collect_history_ranged(tx, latest_common_block.number + 1..))??; + + self.env.update(|tx| { + // Remove the old history + self.delete_history_ranged(tx, latest_common_block.number + 1.., history_to_delete)?; + + // Update the ProofWindow Latest Block to latest_common_block so we can perform + // `store_trie_updates_append_only`. + Self::inner_set_latest_block_number( + tx, + latest_common_block.number, + latest_common_block.hash, + )?; + + // Apply the new history + for (block_with_parent, diff) in blocks_to_add { + self.store_trie_updates_append_only(tx, block_with_parent, diff)?; + } + Ok(()) + })? 
+ } + + fn set_earliest_block_number( + &self, + block_number: u64, + hash: B256, + ) -> OpProofsStorageResult<()> { + self.set_earliest_block_number_hash(block_number, hash) + } +} + +impl OpProofsInitialStateStore for MdbxProofsStorage { + fn initial_state_anchor(&self) -> OpProofsStorageResult { + // 1) NotStarted: no anchor row + let Some(block) = self.get_initial_state_anchor()? else { + return Ok(InitialStateAnchor::default()); + }; + + // 2) Completed: anchor exists + earliest is set + let completed = self.get_earliest_block_number()?.is_some(); + + // 3) InProgress / Completed: populate details + Ok(InitialStateAnchor { + block: Some(block), + status: if completed { + InitialStateStatus::Completed + } else { + InitialStateStatus::InProgress + }, + latest_account_trie_key: self.get_latest_key::()?, + latest_storage_trie_key: self.get_latest_key::()?, + latest_hashed_account_key: self.get_latest_key::()?, + latest_hashed_storage_key: self.get_latest_key::()?, + }) + } + + fn set_initial_state_anchor(&self, anchor: BlockNumHash) -> OpProofsStorageResult<()> { + self.env.update(|tx| { + let mut cur = tx.cursor_write::()?; + cur.insert(ProofWindowKey::InitialStateAnchor, &anchor.into())?; + Ok(()) + })? + } + + fn store_account_branches( + &self, + account_nodes: Vec<(Nibbles, Option)>, + ) -> OpProofsStorageResult<()> { + let mut account_nodes = account_nodes; + if account_nodes.is_empty() { + return Ok(()); + } + + account_nodes.sort_by_key(|(key, _)| *key); + + self.env.update(|tx| { + self.persist_history_batch(tx, 0, account_nodes.into_iter(), true)?; + Ok(()) + })? 
+ } + + fn store_storage_branches( + &self, + hashed_address: B256, + storage_nodes: Vec<(Nibbles, Option)>, + ) -> OpProofsStorageResult<()> { + let mut storage_nodes = storage_nodes; + if storage_nodes.is_empty() { + return Ok(()); + } + + storage_nodes.sort_by_key(|(key, _)| *key); + + self.env.update(|tx| { + self.persist_history_batch( + tx, + 0, + storage_nodes.into_iter().map(|(path, node)| (hashed_address, path, node)), + true, + )?; + Ok(()) + })? + } + + fn store_hashed_accounts( + &self, + accounts: Vec<(B256, Option)>, + ) -> OpProofsStorageResult<()> { + let mut accounts = accounts; + if accounts.is_empty() { + return Ok(()); + } + + // sort the accounts by key to ensure insertion is efficient + accounts.sort_by_key(|(key, _)| *key); + + self.env.update(|tx| { + self.persist_history_batch(tx, 0, accounts.into_iter(), true)?; + Ok(()) + })? + } + + fn store_hashed_storages( + &self, + hashed_address: B256, + storages: Vec<(B256, U256)>, + ) -> OpProofsStorageResult<()> { + let mut storages = storages; + if storages.is_empty() { + return Ok(()); + } + + // sort the storages by key to ensure insertion is efficient + storages.sort_by_key(|(key, _)| *key); + + self.env.update(|tx| { + self.persist_history_batch( + tx, + 0, + storages + .into_iter() + .map(|(key, val)| (hashed_address, key, Some(StorageValue(val)))), + true, + )?; + Ok(()) + })? + } + + fn commit_initial_state(&self) -> OpProofsStorageResult { + let anchor = self.get_initial_state_anchor()?.ok_or(NoBlocksFound)?; + self.set_earliest_block_number(anchor.number, anchor.hash)?; + Ok(anchor) + } +} + +/// This implementation is copied from the +/// [`DatabaseMetrics`](reth_db::database_metrics::DatabaseMetrics) implementation for +/// [`DatabaseEnv`]. As the implementation hard-coded the table name, we need to reimplement it. 
+#[cfg(feature = "metrics")] +impl reth_db::database_metrics::DatabaseMetrics for MdbxProofsStorage { + fn report_metrics(&self) { + for (name, value, labels) in self.gauge_metrics() { + gauge!(name, labels).set(value); + } + } + + fn gauge_metrics(&self) -> Vec<(&'static str, f64, Vec

, + /// Reader to fetch block hash by block number + block_hash_reader: H, + /// Keep at least these many recent blocks + min_block_interval: u64, + /// Maximum number of blocks to prune in one database transaction + prune_batch_size: u64, + // TODO: add timeout - Maximum time for one pruner run. If `None`, no timeout. + #[doc(hidden)] + #[cfg(feature = "metrics")] + metrics: Metrics, +} + +impl OpProofStoragePruner { + /// Create a new pruner. + pub fn new( + provider: OpProofsStorage

, + block_hash_reader: H, + min_block_interval: u64, + prune_batch_size: u64, + ) -> Self { + Self { + provider, + block_hash_reader, + min_block_interval, + prune_batch_size, + #[cfg(feature = "metrics")] + metrics: Metrics::default(), + } + } +} + +impl OpProofStoragePruner +where + P: OpProofsStore, + H: BlockHashReader, +{ + fn run_inner(&self) -> OpProofStoragePrunerResult { + let latest_block_opt = self.provider.get_latest_block_number()?; + if latest_block_opt.is_none() { + trace!(target: "trie::pruner", "No latest blocks in the proof storage"); + return Ok(PrunerOutput::default()); + } + + let earliest_block_opt = self.provider.get_earliest_block_number()?; + if earliest_block_opt.is_none() { + trace!(target: "trie::pruner", "No earliest blocks in the proof storage"); + return Ok(PrunerOutput::default()); + } + + let latest_block = latest_block_opt.unwrap().0; + let earliest_block = earliest_block_opt.unwrap().0; + + let interval = latest_block.saturating_sub(earliest_block); + if interval <= self.min_block_interval { + trace!(target: "trie::pruner", "Nothing to prune"); + return Ok(PrunerOutput::default()); + } + + // at this point `latest_block` is always greater than `min_block_interval` + let target_earliest_block = latest_block - self.min_block_interval; + + info!( + target: "trie::pruner", + from_block = earliest_block, + to_block = target_earliest_block, + "Starting pruning proof storage", + ); + + let mut current_earliest_block = earliest_block; + let mut prune_output = PrunerOutput { + start_block: earliest_block, + end_block: target_earliest_block, + ..Default::default() + }; + + // Prune in batches + while current_earliest_block < target_earliest_block { + // Calculate the end of this batch + let batch_end_block = + cmp::min(current_earliest_block + self.prune_batch_size, target_earliest_block); + + let batch_output = self.prune_batch(current_earliest_block, batch_end_block)?; + + prune_output.extend_ref(batch_output); + + // Update loop state + 
current_earliest_block = batch_end_block; + } + + Ok(prune_output) + } + + /// Prunes a single batch of blocks. + fn prune_batch(&self, start_block: u64, end_block: u64) -> Result { + let batch_start_time = Instant::now(); + + // Fetch block hashes for the new earliest block of this batch + let new_earliest_block_hash = self + .block_hash_reader + .block_hash(end_block) + .inspect_err(|err| { + error!( + target: "trie::pruner", + block = end_block, + ?err, + "Failed to fetch block hash for new earliest block during pruning" + ) + })? + .ok_or(PrunerError::BlockNotFound(end_block))?; + + let parent_block_num = end_block - 1; + let parent_block_hash = self + .block_hash_reader + .block_hash(parent_block_num) + .inspect_err(|err| { + error!( + target: "trie::pruner", + block = parent_block_num, + ?err, + "Failed to fetch block hash for parent block during pruning" + ) + })? + .ok_or(PrunerError::BlockNotFound(parent_block_num))?; + + batch_start_time.elapsed(); + + let block_with_parent = BlockWithParent { + parent: parent_block_hash, + block: BlockNumHash { number: end_block, hash: new_earliest_block_hash }, + }; + + // Commit this batch + let write_counts = self.provider.prune_earliest_state(block_with_parent)?; + + let duration = batch_start_time.elapsed(); + let batch_output = PrunerOutput { duration, start_block, end_block, write_counts }; + + // Record metrics for this batch + #[cfg(feature = "metrics")] + self.metrics.record_prune_result(batch_output.clone()); + + info!( + target: "trie::pruner", + ?batch_output, + "Finished pruning batch of proof storage", + ); + Ok(batch_output) + } + + /// Run the pruner + pub fn run(&self) { + let res = self.run_inner(); + if let Err(e) = res { + error!(target: "trie::pruner", err=%e, "Pruner failed"); + return; + } + info!(target: "trie::pruner", result = %res.unwrap(), "Finished pruning proof storage"); + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{BlockStateDiff, db::MdbxProofsStorage}; + use 
alloy_eips::{BlockHashOrNumber, NumHash}; + use alloy_primitives::{B256, BlockNumber, U256}; + use mockall::mock; + use reth_primitives_traits::Account; + use reth_storage_errors::provider::ProviderResult; + use reth_trie::{ + BranchNodeCompact, HashedPostState, HashedStorage, Nibbles, + hashed_cursor::HashedCursor, + trie_cursor::TrieCursor, + updates::{StorageTrieUpdates, TrieUpdates, TrieUpdatesSorted}, + }; + use std::sync::Arc; + use tempfile::TempDir; + + mock! ( + #[derive(Debug)] + pub BlockHashReader {} + + impl BlockHashReader for BlockHashReader { + fn block_hash(&self, number: BlockNumber) -> ProviderResult>; + + fn convert_block_hash( + &self, + _hash_or_number: BlockHashOrNumber, + ) -> ProviderResult>; + + fn canonical_hashes_range( + &self, + _start: BlockNumber, + _end: BlockNumber, + ) -> ProviderResult>; + } + ); + + fn b256(n: u64) -> B256 { + use alloy_primitives::keccak256; + keccak256(n.to_be_bytes()) + } + + /// Build a block-with-parent for number `n` with deterministic hash. 
+ fn block(n: u64, parent: B256) -> BlockWithParent { + BlockWithParent::new(parent, NumHash::new(n, b256(n))) + } + + #[tokio::test] + async fn run_inner_and_and_verify_updated_state() { + // --- env/store --- + let dir = TempDir::new().unwrap(); + let store: OpProofsStorage> = + OpProofsStorage::from(Arc::new(MdbxProofsStorage::new(dir.path()).expect("env"))); + + store.set_earliest_block_number(0, B256::ZERO).expect("set earliest"); + + // --- entities --- + // accounts + let a1 = B256::from([0xA1; 32]); + let a2 = B256::from([0xA2; 32]); + let a3 = B256::from([0xA3; 32]); // introduced later + + // one storage address with 3 slots + let stor_addr = B256::from([0x10; 32]); + let s1 = B256::from([0xB1; 32]); + let s2 = B256::from([0xB2; 32]); + let s3 = B256::from([0xB3; 32]); + + // account-trie paths (p1 gets removed by block 3; p2 remains; p3 added later) + let p1 = Nibbles::from_nibbles_unchecked([0x01, 0x02]); + let p2 = Nibbles::from_nibbles_unchecked([0x03, 0x04]); + let p3 = Nibbles::from_nibbles_unchecked([0x05, 0x06]); + + let node_p1 = BranchNodeCompact::new(0b1, 0, 0, vec![], Some(B256::from([0x11; 32]))); + let node_p2 = BranchNodeCompact::new(0b10, 0, 0, vec![], Some(B256::from([0x22; 32]))); + let node_p3 = BranchNodeCompact::new(0b11, 0, 0, vec![], Some(B256::from([0x33; 32]))); + + // storage-trie paths (st1 removed by block 3; st2 remains; st3 added later) + let st1 = Nibbles::from_nibbles_unchecked([0x0A]); + let st2 = Nibbles::from_nibbles_unchecked([0x0B]); + let st3 = Nibbles::from_nibbles_unchecked([0x0C]); + + let node_st2 = BranchNodeCompact::new(0b101, 0, 0, vec![], Some(B256::from([0x44; 32]))); + let node_st3 = BranchNodeCompact::new(0b110, 0, 0, vec![], Some(B256::from([0x55; 32]))); + + // --- write 5 blocks manually --- + let mut parent = B256::ZERO; + + // Block 1: add a1,a2; s1=100, s2=200; add p1, st1 + { + let b1 = block(1, parent); + + let mut d_trie_updates = TrieUpdates::default(); + let mut d_post_state = 
HashedPostState::default(); + + d_post_state.accounts.insert( + a1, + Some(Account { nonce: 1, balance: U256::from(1_001), ..Default::default() }), + ); + d_post_state.accounts.insert( + a2, + Some(Account { nonce: 1, balance: U256::from(1_002), ..Default::default() }), + ); + + let mut hs = HashedStorage::default(); + hs.storage.insert(s1, U256::from(100)); + hs.storage.insert(s2, U256::from(200)); + d_post_state.storages.insert(stor_addr, hs); + + d_trie_updates.account_nodes.insert(p1, node_p1); + let e = d_trie_updates.storage_tries.entry(stor_addr).or_default(); + e.storage_nodes.insert(st1, BranchNodeCompact::default()); + + let d = BlockStateDiff { + sorted_post_state: d_post_state.into_sorted(), + sorted_trie_updates: d_trie_updates.into_sorted(), + }; + store.store_trie_updates(b1, d).expect("b1"); + parent = b256(1); + } + + // Block 2: update a2; add a3; s2=220, s3=300; add p2, st2 + { + let b2 = block(2, parent); + + let mut d_trie_updates = TrieUpdates::default(); + let mut d_post_state = HashedPostState::default(); + + d_post_state.accounts.insert( + a2, + Some(Account { nonce: 2, balance: U256::from(2_002), ..Default::default() }), + ); + d_post_state.accounts.insert( + a3, + Some(Account { nonce: 1, balance: U256::from(1_003), ..Default::default() }), + ); + + let mut hs = HashedStorage::default(); + hs.storage.insert(s2, U256::from(220)); + hs.storage.insert(s3, U256::from(300)); + d_post_state.storages.insert(stor_addr, hs); + + d_trie_updates.account_nodes.insert(p2, node_p2.clone()); + let e = d_trie_updates.storage_tries.entry(stor_addr).or_default(); + e.storage_nodes.insert(st2, node_st2.clone()); + + let d = BlockStateDiff { + sorted_post_state: d_post_state.into_sorted(), + sorted_trie_updates: d_trie_updates.into_sorted(), + }; + store.store_trie_updates(b2, d).expect("b2"); + parent = b256(2); + } + + // Block 3: delete a1; leave a2,a3; remove p1; remove st1 (storage-trie) + { + let b3 = block(3, parent); + + let mut d_trie_updates = 
TrieUpdates::default(); + let mut d_post_state = HashedPostState::default(); + + // delete a1, keep a2 & a3 values unchanged for this block + d_post_state.accounts.insert(a1, None); + + // remove account trie node p1 + d_trie_updates.removed_nodes.insert(p1); + + // remove storage-trie node st1 + let mut st_upd = StorageTrieUpdates::default(); + st_upd.removed_nodes.insert(st1); + d_trie_updates.storage_tries.insert(stor_addr, st_upd); + + let d = BlockStateDiff { + sorted_post_state: d_post_state.into_sorted(), + sorted_trie_updates: d_trie_updates.into_sorted(), + }; + store.store_trie_updates(b3, d).expect("b3"); + parent = b256(3); + } + + // Block 4 (kept): update a2; s1=140; add p3, st3 + { + let b4 = block(4, parent); + + let mut d_trie_updates = TrieUpdates::default(); + let mut d_post_state = HashedPostState::default(); + + d_post_state.accounts.insert( + a2, + Some(Account { nonce: 3, balance: U256::from(3_002), ..Default::default() }), + ); + + let mut hs = HashedStorage::default(); + hs.storage.insert(s1, U256::from(140)); + d_post_state.storages.insert(stor_addr, hs); + d_trie_updates.account_nodes.insert(p3, node_p3.clone()); + let e = d_trie_updates.storage_tries.entry(stor_addr).or_default(); + e.storage_nodes.insert(st3, node_st3.clone()); + + let d = BlockStateDiff { + sorted_post_state: d_post_state.into_sorted(), + sorted_trie_updates: d_trie_updates.into_sorted(), + }; + store.store_trie_updates(b4, d).expect("b4"); + parent = b256(4); + } + + // Block 5 (kept): update a3; s3=330 + { + let b5 = block(5, parent); + + let mut d_post_state = HashedPostState::default(); + + d_post_state.accounts.insert( + a3, + Some(Account { nonce: 2, balance: U256::from(2_003), ..Default::default() }), + ); + + let mut hs = HashedStorage::default(); + hs.storage.insert(s3, U256::from(330)); + d_post_state.storages.insert(stor_addr, hs); + + let d = BlockStateDiff { + sorted_post_state: d_post_state.into_sorted(), + sorted_trie_updates: 
TrieUpdatesSorted::default(), + }; + store.store_trie_updates(b5, d).expect("b5"); + } + + // sanity: earliest=0, latest=5 + { + let e = store.get_earliest_block_number().expect("earliest").expect("some"); + let l = store.get_latest_block_number().expect("latest").expect("some"); + assert_eq!(e.0, 0); + assert_eq!(l.0, 5); + } + + // --- prune: remove the first 3 blocks, keep 4 and 5 + // new_earliest = 5-1 = 4 + let mut block_hash_reader = MockBlockHashReader::new(); + block_hash_reader + .expect_block_hash() + .withf(move |block_num| *block_num == 4) + .returning(move |_| Ok(Some(b256(4)))); + + block_hash_reader + .expect_block_hash() + .withf(move |block_num| *block_num == 3) + .returning(move |_| Ok(Some(b256(3)))); + + let pruner = OpProofStoragePruner::new(store.clone(), block_hash_reader, 1, 1000); + let out = pruner.run_inner().expect("pruner ok"); + assert_eq!(out.start_block, 0); + assert_eq!(out.end_block, 4, "pruned up to 4 (inclusive); new earliest is 4"); + + // proof window moved: earliest=4, latest=5 + { + let e = store.get_earliest_block_number().expect("earliest").expect("some"); + let l = store.get_latest_block_number().expect("latest").expect("some"); + assert_eq!(e.0, 4); + assert_eq!(e.1, b256(4)); + assert_eq!(l.0, 5); + assert_eq!(l.1, b256(5)); + } + + // --- DB checks + let mut acc_cur = store.account_hashed_cursor(4).expect("acc cur"); + let mut stor_cur = store.storage_hashed_cursor(stor_addr, 4).expect("stor cur"); + let mut acc_trie_cur = store.account_trie_cursor(4).expect("acc trie cur"); + let mut stor_trie_cur = store.storage_trie_cursor(stor_addr, 4).expect("stor trie cur"); + + // Check these histories have been removed + let pruned_hashed_account = a1; + let pruned_trie_accounts = p1; + let pruned_trie_storage = st1; + + assert_ne!( + acc_cur.seek(pruned_hashed_account).expect("seek").unwrap().0, + pruned_hashed_account, + "deleted account must not exist in earliest snapshot" + ); + assert_ne!( + 
acc_trie_cur.seek(pruned_trie_accounts).expect("seek").unwrap().0, + pruned_trie_accounts, + "deleted account trie must not exist in earliest snapshot" + ); + assert_ne!( + stor_trie_cur.seek(pruned_trie_storage).expect("seek").unwrap().0, + pruned_trie_storage, + "deleted storage trie must not exist in earliest snapshot" + ); + + // Check these histories have been updated - till block 4 + let updated_hashed_accounts = vec![ + (a2, Account { nonce: 3, balance: U256::from(3_002), ..Default::default() }), /* block 4 */ + (a3, Account { nonce: 1, balance: U256::from(1_003), ..Default::default() }), /* block 2 */ + ]; + let updated_hashed_storage = vec![ + (s1, U256::from(140)), // block 4 + (s2, U256::from(220)), // block 2 + (s3, U256::from(300)), // block 2 + ]; + let updated_trie_accounts = vec![ + (p2, node_p2), // block 2 + (p3, node_p3), // block 4 + ]; + let updated_trie_storage = vec![ + (st2, node_st2), // block 2 + (st3, node_st3), // block 4 + ]; + + for (key, val) in updated_hashed_accounts { + let (k, vv) = acc_cur.seek(key).expect("seek").unwrap(); + assert_eq!(key, k, "key must exist"); + assert_eq!(val, vv, "value must be updated"); + } + + for (key, val) in updated_hashed_storage { + let (k, vv) = stor_cur.seek(key).expect("seek").unwrap(); + assert_eq!(key, k, "key must exist"); + assert_eq!(val, vv, "value must be updated"); + } + + for (key, val) in updated_trie_accounts { + let (k, vv) = acc_trie_cur.seek(key).expect("seek").unwrap(); + assert_eq!(key, k, "key must exist"); + assert_eq!(val, vv, "value must be updated"); + } + for (key, val) in updated_trie_storage { + let (k, vv) = stor_trie_cur.seek(key).expect("seek").unwrap(); + assert_eq!(key, k, "key must exist"); + assert_eq!(val, vv, "value must be updated"); + } + } + + // Both latest and earliest blocks are None -> early return default; DB untouched. 
+ #[tokio::test] + async fn run_inner_where_latest_block_is_none() { + let dir = TempDir::new().unwrap(); + let store: OpProofsStorage> = + OpProofsStorage::from(Arc::new(MdbxProofsStorage::new(dir.path()).expect("env"))); + + let earliest = store.get_earliest_block_number().unwrap(); + let latest = store.get_latest_block_number().unwrap(); + println!("{earliest:?} {latest:?}"); + assert!(earliest.is_none()); + assert!(latest.is_none()); + + let block_hash_reader = MockBlockHashReader::new(); + let pruner = OpProofStoragePruner::new(store, block_hash_reader, 10, 1000); + let out = pruner.run_inner().expect("ok"); + assert_eq!(out, PrunerOutput::default(), "should early-return default output"); + } + + // The earliest block is None, but the latest block exists -> early return default. + #[tokio::test] + async fn run_inner_earliest_none_real_db() { + use crate::BlockStateDiff; + + let dir = TempDir::new().unwrap(); + let store: OpProofsStorage> = + OpProofsStorage::from(Arc::new(MdbxProofsStorage::new(dir.path()).expect("env"))); + + // Write a single block to set *latest* only. + store + .store_trie_updates(block(3, B256::ZERO), BlockStateDiff::default()) + .expect("store b1"); + + let earliest = store.get_earliest_block_number().unwrap(); + let latest = store.get_latest_block_number().unwrap(); + assert!(earliest.is_none(), "earliest must remain None"); + assert_eq!(latest.unwrap().0, 3); + + let block_hash_reader = MockBlockHashReader::new(); + let pruner = OpProofStoragePruner::new(store, block_hash_reader, 1, 1000); + let out = pruner.run_inner().expect("ok"); + assert_eq!(out, PrunerOutput::default(), "should early-return default output"); + } + + // interval < min_block_interval -> "Nothing to prune" path; default output. 
+ #[tokio::test] + async fn run_inner_interval_too_small_real_db() { + use crate::BlockStateDiff; + + let dir = TempDir::new().unwrap(); + let store: OpProofsStorage> = + OpProofsStorage::from(Arc::new(MdbxProofsStorage::new(dir.path()).expect("env"))); + + // Set earliest=4 explicitly + let earliest_num = 4u64; + let h4 = b256(4); + store.set_earliest_block_number(earliest_num, h4).expect("set earliest"); + + // Set latest=5 by storing block 5 + let b5 = block(5, h4); + store.store_trie_updates(b5, BlockStateDiff::default()).expect("store b5"); + + // Sanity: earliest=4, latest=5 => interval=1 + let e = store.get_earliest_block_number().unwrap().unwrap(); + let l = store.get_latest_block_number().unwrap().unwrap(); + assert_eq!(e.0, 4); + assert_eq!(l.0, 5); + + // Require min_block_interval=2 (or greater) so interval < min + let block_hash_reader = MockBlockHashReader::new(); + let pruner = OpProofStoragePruner::new(store, block_hash_reader, 2, 1000); + let out = pruner.run_inner().expect("ok"); + assert_eq!(out, PrunerOutput::default(), "no pruning should occur"); + } +} diff --git a/rust/op-reth/crates/trie/src/prune/task.rs b/rust/op-reth/crates/trie/src/prune/task.rs new file mode 100644 index 0000000000000..3838343cb4082 --- /dev/null +++ b/rust/op-reth/crates/trie/src/prune/task.rs @@ -0,0 +1,64 @@ +use crate::{OpProofsStorage, OpProofsStore, prune::OpProofStoragePruner}; +use reth_provider::BlockHashReader; +use reth_tasks::shutdown::GracefulShutdown; +use tokio::{ + time, + time::{Duration, MissedTickBehavior}, +}; +use tracing::info; + +const PRUNE_BATCH_SIZE: u64 = 200; + +/// Periodic pruner task: constructs the pruner and runs it every interval. 
+#[derive(Debug)] +pub struct OpProofStoragePrunerTask { + pruner: OpProofStoragePruner, + min_block_interval: u64, + task_run_interval: Duration, +} + +impl OpProofStoragePrunerTask +where + P: OpProofsStore, + H: BlockHashReader, +{ + /// Initialize a new [`OpProofStoragePrunerTask`] + pub fn new( + provider: OpProofsStorage

, + hash_reader: H, + min_block_interval: u64, + task_run_interval: Duration, + ) -> Self { + let pruner = + OpProofStoragePruner::new(provider, hash_reader, min_block_interval, PRUNE_BATCH_SIZE); + Self { pruner, min_block_interval, task_run_interval } + } + + /// Run forever (until `cancel`), executing one prune pass per `task_run_interval`. + pub async fn run(self, mut signal: GracefulShutdown) { + info!( + target: "trie::pruner_task", + min_block_interval = self.min_block_interval, + interval_secs = self.task_run_interval.as_secs(), + "Starting pruner task" + ); + + // Drive pruning with a periodic ticker + let mut interval = time::interval(self.task_run_interval); + interval.set_missed_tick_behavior(MissedTickBehavior::Delay); + + loop { + tokio::select! { + _ = &mut signal => { + info!(target: "trie::pruner_task", "Pruner task cancelled; exiting"); + break; + } + _ = interval.tick() => { + self.pruner.run() + } + } + } + + info!(target: "trie::pruner_task", "Pruner task stopped"); + } +} diff --git a/rust/op-reth/crates/trie/tests/lib.rs b/rust/op-reth/crates/trie/tests/lib.rs new file mode 100644 index 0000000000000..ccf4a5c871374 --- /dev/null +++ b/rust/op-reth/crates/trie/tests/lib.rs @@ -0,0 +1,1967 @@ +//! Common test suite for [`OpProofsStore`] implementations. 
+ +use alloy_eips::{BlockNumHash, NumHash, eip1898::BlockWithParent}; +use alloy_primitives::{B256, U256}; +use reth_optimism_trie::{ + BlockStateDiff, InMemoryProofsStorage, OpProofsInitialStateStore, OpProofsStorageError, + OpProofsStore, db::MdbxProofsStorage, +}; +use reth_primitives_traits::Account; +use reth_trie::{ + BranchNodeCompact, HashedPostState, HashedPostStateSorted, HashedStorage, Nibbles, TrieMask, + hashed_cursor::HashedCursor, + trie_cursor::TrieCursor, + updates::{TrieUpdates, TrieUpdatesSorted}, +}; +use serial_test::serial; +use std::sync::Arc; +use tempfile::TempDir; +use test_case::test_case; + +/// Helper to create a simple test branch node +fn create_test_branch() -> BranchNodeCompact { + let mut state_mask = TrieMask::default(); + state_mask.set_bit(0); + state_mask.set_bit(1); + + BranchNodeCompact { + state_mask, + tree_mask: TrieMask::default(), + hash_mask: TrieMask::default(), + hashes: Arc::new(vec![]), + root_hash: None, + } +} + +/// Helper to create a variant test branch node for comparison tests +fn create_test_branch_variant() -> BranchNodeCompact { + let mut state_mask = TrieMask::default(); + state_mask.set_bit(5); + state_mask.set_bit(6); + + BranchNodeCompact { + state_mask, + tree_mask: TrieMask::default(), + hash_mask: TrieMask::default(), + hashes: Arc::new(vec![]), + root_hash: None, + } +} + +/// Helper to create nibbles from a vector of u8 values +fn nibbles_from(vec: Vec) -> Nibbles { + Nibbles::from_nibbles_unchecked(vec) +} + +/// Helper to create a test account +fn create_test_account() -> Account { + Account { + nonce: 42, + balance: U256::from(1000000), + bytecode_hash: Some(B256::repeat_byte(0xBB)), + } +} + +/// Helper to create a test account with custom values +fn create_test_account_with_values(nonce: u64, balance: u64, code_hash_byte: u8) -> Account { + Account { + nonce, + balance: U256::from(balance), + bytecode_hash: Some(B256::repeat_byte(code_hash_byte)), + } +} + +fn create_mdbx_proofs_storage() -> 
MdbxProofsStorage { + let path = TempDir::new().unwrap(); + MdbxProofsStorage::new(path.path()).unwrap() +} + +/// Test basic storage and retrieval of earliest block number +#[test_case(InMemoryProofsStorage::new(); "InMemory")] +#[test_case(create_mdbx_proofs_storage(); "Mdbx")] +#[serial] +fn test_earliest_block_operations( + storage: S, +) -> Result<(), OpProofsStorageError> { + // Initially should be None + let earliest = storage.get_earliest_block_number()?; + assert!(earliest.is_none()); + + // Set earliest block + let block_hash = B256::repeat_byte(0x42); + storage.set_earliest_block_number(100, block_hash)?; + + // Should retrieve the same values + let earliest = storage.get_earliest_block_number()?; + assert_eq!(earliest, Some((100, block_hash))); + + Ok(()) +} + +/// Test storing and retrieving trie updates +#[test_case(InMemoryProofsStorage::new(); "InMemory")] +#[test_case(create_mdbx_proofs_storage(); "Mdbx")] +#[serial] +fn test_trie_updates_operations( + storage: S, +) -> Result<(), OpProofsStorageError> { + let block_ref = BlockWithParent::new(B256::ZERO, NumHash::new(50, B256::repeat_byte(0x96))); + let sorted_trie_updates = TrieUpdatesSorted::default(); + let sorted_post_state = HashedPostStateSorted::default(); + let block_state_diff = BlockStateDiff { + sorted_trie_updates: sorted_trie_updates.clone(), + sorted_post_state: sorted_post_state.clone(), + }; + + // Store trie updates + storage.store_trie_updates(block_ref, block_state_diff)?; + + // Retrieve and verify + let retrieved_diff = storage.fetch_trie_updates(block_ref.block.number)?; + assert_eq!(retrieved_diff.sorted_trie_updates, sorted_trie_updates); + assert_eq!(retrieved_diff.sorted_post_state, sorted_post_state); + + Ok(()) +} + +// ============================================================================= +// 1. 
Basic Cursor Operations +// ============================================================================= + +/// Test cursor operations on empty trie +#[test_case(InMemoryProofsStorage::new(); "InMemory")] +#[test_case(create_mdbx_proofs_storage(); "Mdbx")] +#[serial] +fn test_cursor_empty_trie( + storage: S, +) -> Result<(), OpProofsStorageError> { + let mut cursor = storage.account_trie_cursor(100)?; + + // All operations should return None on empty trie + assert!(cursor.seek_exact(Nibbles::default())?.is_none()); + assert!(cursor.seek(Nibbles::default())?.is_none()); + assert!(cursor.next()?.is_none()); + assert!(cursor.current()?.is_none()); + + Ok(()) +} + +/// Test cursor operations with single entry +#[test_case(InMemoryProofsStorage::new(); "InMemory")] +#[test_case(create_mdbx_proofs_storage(); "Mdbx")] +#[serial] +fn test_cursor_single_entry( + storage: S, +) -> Result<(), OpProofsStorageError> { + let path = nibbles_from(vec![1, 2, 3]); + let branch = create_test_branch(); + + // Store single entry + storage.store_account_branches(vec![(path, Some(branch))])?; + + let mut cursor = storage.account_trie_cursor(100)?; + + // Test seek_exact + let result = cursor.seek_exact(path)?.unwrap(); + assert_eq!(result.0, path); + + // Test current position + assert_eq!(cursor.current()?.unwrap(), path); + + // Test next from end should return None + assert!(cursor.next()?.is_none()); + + Ok(()) +} + +/// Test cursor operations with multiple entries +#[test_case(InMemoryProofsStorage::new(); "InMemory")] +#[test_case(create_mdbx_proofs_storage(); "Mdbx")] +#[serial] +fn test_cursor_multiple_entries( + storage: S, +) -> Result<(), OpProofsStorageError> { + let paths = vec![ + nibbles_from(vec![1]), + nibbles_from(vec![1, 2]), + nibbles_from(vec![2]), + nibbles_from(vec![2, 3]), + ]; + let branch = create_test_branch(); + + // Store multiple entries + for path in &paths { + storage.store_account_branches(vec![(*path, Some(branch.clone()))])?; + } + + let mut cursor = 
storage.account_trie_cursor(100)?; + + // Test that we can iterate through all entries + let mut found_paths = Vec::new(); + while let Some((path, _)) = cursor.next()? { + found_paths.push(path); + } + + assert_eq!(found_paths.len(), 4); + // Paths should be in lexicographic order + for i in 0..paths.len() { + assert_eq!(found_paths[i], paths[i]); + } + + Ok(()) +} + +// ============================================================================= +// 2. Seek Operations +// ============================================================================= + +/// Test `seek_exact` with existing path +#[test_case(InMemoryProofsStorage::new(); "InMemory")] +#[test_case(create_mdbx_proofs_storage(); "Mdbx")] +#[serial] +fn test_seek_exact_existing_path( + storage: S, +) -> Result<(), OpProofsStorageError> { + let path = nibbles_from(vec![1, 2, 3]); + let branch = create_test_branch(); + + storage.store_account_branches(vec![(path, Some(branch))])?; + + let mut cursor = storage.account_trie_cursor(100)?; + let result = cursor.seek_exact(path)?.unwrap(); + assert_eq!(result.0, path); + + Ok(()) +} + +/// Test `seek_exact` with non-existing path +#[test_case(InMemoryProofsStorage::new(); "InMemory")] +#[test_case(create_mdbx_proofs_storage(); "Mdbx")] +#[serial] +fn test_seek_exact_non_existing_path( + storage: S, +) -> Result<(), OpProofsStorageError> { + let path = nibbles_from(vec![1, 2, 3]); + let branch = create_test_branch(); + + storage.store_account_branches(vec![(path, Some(branch))])?; + + let mut cursor = storage.account_trie_cursor(100)?; + let non_existing = nibbles_from(vec![4, 5, 6]); + assert!(cursor.seek_exact(non_existing)?.is_none()); + + Ok(()) +} + +/// Test `seek_exact` with empty path +#[test_case(InMemoryProofsStorage::new(); "InMemory")] +#[test_case(create_mdbx_proofs_storage(); "Mdbx")] +#[serial] +fn test_seek_exact_empty_path( + storage: S, +) -> Result<(), OpProofsStorageError> { + let path = nibbles_from(vec![]); + let branch = 
create_test_branch(); + + storage.store_account_branches(vec![(path, Some(branch))])?; + + let mut cursor = storage.account_trie_cursor(100)?; + let result = cursor.seek_exact(Nibbles::default())?.unwrap(); + assert_eq!(result.0, Nibbles::default()); + + Ok(()) +} + +/// Test seek to existing path +#[test_case(InMemoryProofsStorage::new(); "InMemory")] +#[test_case(create_mdbx_proofs_storage(); "Mdbx")] +#[serial] +fn test_seek_to_existing_path( + storage: S, +) -> Result<(), OpProofsStorageError> { + let path = nibbles_from(vec![1, 2, 3]); + let branch = create_test_branch(); + + storage.store_account_branches(vec![(path, Some(branch))])?; + + let mut cursor = storage.account_trie_cursor(100)?; + let result = cursor.seek(path)?.unwrap(); + assert_eq!(result.0, path); + + Ok(()) +} + +/// Test seek between existing nodes +#[test_case(InMemoryProofsStorage::new(); "InMemory")] +#[test_case(create_mdbx_proofs_storage(); "Mdbx")] +#[serial] +fn test_seek_between_existing_nodes( + storage: S, +) -> Result<(), OpProofsStorageError> { + let path1 = nibbles_from(vec![1]); + let path2 = nibbles_from(vec![3]); + let branch = create_test_branch(); + + storage.store_account_branches(vec![(path1, Some(branch.clone()))])?; + storage.store_account_branches(vec![(path2, Some(branch))])?; + + let mut cursor = storage.account_trie_cursor(100)?; + // Seek to path between 1 and 3, should return path 3 + let seek_path = nibbles_from(vec![2]); + let result = cursor.seek(seek_path)?.unwrap(); + assert_eq!(result.0, path2); + + Ok(()) +} + +/// Test seek after all nodes +#[test_case(InMemoryProofsStorage::new(); "InMemory")] +#[test_case(create_mdbx_proofs_storage(); "Mdbx")] +#[serial] +fn test_seek_after_all_nodes( + storage: S, +) -> Result<(), OpProofsStorageError> { + let path = nibbles_from(vec![1]); + let branch = create_test_branch(); + + storage.store_account_branches(vec![(path, Some(branch))])?; + + let mut cursor = storage.account_trie_cursor(100)?; + // Seek to path after 
all nodes + let seek_path = nibbles_from(vec![9]); + assert!(cursor.seek(seek_path)?.is_none()); + + Ok(()) +} + +/// Test seek before all nodes +#[test_case(InMemoryProofsStorage::new(); "InMemory")] +#[test_case(create_mdbx_proofs_storage(); "Mdbx")] +#[serial] +fn test_seek_before_all_nodes( + storage: S, +) -> Result<(), OpProofsStorageError> { + let path = nibbles_from(vec![5]); + let branch = create_test_branch(); + + storage.store_account_branches(vec![(path, Some(branch))])?; + + let mut cursor = storage.account_trie_cursor(100)?; + // Seek to path before all nodes, should return first node + let seek_path = nibbles_from(vec![1]); + let result = cursor.seek(seek_path)?.unwrap(); + assert_eq!(result.0, path); + + Ok(()) +} + +// ============================================================================= +// 3. Navigation Tests +// ============================================================================= + +/// Test next without prior seek +#[test_case(InMemoryProofsStorage::new(); "InMemory")] +#[test_case(create_mdbx_proofs_storage(); "Mdbx")] +#[serial] +fn test_next_without_prior_seek( + storage: S, +) -> Result<(), OpProofsStorageError> { + let path = nibbles_from(vec![1, 2]); + let branch = create_test_branch(); + + storage.store_account_branches(vec![(path, Some(branch))])?; + + let mut cursor = storage.account_trie_cursor(100)?; + // next() without prior seek should start from beginning + let result = cursor.next()?.unwrap(); + assert_eq!(result.0, path); + + Ok(()) +} + +/// Test next after seek +#[test_case(InMemoryProofsStorage::new(); "InMemory")] +#[test_case(create_mdbx_proofs_storage(); "Mdbx")] +#[serial] +fn test_next_after_seek( + storage: S, +) -> Result<(), OpProofsStorageError> { + let path1 = nibbles_from(vec![1]); + let path2 = nibbles_from(vec![2]); + let branch = create_test_branch(); + + storage.store_account_branches(vec![(path1, Some(branch.clone()))])?; + storage.store_account_branches(vec![(path2, Some(branch))])?; + + let 
mut cursor = storage.account_trie_cursor(100)?; + cursor.seek(path1)?; + + // next() should return second node + let result = cursor.next()?.unwrap(); + assert_eq!(result.0, path2); + + Ok(()) +} + +/// Test next at end of trie +#[test_case(InMemoryProofsStorage::new(); "InMemory")] +#[test_case(create_mdbx_proofs_storage(); "Mdbx")] +#[serial] +fn test_next_at_end_of_trie( + storage: S, +) -> Result<(), OpProofsStorageError> { + let path = nibbles_from(vec![1]); + let branch = create_test_branch(); + + storage.store_account_branches(vec![(path, Some(branch))])?; + + let mut cursor = storage.account_trie_cursor(100)?; + cursor.seek(path)?; + + // next() at end should return None + assert!(cursor.next()?.is_none()); + + Ok(()) +} + +/// Test multiple consecutive next calls +#[test_case(InMemoryProofsStorage::new(); "InMemory")] +#[test_case(create_mdbx_proofs_storage(); "Mdbx")] +#[serial] +fn test_multiple_consecutive_next( + storage: S, +) -> Result<(), OpProofsStorageError> { + let paths = vec![nibbles_from(vec![1]), nibbles_from(vec![2]), nibbles_from(vec![3])]; + let branch = create_test_branch(); + + for path in &paths { + storage.store_account_branches(vec![(*path, Some(branch.clone()))])?; + } + + let mut cursor = storage.account_trie_cursor(100)?; + + // Iterate through all with consecutive next() calls + for expected_path in &paths { + let result = cursor.next()?.unwrap(); + assert_eq!(result.0, *expected_path); + } + + // Final next() should return None + assert!(cursor.next()?.is_none()); + + Ok(()) +} + +/// Test current after operations +#[test_case(InMemoryProofsStorage::new(); "InMemory")] +#[test_case(create_mdbx_proofs_storage(); "Mdbx")] +#[serial] +fn test_current_after_operations( + storage: S, +) -> Result<(), OpProofsStorageError> { + let path1 = nibbles_from(vec![1]); + let path2 = nibbles_from(vec![2]); + let branch = create_test_branch(); + + storage.store_account_branches(vec![(path1, Some(branch.clone()))])?; + 
storage.store_account_branches(vec![(path2, Some(branch))])?; + + let mut cursor = storage.account_trie_cursor(100)?; + + // Current should be None initially + assert!(cursor.current()?.is_none()); + + // After seek, current should track position + cursor.seek(path1)?; + assert_eq!(cursor.current()?.unwrap(), path1); + + // After next, current should update + cursor.next()?; + assert_eq!(cursor.current()?.unwrap(), path2); + + Ok(()) +} + +/// Test current with no prior operations +#[test_case(InMemoryProofsStorage::new(); "InMemory")] +#[test_case(create_mdbx_proofs_storage(); "Mdbx")] +#[serial] +fn test_current_no_prior_operations( + storage: S, +) -> Result<(), OpProofsStorageError> { + let mut cursor = storage.account_trie_cursor(100)?; + + // Current should be None when no operations performed + assert!(cursor.current()?.is_none()); + + Ok(()) +} + +// ============================================================================= +// 4. Block Number Filtering +// ============================================================================= + +/// Test same path with different blocks +#[test_case(InMemoryProofsStorage::new(); "InMemory")] +#[test_case(create_mdbx_proofs_storage(); "Mdbx")] +#[serial] +fn test_same_path_different_blocks( + storage: S, +) -> Result<(), OpProofsStorageError> { + let path = nibbles_from(vec![1, 2]); + let branch1 = create_test_branch(); + let branch2 = create_test_branch_variant(); + + // Store same path at different blocks + storage.store_account_branches(vec![(path, Some(branch1))])?; + storage.store_account_branches(vec![(path, Some(branch2))])?; + + // Cursor with max_block_number=75 should see only block 50 data + let mut cursor75 = storage.account_trie_cursor(75)?; + let result75 = cursor75.seek_exact(path)?.unwrap(); + assert_eq!(result75.0, path); + + // Cursor with max_block_number=150 should see block 100 data (latest) + let mut cursor150 = storage.account_trie_cursor(150)?; + let result150 = 
cursor150.seek_exact(path)?.unwrap();
+    assert_eq!(result150.0, path);
+
+    Ok(())
+}
+
+/// Test deleted branch nodes
+#[test_case(InMemoryProofsStorage::new(); "InMemory")]
+#[test_case(create_mdbx_proofs_storage(); "Mdbx")]
+#[serial]
+fn test_deleted_branch_nodes(
+    storage: S,
+) -> Result<(), OpProofsStorageError> {
+    let path = nibbles_from(vec![1, 2]);
+    let branch = create_test_branch();
+    let block_ref = BlockWithParent::new(B256::ZERO, NumHash::new(100, B256::repeat_byte(0x96)));
+
+    // Store branch node, then delete it via `removed_nodes` in a later block's trie updates
+    storage.store_account_branches(vec![(path, Some(branch))])?;
+
+    // Cursor before deletion should see the node
+    let mut cursor75 = storage.account_trie_cursor(75)?;
+    assert!(cursor75.seek_exact(path)?.is_some());
+
+    let mut block_state_diff_trie_updates = TrieUpdates::default();
+    block_state_diff_trie_updates.removed_nodes.insert(path);
+    let block_state_diff = BlockStateDiff {
+        sorted_trie_updates: block_state_diff_trie_updates.into_sorted(),
+        sorted_post_state: HashedPostStateSorted::default(),
+    };
+    storage.store_trie_updates(block_ref, block_state_diff)?;
+
+    // Cursor after deletion should not see the node
+    let mut cursor150 = storage.account_trie_cursor(150)?;
+    assert!(cursor150.seek_exact(path)?.is_none());
+
+    Ok(())
+}
+
+// =============================================================================
+// 5. 
Hashed Address Filtering +// ============================================================================= + +/// Test account-specific cursor +#[test_case(InMemoryProofsStorage::new(); "InMemory")] +#[test_case(create_mdbx_proofs_storage(); "Mdbx")] +#[serial] +fn test_account_specific_cursor( + storage: S, +) -> Result<(), OpProofsStorageError> { + let path = nibbles_from(vec![1, 2]); + let addr1 = B256::repeat_byte(0x01); + let addr2 = B256::repeat_byte(0x02); + let branch = create_test_branch(); + + // Store same path for different accounts (using storage branches) + storage.store_storage_branches(addr1, vec![(path, Some(branch.clone()))])?; + storage.store_storage_branches(addr2, vec![(path, Some(branch))])?; + + // Cursor for addr1 should only see addr1 data + let mut cursor1 = storage.storage_trie_cursor(addr1, 100)?; + let result1 = cursor1.seek_exact(path)?.unwrap(); + assert_eq!(result1.0, path); + + // Cursor for addr2 should only see addr2 data + let mut cursor2 = storage.storage_trie_cursor(addr2, 100)?; + let result2 = cursor2.seek_exact(path)?.unwrap(); + assert_eq!(result2.0, path); + + // Cursor for addr1 should not see addr2 data when iterating + let mut cursor1_iter = storage.storage_trie_cursor(addr1, 100)?; + let mut found_count = 0; + while cursor1_iter.next()?.is_some() { + found_count += 1; + } + assert_eq!(found_count, 1); // Should only see one entry (for addr1) + + Ok(()) +} + +/// Test state trie cursor +#[test_case(InMemoryProofsStorage::new(); "InMemory")] +#[test_case(create_mdbx_proofs_storage(); "Mdbx")] +#[serial] +fn test_state_trie_cursor( + storage: S, +) -> Result<(), OpProofsStorageError> { + let path = nibbles_from(vec![1, 2]); + let addr = B256::repeat_byte(0x01); + let branch = create_test_branch(); + + // Store data for account trie and state trie + storage.store_storage_branches(addr, vec![(path, Some(branch.clone()))])?; + storage.store_account_branches(vec![(path, Some(branch))])?; + + // State trie cursor (None 
address) should only see state trie data + let mut state_cursor = storage.account_trie_cursor(100)?; + let result = state_cursor.seek_exact(path)?.unwrap(); + assert_eq!(result.0, path); + + // Verify state cursor doesn't see account data when iterating + let mut state_cursor_iter = storage.account_trie_cursor(100)?; + let mut found_count = 0; + while state_cursor_iter.next()?.is_some() { + found_count += 1; + } + + assert_eq!(found_count, 1); // Should only see state trie entry + + Ok(()) +} + +/// Test mixed account and state data +#[test_case(InMemoryProofsStorage::new(); "InMemory")] +#[test_case(create_mdbx_proofs_storage(); "Mdbx")] +#[serial] +fn test_mixed_account_state_data( + storage: S, +) -> Result<(), OpProofsStorageError> { + let path1 = nibbles_from(vec![1]); + let path2 = nibbles_from(vec![2]); + let addr = B256::repeat_byte(0x01); + let branch = create_test_branch(); + + // Store mixed account and state trie data + storage.store_storage_branches(addr, vec![(path1, Some(branch.clone()))])?; + storage.store_account_branches(vec![(path2, Some(branch))])?; + + // Account cursor should only see account data + let mut account_cursor = storage.storage_trie_cursor(addr, 100)?; + let mut account_paths = Vec::new(); + while let Some((path, _)) = account_cursor.next()? { + account_paths.push(path); + } + assert_eq!(account_paths.len(), 1); + assert_eq!(account_paths[0], path1); + + // State cursor should only see state data + let mut state_cursor = storage.account_trie_cursor(100)?; + let mut state_paths = Vec::new(); + while let Some((path, _)) = state_cursor.next()? { + state_paths.push(path); + } + assert_eq!(state_paths.len(), 1); + assert_eq!(state_paths[0], path2); + + Ok(()) +} + +// ============================================================================= +// 6. 
Path Ordering Tests +// ============================================================================= + +/// Test lexicographic ordering +#[test_case(InMemoryProofsStorage::new(); "InMemory")] +#[test_case(create_mdbx_proofs_storage(); "Mdbx")] +#[serial] +fn test_lexicographic_ordering( + storage: S, +) -> Result<(), OpProofsStorageError> { + let paths = vec![ + nibbles_from(vec![3, 1]), + nibbles_from(vec![1, 2]), + nibbles_from(vec![2]), + nibbles_from(vec![1]), + ]; + let branch = create_test_branch(); + + // Store paths in random order + for path in &paths { + storage.store_account_branches(vec![(*path, Some(branch.clone()))])?; + } + + let mut cursor = storage.account_trie_cursor(100)?; + let mut found_paths = Vec::new(); + while let Some((path, _)) = cursor.next()? { + found_paths.push(path); + } + + // Should be returned in lexicographic order: [1], [1,2], [2], [3,1] + let expected_order = vec![ + nibbles_from(vec![1]), + nibbles_from(vec![1, 2]), + nibbles_from(vec![2]), + nibbles_from(vec![3, 1]), + ]; + + assert_eq!(found_paths, expected_order); + + Ok(()) +} + +/// Test path prefix scenarios +#[test_case(InMemoryProofsStorage::new(); "InMemory")] +#[test_case(create_mdbx_proofs_storage(); "Mdbx")] +#[serial] +fn test_path_prefix_scenarios( + storage: S, +) -> Result<(), OpProofsStorageError> { + let paths = vec![ + nibbles_from(vec![1]), // Prefix of next + nibbles_from(vec![1, 2]), // Extends first + nibbles_from(vec![1, 2, 3]), // Extends second + ]; + let branch = create_test_branch(); + + for path in &paths { + storage.store_account_branches(vec![(*path, Some(branch.clone()))])?; + } + + let mut cursor = storage.account_trie_cursor(100)?; + + // Seek to prefix should find exact match + let result = cursor.seek_exact(paths[0])?.unwrap(); + assert_eq!(result.0, paths[0]); + + // Next should go to next path, not skip prefixed paths + let result = cursor.next()?.unwrap(); + assert_eq!(result.0, paths[1]); + + let result = cursor.next()?.unwrap(); + 
assert_eq!(result.0, paths[2]); + + Ok(()) +} + +/// Test complex nibble combinations +#[test_case(InMemoryProofsStorage::new(); "InMemory")] +#[test_case(create_mdbx_proofs_storage(); "Mdbx")] +#[serial] +fn test_complex_nibble_combinations( + storage: S, +) -> Result<(), OpProofsStorageError> { + // Test various nibble patterns including edge values + let paths = vec![ + nibbles_from(vec![0]), + nibbles_from(vec![0, 15]), + nibbles_from(vec![15]), + nibbles_from(vec![15, 0]), + nibbles_from(vec![7, 8, 9]), + ]; + let branch = create_test_branch(); + + for path in &paths { + storage.store_account_branches(vec![(*path, Some(branch.clone()))])?; + } + + let mut cursor = storage.account_trie_cursor(100)?; + let mut found_paths = Vec::new(); + while let Some((path, _)) = cursor.next()? { + found_paths.push(path); + } + + // All paths should be found and in correct order + assert_eq!(found_paths.len(), 5); + + // Verify specific ordering for edge cases + assert_eq!(found_paths[0], nibbles_from(vec![0])); + assert_eq!(found_paths[1], nibbles_from(vec![0, 15])); + assert_eq!(found_paths[4], nibbles_from(vec![15, 0])); + + Ok(()) +} + +// ============================================================================= +// 7. 
Leaf Node Tests (Hashed Accounts and Storage) +// ============================================================================= + +/// Test store and retrieve single account +#[test_case(InMemoryProofsStorage::new(); "InMemory")] +#[test_case(create_mdbx_proofs_storage(); "Mdbx")] +#[serial] +fn test_store_and_retrieve_single_account( + storage: S, +) -> Result<(), OpProofsStorageError> { + let account_key = B256::repeat_byte(0x01); + let account = create_test_account(); + + // Store account + storage.store_hashed_accounts(vec![(account_key, Some(account))])?; + + // Retrieve via cursor + let mut cursor = storage.account_hashed_cursor(100)?; + let result = cursor.seek(account_key)?.unwrap(); + + assert_eq!(result.0, account_key); + assert_eq!(result.1.nonce, account.nonce); + assert_eq!(result.1.balance, account.balance); + assert_eq!(result.1.bytecode_hash, account.bytecode_hash); + + Ok(()) +} + +/// Test account cursor navigation +#[test_case(InMemoryProofsStorage::new(); "InMemory")] +#[test_case(create_mdbx_proofs_storage(); "Mdbx")] +#[serial] +fn test_account_cursor_navigation( + storage: S, +) -> Result<(), OpProofsStorageError> { + let accounts = [ + (B256::repeat_byte(0x01), create_test_account()), + (B256::repeat_byte(0x03), create_test_account()), + (B256::repeat_byte(0x05), create_test_account()), + ]; + + // Store accounts + let accounts_to_store: Vec<_> = accounts.iter().map(|(k, v)| (*k, Some(*v))).collect(); + storage.store_hashed_accounts(accounts_to_store)?; + + let mut cursor = storage.account_hashed_cursor(100)?; + + // Test seeking to exact key + let result = cursor.seek(accounts[1].0)?.unwrap(); + assert_eq!(result.0, accounts[1].0); + + // Test seeking to key that doesn't exist (should return next greater) + let seek_key = B256::repeat_byte(0x02); + let result = cursor.seek(seek_key)?.unwrap(); + assert_eq!(result.0, accounts[1].0); // Should find 0x03 + + // Test next() navigation + let result = cursor.next()?.unwrap(); + 
assert_eq!(result.0, accounts[2].0); // Should find 0x05
+
+    // Test next() at end
+    assert!(cursor.next()?.is_none());
+
+    Ok(())
+}
+
+/// Test account block versioning
+#[test_case(InMemoryProofsStorage::new(); "InMemory")]
+#[test_case(create_mdbx_proofs_storage(); "Mdbx")]
+#[serial]
+fn test_account_block_versioning(
+    storage: S,
+) -> Result<(), OpProofsStorageError> {
+    let account_key = B256::repeat_byte(0x01);
+    let account_v1 = create_test_account_with_values(1, 100, 0xBB);
+    let account_v2 = create_test_account_with_values(2, 200, 0xDD);
+
+    // Store account at different blocks
+    storage.store_hashed_accounts(vec![(account_key, Some(account_v1))])?;
+
+    // Cursor with max_block_number=75 should see v1
+    let mut cursor75 = storage.account_hashed_cursor(75)?;
+    let result75 = cursor75.seek(account_key)?.unwrap();
+    assert_eq!(result75.1.nonce, account_v1.nonce);
+    assert_eq!(result75.1.balance, account_v1.balance);
+
+    storage.store_hashed_accounts(vec![(account_key, Some(account_v2))])?;
+
+    // After update, Cursor with max_block_number=150 should see v2
+    let mut cursor150 = storage.account_hashed_cursor(150)?;
+    let result150 = cursor150.seek(account_key)?.unwrap();
+    assert_eq!(result150.1.nonce, account_v2.nonce);
+    assert_eq!(result150.1.balance, account_v2.balance);
+
+    Ok(())
+}
+
+/// Test store and retrieve storage
+#[test_case(InMemoryProofsStorage::new(); "InMemory")]
+#[test_case(create_mdbx_proofs_storage(); "Mdbx")]
+#[serial]
+fn test_store_and_retrieve_storage(
+    storage: S,
+) -> Result<(), OpProofsStorageError> {
+    let hashed_address = B256::repeat_byte(0x01);
+    let storage_slots = vec![
+        (B256::repeat_byte(0x10), U256::from(100)),
+        (B256::repeat_byte(0x20), U256::from(200)),
+        (B256::repeat_byte(0x30), U256::from(300)),
+    ];
+
+    // Store storage slots
+    storage.store_hashed_storages(hashed_address, storage_slots.clone())?;
+
+    // Retrieve via cursor
+    let mut cursor = storage.storage_hashed_cursor(hashed_address, 100)?;
+
+    // Test seeking 
to each slot + for (key, expected_value) in &storage_slots { + let result = cursor.seek(*key)?.unwrap(); + assert_eq!(result.0, *key); + assert_eq!(result.1, *expected_value); + } + + Ok(()) +} + +/// Test storage cursor navigation +#[test_case(InMemoryProofsStorage::new(); "InMemory")] +#[test_case(create_mdbx_proofs_storage(); "Mdbx")] +#[serial] +fn test_storage_cursor_navigation( + storage: S, +) -> Result<(), OpProofsStorageError> { + let hashed_address = B256::repeat_byte(0x01); + let storage_slots = vec![ + (B256::repeat_byte(0x10), U256::from(100)), + (B256::repeat_byte(0x30), U256::from(300)), + (B256::repeat_byte(0x50), U256::from(500)), + ]; + + storage.store_hashed_storages(hashed_address, storage_slots.clone())?; + + let mut cursor = storage.storage_hashed_cursor(hashed_address, 100)?; + + // Start from beginning with next() + let mut found_slots = Vec::new(); + while let Some((key, value)) = cursor.next()? { + found_slots.push((key, value)); + } + + assert_eq!(found_slots.len(), 3); + assert_eq!(found_slots[0], storage_slots[0]); + assert_eq!(found_slots[1], storage_slots[1]); + assert_eq!(found_slots[2], storage_slots[2]); + + Ok(()) +} + +/// Test storage account isolation +#[test_case(InMemoryProofsStorage::new(); "InMemory")] +#[test_case(create_mdbx_proofs_storage(); "Mdbx")] +#[serial] +fn test_storage_account_isolation( + storage: S, +) -> Result<(), OpProofsStorageError> { + let address1 = B256::repeat_byte(0x01); + let address2 = B256::repeat_byte(0x02); + let storage_key = B256::repeat_byte(0x10); + + // Store same storage key for different accounts + storage.store_hashed_storages(address1, vec![(storage_key, U256::from(100))])?; + storage.store_hashed_storages(address2, vec![(storage_key, U256::from(200))])?; + + // Verify each account sees only its own storage + let mut cursor1 = storage.storage_hashed_cursor(address1, 100)?; + let result1 = cursor1.seek(storage_key)?.unwrap(); + assert_eq!(result1.1, U256::from(100)); + + let mut cursor2 
= storage.storage_hashed_cursor(address2, 100)?; + let result2 = cursor2.seek(storage_key)?.unwrap(); + assert_eq!(result2.1, U256::from(200)); + + // Verify cursor1 doesn't see address2's storage + let mut cursor1_iter = storage.storage_hashed_cursor(address1, 100)?; + let mut count = 0; + while cursor1_iter.next()?.is_some() { + count += 1; + } + assert_eq!(count, 1); // Should only see one entry + + Ok(()) +} + +/// Test storage block versioning +#[test_case(InMemoryProofsStorage::new(); "InMemory")] +#[test_case(create_mdbx_proofs_storage(); "Mdbx")] +#[serial] +fn test_storage_block_versioning( + storage: S, +) -> Result<(), OpProofsStorageError> { + let hashed_address = B256::repeat_byte(0x01); + let storage_key = B256::repeat_byte(0x10); + + // Store storage at different blocks + storage.store_hashed_storages(hashed_address, vec![(storage_key, U256::from(100))])?; + + // Cursor with max_block_number=75 should see old value + let mut cursor75 = storage.storage_hashed_cursor(hashed_address, 75)?; + let result75 = cursor75.seek(storage_key)?.unwrap(); + assert_eq!(result75.1, U256::from(100)); + + storage.store_hashed_storages(hashed_address, vec![(storage_key, U256::from(200))])?; + // Cursor with max_block_number=150 should see new value + let mut cursor150 = storage.storage_hashed_cursor(hashed_address, 150)?; + let result150 = cursor150.seek(storage_key)?.unwrap(); + assert_eq!(result150.1, U256::from(200)); + + Ok(()) +} + +/// Test storage zero value deletion +#[test_case(InMemoryProofsStorage::new(); "InMemory")] +#[test_case(create_mdbx_proofs_storage(); "Mdbx")] +#[serial] +fn test_storage_zero_value_deletion( + storage: S, +) -> Result<(), OpProofsStorageError> { + let hashed_address = B256::repeat_byte(0x01); + let storage_key = B256::repeat_byte(0x10); + + // Store non-zero value + storage.store_hashed_storages(hashed_address, vec![(storage_key, U256::from(100))])?; + + // Cursor before deletion should see the value + let mut cursor75 = 
storage.storage_hashed_cursor(hashed_address, 75)?; + let result75 = cursor75.seek(storage_key)?.unwrap(); + assert_eq!(result75.1, U256::from(100)); + + // "Delete" by storing zero value at block 100 + let mut block_state_diff_post_state = HashedPostState::default(); + let mut hashed_storage = HashedStorage::default(); + hashed_storage.storage.insert(storage_key, U256::ZERO); + block_state_diff_post_state.storages.insert(hashed_address, hashed_storage); + + let block_ref = BlockWithParent::new(B256::ZERO, NumHash::new(100, B256::repeat_byte(0x96))); + let block_state_diff = BlockStateDiff { + sorted_trie_updates: TrieUpdatesSorted::default(), + sorted_post_state: block_state_diff_post_state.into_sorted(), + }; + storage.store_trie_updates(block_ref, block_state_diff)?; + + // Cursor after deletion should NOT see the entry (zero values are skipped) + let mut cursor150 = storage.storage_hashed_cursor(hashed_address, 150)?; + let result150 = cursor150.seek(storage_key)?; + assert!(result150.is_none(), "Zero values should be skipped/deleted"); + + Ok(()) +} + +/// Test that zero values are skipped during iteration +#[test_case(InMemoryProofsStorage::new(); "InMemory")] +#[test_case(create_mdbx_proofs_storage(); "Mdbx")] +#[serial] +fn test_storage_cursor_skips_zero_values( + storage: S, +) -> Result<(), OpProofsStorageError> { + let hashed_address = B256::repeat_byte(0x01); + + // Create a mix of non-zero and zero value storage slots + let storage_slots = vec![ + (B256::repeat_byte(0x10), U256::from(100)), // Non-zero + (B256::repeat_byte(0x20), U256::ZERO), // Zero value - should be skipped + (B256::repeat_byte(0x30), U256::from(300)), // Non-zero + (B256::repeat_byte(0x40), U256::ZERO), // Zero value - should be skipped + (B256::repeat_byte(0x50), U256::from(500)), // Non-zero + ]; + + // Store all slots + storage.store_hashed_storages(hashed_address, storage_slots)?; + + // Create cursor and iterate through all entries + let mut cursor = 
storage.storage_hashed_cursor(hashed_address, 100)?; + let mut found_slots = Vec::new(); + while let Some((key, value)) = cursor.next()? { + found_slots.push((key, value)); + } + + // Should only find 3 non-zero values + assert_eq!(found_slots.len(), 3, "Zero values should be skipped during iteration"); + + // Verify the non-zero values are the ones we stored + assert_eq!(found_slots[0], (B256::repeat_byte(0x10), U256::from(100))); + assert_eq!(found_slots[1], (B256::repeat_byte(0x30), U256::from(300))); + assert_eq!(found_slots[2], (B256::repeat_byte(0x50), U256::from(500))); + + // Verify seeking to a zero-value slot returns None or skips to next non-zero + let mut seek_cursor = storage.storage_hashed_cursor(hashed_address, 100)?; + let seek_result = seek_cursor.seek(B256::repeat_byte(0x20))?; + + // Should either return None or skip to the next non-zero value (0x30) + if let Some((key, value)) = seek_result { + assert_eq!(key, B256::repeat_byte(0x30), "Should skip zero value and find next non-zero"); + assert_eq!(value, U256::from(300)); + } + + Ok(()) +} + +/// Test empty cursors +#[test_case(InMemoryProofsStorage::new(); "InMemory")] +#[test_case(create_mdbx_proofs_storage(); "Mdbx")] +#[serial] +fn test_empty_cursors( + storage: S, +) -> Result<(), OpProofsStorageError> { + // Test empty account cursor + let mut account_cursor = storage.account_hashed_cursor(100)?; + assert!(account_cursor.seek(B256::repeat_byte(0x01))?.is_none()); + assert!(account_cursor.next()?.is_none()); + + // Test empty storage cursor + let mut storage_cursor = storage.storage_hashed_cursor(B256::repeat_byte(0x01), 100)?; + assert!(storage_cursor.seek(B256::repeat_byte(0x10))?.is_none()); + assert!(storage_cursor.next()?.is_none()); + + Ok(()) +} + +/// Test cursor boundary conditions +#[test_case(InMemoryProofsStorage::new(); "InMemory")] +#[test_case(create_mdbx_proofs_storage(); "Mdbx")] +#[serial] +fn test_cursor_boundary_conditions( + storage: S, +) -> Result<(), 
OpProofsStorageError> { + let account_key = B256::repeat_byte(0x80); // Middle value + let account = create_test_account(); + + storage.store_hashed_accounts(vec![(account_key, Some(account))])?; + + let mut cursor = storage.account_hashed_cursor(100)?; + + // Seek to minimum key should find our account + let result = cursor.seek(B256::ZERO)?.unwrap(); + assert_eq!(result.0, account_key); + + // Seek to maximum key should find nothing + assert!(cursor.seek(B256::repeat_byte(0xFF))?.is_none()); + + // Seek to key just before our account should find our account + let just_before = B256::repeat_byte(0x7F); + let result = cursor.seek(just_before)?.unwrap(); + assert_eq!(result.0, account_key); + + Ok(()) +} + +/// Test large batch operations +#[test_case(InMemoryProofsStorage::new(); "InMemory")] +#[test_case(create_mdbx_proofs_storage(); "Mdbx")] +#[serial] +fn test_large_batch_operations( + storage: S, +) -> Result<(), OpProofsStorageError> { + // Create large batch of accounts + let mut accounts = Vec::new(); + for i in 0..100 { + let key = B256::from([i as u8; 32]); + let account = create_test_account_with_values(i, i * 1000, (i + 1) as u8); + accounts.push((key, Some(account))); + } + + // Store in batch + storage.store_hashed_accounts(accounts.clone())?; + + // Verify all accounts can be retrieved + let mut cursor = storage.account_hashed_cursor(100)?; + let mut found_count = 0; + while cursor.next()?.is_some() { + found_count += 1; + } + assert_eq!(found_count, 100); + + // Test specific account retrieval + let test_key = B256::from([42u8; 32]); + let result = cursor.seek(test_key)?.unwrap(); + assert_eq!(result.0, test_key); + assert_eq!(result.1.nonce, 42); + + Ok(()) +} + +/// Test wiped storage in [`HashedPostState`] +/// +/// When `store_trie_updates` receives a [`HashedPostState`] with wiped=true for a storage entry, +/// it should iterate all existing values for that address and create deletion entries for them. 
+#[test_case(InMemoryProofsStorage::new(); "InMemory")] +#[test_case(create_mdbx_proofs_storage(); "Mdbx")] +#[serial] +fn test_store_trie_updates_with_wiped_storage( + storage: S, +) -> Result<(), OpProofsStorageError> { + use reth_trie::HashedStorage; + + let hashed_address = B256::repeat_byte(0x01); + let block_ref = BlockWithParent::new(B256::ZERO, NumHash::new(100, B256::repeat_byte(0x96))); + + // First, store some storage values at block 50 + let storage_slots = vec![ + (B256::repeat_byte(0x10), U256::from(100)), + (B256::repeat_byte(0x20), U256::from(200)), + (B256::repeat_byte(0x30), U256::from(300)), + (B256::repeat_byte(0x40), U256::from(400)), + ]; + + storage.store_hashed_storages(hashed_address, storage_slots.clone())?; + + // Verify all values are present at block 75 + let mut cursor75 = storage.storage_hashed_cursor(hashed_address, 75)?; + let mut found_slots = Vec::new(); + while let Some((key, value)) = cursor75.next()? { + found_slots.push((key, value)); + } + assert_eq!(found_slots.len(), 4, "All storage slots should be present before wipe"); + assert_eq!(found_slots[0], (B256::repeat_byte(0x10), U256::from(100))); + assert_eq!(found_slots[1], (B256::repeat_byte(0x20), U256::from(200))); + assert_eq!(found_slots[2], (B256::repeat_byte(0x30), U256::from(300))); + assert_eq!(found_slots[3], (B256::repeat_byte(0x40), U256::from(400))); + + // Now create a HashedPostState with wiped=true for this address at block 100 + let mut post_state = HashedPostState::default(); + let wiped_storage = HashedStorage::new(true); // wiped=true, empty storage map + post_state.storages.insert(hashed_address, wiped_storage); + + let block_state_diff = BlockStateDiff { + sorted_trie_updates: TrieUpdatesSorted::default(), + sorted_post_state: post_state.into_sorted(), + }; + + // Store the wiped state + storage.store_trie_updates(block_ref, block_state_diff)?; + + // After wiping, cursor at block 150 should see NO storage values + let mut cursor150 = 
storage.storage_hashed_cursor(hashed_address, 150)?; + let mut found_slots_after_wipe = Vec::new(); + while let Some((key, value)) = cursor150.next()? { + found_slots_after_wipe.push((key, value)); + } + + assert_eq!( + found_slots_after_wipe.len(), + 0, + "All storage slots should be deleted after wipe. Found: {found_slots_after_wipe:?}" + ); + + // Verify individual seeks also return None + for (slot, _) in &storage_slots { + let mut seek_cursor = storage.storage_hashed_cursor(hashed_address, 150)?; + let result = seek_cursor.seek(*slot)?; + assert!( + result.is_none() || result.unwrap().0 != *slot, + "Storage slot {slot:?} should be deleted after wipe" + ); + } + + // Verify cursor at block 75 (before wipe) still sees all values + let mut cursor75_after = storage.storage_hashed_cursor(hashed_address, 75)?; + let mut found_slots_before_wipe = Vec::new(); + while let Some((key, value)) = cursor75_after.next()? { + found_slots_before_wipe.push((key, value)); + } + assert_eq!( + found_slots_before_wipe.len(), + 4, + "All storage slots should still be present when querying before wipe block" + ); + + Ok(()) +} + +/// Test that `store_trie_updates` properly stores branch nodes, leaf nodes, and removals +/// +/// This test verifies that all data stored via `store_trie_updates` can be read back +/// through the cursor APIs. 
+#[test_case(InMemoryProofsStorage::new(); "InMemory")] +#[test_case(create_mdbx_proofs_storage(); "Mdbx")] +#[serial] +fn test_store_trie_updates_comprehensive( + storage: S, +) -> Result<(), OpProofsStorageError> { + use reth_trie::{HashedStorage, updates::StorageTrieUpdates}; + + let block_ref = BlockWithParent::new(B256::ZERO, NumHash::new(100, B256::repeat_byte(0x96))); + + // Create comprehensive trie updates with branches, leaves, and removals + let mut trie_updates = TrieUpdates::default(); + + // Add account branch nodes + let account_path1 = nibbles_from(vec![1, 2, 3]); + let account_path2 = nibbles_from(vec![4, 5, 6]); + let account_branch1 = create_test_branch(); + let account_branch2 = create_test_branch_variant(); + + trie_updates.account_nodes.insert(account_path1, account_branch1); + trie_updates.account_nodes.insert(account_path2, account_branch2); + + // Add removed account nodes + let removed_account_path = nibbles_from(vec![7, 8, 9]); + trie_updates.removed_nodes.insert(removed_account_path); + + // Add storage branch nodes for an address + let hashed_address = B256::repeat_byte(0x42); + let storage_path1 = nibbles_from(vec![1, 1]); + let storage_path2 = nibbles_from(vec![2, 2]); + let storage_branch = create_test_branch(); + + let mut storage_trie = StorageTrieUpdates::default(); + storage_trie.storage_nodes.insert(storage_path1, storage_branch.clone()); + storage_trie.storage_nodes.insert(storage_path2, storage_branch); + + // Add removed storage node + let removed_storage_path = nibbles_from(vec![3, 3]); + storage_trie.removed_nodes.insert(removed_storage_path); + + trie_updates.insert_storage_updates(hashed_address, storage_trie); + + // Create post state with accounts and storage + let mut post_state = HashedPostState::default(); + + // Add accounts + let account1_addr = B256::repeat_byte(0x10); + let account2_addr = B256::repeat_byte(0x20); + let account1 = create_test_account_with_values(1, 1000, 0xAA); + let account2 = 
create_test_account_with_values(2, 2000, 0xBB); + + post_state.accounts.insert(account1_addr, Some(account1)); + post_state.accounts.insert(account2_addr, Some(account2)); + + // Add deleted account + let deleted_account_addr = B256::repeat_byte(0x30); + post_state.accounts.insert(deleted_account_addr, None); + + // Add storage for an address + let storage_addr = B256::repeat_byte(0x50); + let mut hashed_storage = HashedStorage::new(false); + hashed_storage.storage.insert(B256::repeat_byte(0x01), U256::from(111)); + hashed_storage.storage.insert(B256::repeat_byte(0x02), U256::from(222)); + hashed_storage.storage.insert(B256::repeat_byte(0x03), U256::ZERO); // Deleted storage + post_state.storages.insert(storage_addr, hashed_storage); + + let block_state_diff = BlockStateDiff { + sorted_trie_updates: trie_updates.into_sorted(), + sorted_post_state: post_state.into_sorted(), + }; + + // Store the updates + storage.store_trie_updates(block_ref, block_state_diff)?; + + // ========== Verify Account Branch Nodes ========== + let mut account_trie_cursor = storage.account_trie_cursor(block_ref.block.number + 10)?; + + // Should find the added branches + let result1 = account_trie_cursor.seek_exact(account_path1)?; + assert!(result1.is_some(), "Account branch node 1 should be found"); + assert_eq!(result1.unwrap().0, account_path1); + + let result2 = account_trie_cursor.seek_exact(account_path2)?; + assert!(result2.is_some(), "Account branch node 2 should be found"); + assert_eq!(result2.unwrap().0, account_path2); + + // Removed node should not be found + let removed_result = account_trie_cursor.seek_exact(removed_account_path)?; + assert!(removed_result.is_none(), "Removed account node should not be found"); + + // ========== Verify Storage Branch Nodes ========== + let mut storage_trie_cursor = + storage.storage_trie_cursor(hashed_address, block_ref.block.number + 10)?; + + let storage_result1 = storage_trie_cursor.seek_exact(storage_path1)?; + 
assert!(storage_result1.is_some(), "Storage branch node 1 should be found"); + + let storage_result2 = storage_trie_cursor.seek_exact(storage_path2)?; + assert!(storage_result2.is_some(), "Storage branch node 2 should be found"); + + // Removed storage node should not be found + let removed_storage_result = storage_trie_cursor.seek_exact(removed_storage_path)?; + assert!(removed_storage_result.is_none(), "Removed storage node should not be found"); + + // ========== Verify Account Leaves ========== + let mut account_cursor = storage.account_hashed_cursor(block_ref.block.number + 10)?; + + let acc1_result = account_cursor.seek(account1_addr)?; + assert!(acc1_result.is_some(), "Account 1 should be found"); + assert_eq!(acc1_result.unwrap().0, account1_addr); + assert_eq!(acc1_result.unwrap().1.nonce, 1); + assert_eq!(acc1_result.unwrap().1.balance, U256::from(1000)); + + let acc2_result = account_cursor.seek(account2_addr)?; + assert!(acc2_result.is_some(), "Account 2 should be found"); + assert_eq!(acc2_result.unwrap().1.nonce, 2); + + // Deleted account should not be found + let deleted_acc_result = account_cursor.seek(deleted_account_addr)?; + assert!( + deleted_acc_result.is_none() || deleted_acc_result.unwrap().0 != deleted_account_addr, + "Deleted account should not be found" + ); + + // ========== Verify Storage Leaves ========== + let mut storage_cursor = + storage.storage_hashed_cursor(storage_addr, block_ref.block.number + 10)?; + + let slot1_result = storage_cursor.seek(B256::repeat_byte(0x01))?; + assert!(slot1_result.is_some(), "Storage slot 1 should be found"); + assert_eq!(slot1_result.unwrap().1, U256::from(111)); + + let slot2_result = storage_cursor.seek(B256::repeat_byte(0x02))?; + assert!(slot2_result.is_some(), "Storage slot 2 should be found"); + assert_eq!(slot2_result.unwrap().1, U256::from(222)); + + // Zero-valued storage should not be found (deleted) + let slot3_result = storage_cursor.seek(B256::repeat_byte(0x03))?; + assert!( + 
slot3_result.is_none() || slot3_result.unwrap().0 != B256::repeat_byte(0x03), + "Zero-valued storage slot should not be found" + ); + + // ========== Verify fetch_trie_updates can retrieve the data ========== + let fetched_diff = storage.fetch_trie_updates(block_ref.block.number)?; + + // Check that trie updates are stored + assert_eq!( + fetched_diff.sorted_trie_updates.account_nodes_ref().len(), + 3, + "Should have 3 account nodes, including removed" + ); + assert_eq!( + fetched_diff.sorted_trie_updates.storage_tries_ref().len(), + 1, + "Should have 1 storage trie" + ); + + // Check that post state is stored + assert_eq!( + fetched_diff.sorted_post_state.accounts.len(), + 3, + "Should have 3 accounts (including deleted)" + ); + assert_eq!(fetched_diff.sorted_post_state.storages.len(), 1, "Should have 1 storage entry"); + + Ok(()) +} + +/// Test that `replace_updates` properly applies hashed/trie storage updates to the DB +/// +/// This test verifies the bug fix where `replace_updates` was only storing `trie_updates` +/// and `post_states` directly without populating the internal data structures +/// (`hashed_accounts`, `hashed_storages`, `account_branches`, `storage_branches`). 
+#[test_case(InMemoryProofsStorage::new(); "InMemory")] +#[test_case(create_mdbx_proofs_storage(); "Mdbx")] +#[serial] +fn test_replace_updates_applies_all_updates( + storage: S, +) -> Result<(), OpProofsStorageError> { + use reth_trie::{HashedStorage, updates::StorageTrieUpdates}; + + let block_ref_50 = BlockWithParent::new(B256::ZERO, NumHash::new(50, B256::repeat_byte(0x96))); + + // ========== Setup: Store initial state at blocks 50, 100, 101 ========== + let initial_account_addr = B256::repeat_byte(0x10); + let initial_account = create_test_account_with_values(1, 1000, 0xAA); + + let initial_storage_addr = B256::repeat_byte(0x20); + let initial_storage_slot = B256::repeat_byte(0x01); + let initial_storage_value = U256::from(100); + + let initial_branch_path = nibbles_from(vec![1, 2, 3]); + let initial_branch = create_test_branch(); + + // Store initial data at block 50 + let mut initial_trie_updates_50 = TrieUpdates::default(); + initial_trie_updates_50.account_nodes.insert(initial_branch_path, initial_branch.clone()); + + let mut initial_post_state_50 = HashedPostState::default(); + initial_post_state_50.accounts.insert(initial_account_addr, Some(initial_account)); + + let initial_diff_50 = BlockStateDiff { + sorted_trie_updates: initial_trie_updates_50.into_sorted(), + sorted_post_state: initial_post_state_50.into_sorted(), + }; + storage.store_trie_updates(block_ref_50, initial_diff_50)?; + + // Store data at block 100 (common block) + let mut initial_trie_updates_100 = TrieUpdates::default(); + let common_branch_path = nibbles_from(vec![4, 5, 6]); + initial_trie_updates_100.account_nodes.insert(common_branch_path, initial_branch.clone()); + + let mut initial_post_state_100 = HashedPostState::default(); + let mut initial_storage_100 = HashedStorage::new(false); + initial_storage_100.storage.insert(initial_storage_slot, initial_storage_value); + initial_post_state_100.storages.insert(initial_storage_addr, initial_storage_100); + + let initial_diff_100 = 
BlockStateDiff { + sorted_trie_updates: initial_trie_updates_100.into_sorted(), + sorted_post_state: initial_post_state_100.into_sorted(), + }; + + let block_ref_100 = + BlockWithParent::new(block_ref_50.block.hash, NumHash::new(100, B256::repeat_byte(0x97))); + + storage.store_trie_updates(block_ref_100, initial_diff_100)?; + + // Store data at block 101 (will be replaced) + let mut initial_trie_updates_101 = TrieUpdates::default(); + let old_branch_path = nibbles_from(vec![7, 8, 9]); + initial_trie_updates_101.account_nodes.insert(old_branch_path, initial_branch); + + let mut initial_post_state_101 = HashedPostState::default(); + let old_account_addr = B256::repeat_byte(0x30); + let old_account = create_test_account_with_values(99, 9999, 0xFF); + initial_post_state_101.accounts.insert(old_account_addr, Some(old_account)); + + let initial_diff_101 = BlockStateDiff { + sorted_trie_updates: initial_trie_updates_101.into_sorted(), + sorted_post_state: initial_post_state_101.into_sorted(), + }; + let block_ref_101 = + BlockWithParent::new(block_ref_100.block.hash, NumHash::new(101, B256::repeat_byte(0x98))); + storage.store_trie_updates(block_ref_101, initial_diff_101)?; + + let block_ref_102 = + BlockWithParent::new(block_ref_101.block.hash, NumHash::new(102, B256::repeat_byte(0x99))); + + // ========== Verify initial state exists ========== + // Verify block 50 data exists + let mut cursor_initial = storage.account_trie_cursor(75)?; + assert!( + cursor_initial.seek_exact(initial_branch_path)?.is_some(), + "Initial branch should exist before replace" + ); + + // Verify block 101 old data exists + let mut cursor_old = storage.account_trie_cursor(150)?; + assert!( + cursor_old.seek_exact(old_branch_path)?.is_some(), + "Old branch at block 101 should exist before replace" + ); + + let mut account_cursor_old = storage.account_hashed_cursor(150)?; + assert!( + account_cursor_old.seek(old_account_addr)?.is_some(), + "Old account at block 101 should exist before replace" + 
); + + // ========== Call replace_updates to replace blocks after 100 ========== + let mut blocks_to_add: Vec<(BlockWithParent, BlockStateDiff)> = Vec::default(); + + // New data for block 101 + let new_account_addr = B256::repeat_byte(0x40); + let new_account = create_test_account_with_values(5, 5000, 0xCC); + + let new_storage_addr = B256::repeat_byte(0x50); + let new_storage_slot = B256::repeat_byte(0x02); + let new_storage_value = U256::from(999); + + let new_branch_path = nibbles_from(vec![10, 11, 12]); + let new_branch = create_test_branch_variant(); + + let storage_branch_path = nibbles_from(vec![5, 5]); + let storage_hashed_addr = B256::repeat_byte(0x60); + + let mut new_trie_updates = TrieUpdates::default(); + new_trie_updates.account_nodes.insert(new_branch_path, new_branch.clone()); + + // Add storage trie updates + let mut storage_trie = StorageTrieUpdates::default(); + storage_trie.storage_nodes.insert(storage_branch_path, new_branch.clone()); + new_trie_updates.insert_storage_updates(storage_hashed_addr, storage_trie); + + let mut new_post_state = HashedPostState::default(); + new_post_state.accounts.insert(new_account_addr, Some(new_account)); + + let mut new_storage = HashedStorage::new(false); + new_storage.storage.insert(new_storage_slot, new_storage_value); + new_post_state.storages.insert(new_storage_addr, new_storage); + + blocks_to_add.push(( + block_ref_101, + BlockStateDiff { + sorted_trie_updates: new_trie_updates.into_sorted(), + sorted_post_state: new_post_state.into_sorted(), + }, + )); + + // New data for block 102 + let block_102_account_addr = B256::repeat_byte(0x70); + let block_102_account = create_test_account_with_values(10, 10000, 0xDD); + + let mut trie_updates_102 = TrieUpdates::default(); + let block_102_branch_path = nibbles_from(vec![15, 14, 13]); + trie_updates_102.account_nodes.insert(block_102_branch_path, new_branch); + + let mut post_state_102 = HashedPostState::default(); + 
post_state_102.accounts.insert(block_102_account_addr, Some(block_102_account)); + + blocks_to_add.push(( + block_ref_102, + BlockStateDiff { + sorted_trie_updates: trie_updates_102.into_sorted(), + sorted_post_state: post_state_102.into_sorted(), + }, + )); + + // Execute replace_updates + storage.replace_updates(BlockNumHash::new(100, block_ref_100.block.hash), blocks_to_add)?; + // ========== Verify that data up to block 100 still exists ========== + let mut cursor_50 = storage.account_trie_cursor(75)?; + assert!( + cursor_50.seek_exact(initial_branch_path)?.is_some(), + "Block 50 branch should still exist after replace" + ); + + let mut cursor_100 = storage.account_trie_cursor(100)?; + assert!( + cursor_100.seek_exact(common_branch_path)?.is_some(), + "Block 100 branch should still exist after replace" + ); + + let mut storage_cursor_100 = storage.storage_hashed_cursor(initial_storage_addr, 100)?; + let result_100 = storage_cursor_100.seek(initial_storage_slot)?; + assert!(result_100.is_some(), "Block 100 storage should still exist after replace"); + assert_eq!( + result_100.unwrap().1, + initial_storage_value, + "Block 100 storage value should be unchanged" + ); + + // ========== Verify that old data after block 100 is gone ========== + let mut cursor_old_gone = storage.account_trie_cursor(150)?; + assert!( + cursor_old_gone.seek_exact(old_branch_path)?.is_none(), + "Old branch at block 101 should be removed after replace" + ); + + let mut account_cursor_old_gone = storage.account_hashed_cursor(150)?; + let old_acc_result = account_cursor_old_gone.seek(old_account_addr)?; + assert!( + old_acc_result.is_none() || old_acc_result.unwrap().0 != old_account_addr, + "Old account at block 101 should be removed after replace" + ); + + // ========== Verify new data is properly accessible via cursors ========== + + // Verify new account branch nodes + let mut trie_cursor = storage.account_trie_cursor(150)?; + let branch_result = trie_cursor.seek_exact(new_branch_path)?; 
+ assert!(branch_result.is_some(), "New account branch should be accessible via cursor"); + assert_eq!(branch_result.unwrap().0, new_branch_path); + + // Verify new storage branch nodes + let mut storage_trie_cursor = storage.storage_trie_cursor(storage_hashed_addr, 150)?; + let storage_branch_result = storage_trie_cursor.seek_exact(storage_branch_path)?; + assert!(storage_branch_result.is_some(), "New storage branch should be accessible via cursor"); + assert_eq!(storage_branch_result.unwrap().0, storage_branch_path); + + // Verify new hashed accounts + let mut account_cursor = storage.account_hashed_cursor(150)?; + let account_result = account_cursor.seek(new_account_addr)?; + assert!(account_result.is_some(), "New account should be accessible via cursor"); + assert_eq!(account_result.as_ref().unwrap().0, new_account_addr); + assert_eq!(account_result.as_ref().unwrap().1.nonce, new_account.nonce); + assert_eq!(account_result.as_ref().unwrap().1.balance, new_account.balance); + assert_eq!(account_result.as_ref().unwrap().1.bytecode_hash, new_account.bytecode_hash); + + // Verify new hashed storages + let mut storage_cursor = storage.storage_hashed_cursor(new_storage_addr, 150)?; + let storage_result = storage_cursor.seek(new_storage_slot)?; + assert!(storage_result.is_some(), "New storage should be accessible via cursor"); + assert_eq!(storage_result.as_ref().unwrap().0, new_storage_slot); + assert_eq!(storage_result.as_ref().unwrap().1, new_storage_value); + + // Verify block 102 data + let mut trie_cursor_102 = storage.account_trie_cursor(150)?; + let branch_result_102 = trie_cursor_102.seek_exact(block_102_branch_path)?; + assert!(branch_result_102.is_some(), "Block 102 branch should be accessible"); + assert_eq!(branch_result_102.unwrap().0, block_102_branch_path); + + let mut account_cursor_102 = storage.account_hashed_cursor(150)?; + let account_result_102 = account_cursor_102.seek(block_102_account_addr)?; + assert!(account_result_102.is_some(), "Block 102 
account should be accessible"); + assert_eq!(account_result_102.as_ref().unwrap().0, block_102_account_addr); + assert_eq!(account_result_102.as_ref().unwrap().1.nonce, block_102_account.nonce); + + // Verify fetch_trie_updates returns the new data + let fetched_101 = storage.fetch_trie_updates(101)?; + assert_eq!( + fetched_101.sorted_trie_updates.account_nodes_ref().len(), + 1, + "Should have 1 account branch node at block 101" + ); + assert!( + fetched_101 + .sorted_trie_updates + .account_nodes_ref() + .iter() + .any(|(addr, _)| *addr == new_branch_path), + "New branch path should be in trie_updates" + ); + assert_eq!( + fetched_101.sorted_post_state.accounts.len(), + 1, + "Should have 1 account at block 101" + ); + assert!( + fetched_101.sorted_post_state.accounts.iter().any(|(addr, _)| *addr == new_account_addr), + "New account should be in post_state" + ); + + Ok(()) +} + +/// Test that pure deletions (nodes only in `removed_nodes`) are properly stored +/// +/// This test verifies that when a node appears only in `removed_nodes` (not in updates), +/// it is properly stored as a deletion and subsequent queries return None for that path. 
+#[test_case(InMemoryProofsStorage::new(); "InMemory")] +#[test_case(create_mdbx_proofs_storage(); "Mdbx")] +#[serial] +fn test_pure_deletions_stored_correctly( + storage: S, +) -> Result<(), OpProofsStorageError> { + use reth_trie::updates::StorageTrieUpdates; + + // ========== Setup: Store initial branch nodes at block 50 ========== + let account_path1 = nibbles_from(vec![1, 2, 3]); + let account_path2 = nibbles_from(vec![4, 5, 6]); + let storage_path1 = nibbles_from(vec![7, 8, 9]); + let storage_path2 = nibbles_from(vec![10, 11, 12]); + let storage_address = B256::repeat_byte(0x42); + + let initial_branch = create_test_branch(); + + let mut initial_trie_updates = TrieUpdates::default(); + initial_trie_updates.account_nodes.insert(account_path1, initial_branch.clone()); + initial_trie_updates.account_nodes.insert(account_path2, initial_branch.clone()); + + let mut storage_trie = StorageTrieUpdates::default(); + storage_trie.storage_nodes.insert(storage_path1, initial_branch.clone()); + storage_trie.storage_nodes.insert(storage_path2, initial_branch); + initial_trie_updates.insert_storage_updates(storage_address, storage_trie); + + let initial_diff = BlockStateDiff { + sorted_trie_updates: initial_trie_updates.into_sorted(), + sorted_post_state: HashedPostStateSorted::default(), + }; + + let block_ref_50 = BlockWithParent::new(B256::ZERO, NumHash::new(50, B256::repeat_byte(0x96))); + + storage.store_trie_updates(block_ref_50, initial_diff)?; + + // Verify initial state exists at block 75 + let mut cursor_75 = storage.account_trie_cursor(75)?; + assert!( + cursor_75.seek_exact(account_path1)?.is_some(), + "Initial account branch 1 should exist at block 75" + ); + assert!( + cursor_75.seek_exact(account_path2)?.is_some(), + "Initial account branch 2 should exist at block 75" + ); + + let mut storage_cursor_75 = storage.storage_trie_cursor(storage_address, 75)?; + assert!( + storage_cursor_75.seek_exact(storage_path1)?.is_some(), + "Initial storage branch 1 should 
exist at block 75" + ); + assert!( + storage_cursor_75.seek_exact(storage_path2)?.is_some(), + "Initial storage branch 2 should exist at block 75" + ); + + // ========== At block 100: Mark paths as deleted (ONLY in removed_nodes) ========== + let mut deletion_trie_updates = TrieUpdates::default(); + + // Add to removed_nodes ONLY (no updates) + deletion_trie_updates.removed_nodes.insert(account_path1); + + // Do the same for storage branch + let mut deletion_storage_trie = StorageTrieUpdates::default(); + deletion_storage_trie.removed_nodes.insert(storage_path1); + deletion_trie_updates.insert_storage_updates(storage_address, deletion_storage_trie); + + let deletion_diff = BlockStateDiff { + sorted_trie_updates: deletion_trie_updates.into_sorted(), + sorted_post_state: HashedPostStateSorted::default(), + }; + + let block_ref_100 = + BlockWithParent::new(B256::repeat_byte(0x96), NumHash::new(100, B256::repeat_byte(0x97))); + + storage.store_trie_updates(block_ref_100, deletion_diff)?; + + // ========== Verify that deleted nodes return None at block 150 ========== + + // Deleted account branch should not be found + let mut cursor_150 = storage.account_trie_cursor(150)?; + let account_result = cursor_150.seek_exact(account_path1)?; + assert!(account_result.is_none(), "Deleted account branch should return None at block 150"); + + // Non-deleted account branch should still exist + let account_result2 = cursor_150.seek_exact(account_path2)?; + assert!( + account_result2.is_some(), + "Non-deleted account branch should still exist at block 150" + ); + + // Deleted storage branch should not be found + let mut storage_cursor_150 = storage.storage_trie_cursor(storage_address, 150)?; + let storage_result = storage_cursor_150.seek_exact(storage_path1)?; + assert!(storage_result.is_none(), "Deleted storage branch should return None at block 150"); + + // Non-deleted storage branch should still exist + let storage_result2 = storage_cursor_150.seek_exact(storage_path2)?; + 
assert!( + storage_result2.is_some(), + "Non-deleted storage branch should still exist at block 150" + ); + + // ========== Verify that the nodes still exist at block 75 (before deletion) ========== + let mut cursor_75_after = storage.account_trie_cursor(75)?; + assert!( + cursor_75_after.seek_exact(account_path1)?.is_some(), + "Deleted node should still exist at block 75 (before deletion)" + ); + + let mut storage_cursor_75_after = storage.storage_trie_cursor(storage_address, 75)?; + assert!( + storage_cursor_75_after.seek_exact(storage_path1)?.is_some(), + "Deleted storage node should still exist at block 75 (before deletion)" + ); + + // ========== Verify iteration skips deleted nodes ========== + let mut cursor_iter = storage.account_trie_cursor(150)?; + let mut found_paths = Vec::new(); + while let Some((path, _)) = cursor_iter.next()? { + found_paths.push(path); + } + + assert!(!found_paths.contains(&account_path1), "Iteration should skip deleted node"); + assert!(found_paths.contains(&account_path2), "Iteration should include non-deleted node"); + + Ok(()) +} + +/// Test that updates take precedence over removals when both are present +/// +/// This test verifies that when a path appears in both `removed_nodes` and `account_nodes`, +/// the update from `account_nodes` takes precedence. This is critical for correctness +/// when processing trie updates that both remove and update the same node. 
+#[test_case(InMemoryProofsStorage::new(); "InMemory")] +#[test_case(create_mdbx_proofs_storage(); "Mdbx")] +#[serial] +fn test_updates_take_precedence_over_removals( + storage: S, +) -> Result<(), OpProofsStorageError> { + use reth_trie::updates::StorageTrieUpdates; + + // ========== Setup: Store initial branch nodes at block 50 ========== + let account_path = nibbles_from(vec![1, 2, 3]); + let storage_path = nibbles_from(vec![4, 5, 6]); + let storage_address = B256::repeat_byte(0x42); + + let initial_branch = create_test_branch(); + + let mut initial_trie_updates = TrieUpdates::default(); + initial_trie_updates.account_nodes.insert(account_path, initial_branch.clone()); + + let mut storage_trie = StorageTrieUpdates::default(); + storage_trie.storage_nodes.insert(storage_path, initial_branch.clone()); + initial_trie_updates.insert_storage_updates(storage_address, storage_trie); + + let initial_diff = BlockStateDiff { + sorted_trie_updates: initial_trie_updates.into_sorted(), + sorted_post_state: HashedPostStateSorted::default(), + }; + + let block_ref_50 = BlockWithParent::new(B256::ZERO, NumHash::new(50, B256::repeat_byte(0x96))); + + storage.store_trie_updates(block_ref_50, initial_diff)?; + + // Verify initial state exists at block 75 + let mut cursor_75 = storage.account_trie_cursor(75)?; + assert!( + cursor_75.seek_exact(account_path)?.is_some(), + "Initial account branch should exist at block 75" + ); + + let mut storage_cursor_75 = storage.storage_trie_cursor(storage_address, 75)?; + assert!( + storage_cursor_75.seek_exact(storage_path)?.is_some(), + "Initial storage branch should exist at block 75" + ); + + // ========== At block 100: Add paths to BOTH removed_nodes AND account_nodes ========== + // This simulates a scenario where a node is both removed and updated + // The update should take precedence + let updated_branch = create_test_branch_variant(); + + let mut conflicting_trie_updates = TrieUpdates::default(); + + // Add to removed_nodes + 
conflicting_trie_updates.removed_nodes.insert(account_path); + + // Also add to account_nodes (this should take precedence) + conflicting_trie_updates.account_nodes.insert(account_path, updated_branch.clone()); + + // Do the same for storage branch + let mut conflicting_storage_trie = StorageTrieUpdates::default(); + conflicting_storage_trie.removed_nodes.insert(storage_path); + conflicting_storage_trie.storage_nodes.insert(storage_path, updated_branch.clone()); + conflicting_trie_updates.insert_storage_updates(storage_address, conflicting_storage_trie); + + let conflicting_diff = BlockStateDiff { + sorted_trie_updates: conflicting_trie_updates.into_sorted(), + sorted_post_state: HashedPostStateSorted::default(), + }; + + let block_ref_100 = + BlockWithParent::new(B256::repeat_byte(0x96), NumHash::new(100, B256::repeat_byte(0x97))); + + storage.store_trie_updates(block_ref_100, conflicting_diff)?; + + // ========== Verify that updates took precedence at block 150 ========== + + // Account branch should exist (not deleted) with the updated value + let mut cursor_150 = storage.account_trie_cursor(150)?; + let account_result = cursor_150.seek_exact(account_path)?; + assert!( + account_result.is_some(), + "Account branch should exist at block 150 (update should take precedence over removal)" + ); + let (found_path, found_branch) = account_result.unwrap(); + assert_eq!(found_path, account_path); + // Verify it's the updated branch, not the initial one + assert_eq!( + found_branch.state_mask, updated_branch.state_mask, + "Account branch should be the updated version, not the initial one" + ); + + // Storage branch should exist (not deleted) with the updated value + let mut storage_cursor_150 = storage.storage_trie_cursor(storage_address, 150)?; + let storage_result = storage_cursor_150.seek_exact(storage_path)?; + assert!( + storage_result.is_some(), + "Storage branch should exist at block 150 (update should take precedence over removal)" + ); + let (found_storage_path, 
found_storage_branch) = storage_result.unwrap(); + assert_eq!(found_storage_path, storage_path); + // Verify it's the updated branch + assert_eq!( + found_storage_branch.state_mask, updated_branch.state_mask, + "Storage branch should be the updated version, not the initial one" + ); + + // ========== Verify that the old version still exists at block 75 ========== + let mut cursor_75_after = storage.account_trie_cursor(75)?; + let result_75 = cursor_75_after.seek_exact(account_path)?; + assert!(result_75.is_some(), "Initial version should still exist at block 75"); + let (_, branch_75) = result_75.unwrap(); + assert_eq!( + branch_75.state_mask, initial_branch.state_mask, + "Block 75 should see the initial branch, not the updated one" + ); + + Ok(()) +} diff --git a/rust/op-reth/crates/trie/tests/live.rs b/rust/op-reth/crates/trie/tests/live.rs new file mode 100644 index 0000000000000..64f08632ff874 --- /dev/null +++ b/rust/op-reth/crates/trie/tests/live.rs @@ -0,0 +1,508 @@ +//! End-to-end test of the live trie collector. 
+ +use alloy_consensus::{BlockHeader, Header, TxEip2930, constants::ETH_TO_WEI}; +use alloy_genesis::{Genesis, GenesisAccount}; +use alloy_primitives::{Address, B256, TxKind, U256, keccak256}; +use derive_more::Constructor; +use reth_chainspec::{ChainSpec, ChainSpecBuilder, EthereumHardfork, MAINNET, MIN_TRANSACTION_GAS}; +use reth_db::Database; +use reth_db_common::init::init_genesis; +use reth_ethereum_primitives::{Block, BlockBody, Receipt, Transaction, TransactionSigned}; +use reth_evm::{ConfigureEvm, execute::Executor}; +use reth_evm_ethereum::EthEvmConfig; +use reth_node_api::{NodePrimitives, NodeTypesWithDB}; +use reth_optimism_trie::{ + MdbxProofsStorage, OpProofsStorage, OpProofsStorageError, initialize::InitializationJob, + live::LiveTrieCollector, +}; +use reth_primitives_traits::{Block as _, RecoveredBlock}; +use reth_provider::{ + BlockWriter as _, ExecutionOutcome, HashedPostStateProvider, LatestStateProviderRef, + ProviderFactory, StateRootProvider, + providers::{BlockchainProvider, ProviderNodeTypes}, + test_utils::create_test_provider_factory_with_chain_spec, +}; +use reth_revm::database::StateProviderDatabase; +use secp256k1::{Keypair, Secp256k1, rand::rng}; +use std::sync::Arc; +use tempfile::TempDir; + +/// Converts a secp256k1 public key to an Ethereum address. +fn public_key_to_address(pubkey: secp256k1::PublicKey) -> Address { + let hash = keccak256(&pubkey.serialize_uncompressed()[1..]); + Address::from_slice(&hash[12..]) +} + +/// Signs a transaction with the given keypair. 
+fn sign_tx_with_key_pair(key_pair: Keypair, tx: Transaction) -> TransactionSigned { + use alloy_consensus::SignableTransaction; + use reth_primitives_traits::crypto::secp256k1::sign_message; + let secret = B256::from_slice(&key_pair.secret_bytes()); + let sig = sign_message(secret, tx.signature_hash()).unwrap(); + tx.into_signed(sig).into() +} + +/// Specification for a transaction within a block +#[derive(Debug, Clone)] +struct TxSpec { + /// Recipient address for the transaction + to: Address, + /// Value to transfer (in wei) + value: U256, + /// Nonce for the transaction (will be automatically assigned if None) + nonce: Option, +} + +impl TxSpec { + /// Create a simple transfer transaction + const fn transfer(to: Address, value: U256) -> Self { + Self { to, value, nonce: None } + } +} + +/// Specification for a block in the test chain +#[derive(Debug, Clone, Constructor)] +struct BlockSpec { + /// Transactions to include in this block + txs: Vec, +} + +/// Configuration for a test scenario +#[derive(Debug, Constructor)] +struct TestScenario { + /// Blocks to execute before running the initialization job + blocks_before_initialization: Vec, + /// Blocks to execute after initialization using the live collector + blocks_after_initialization: Vec, +} + +/// Helper to create a chain spec with a genesis account funded +fn chain_spec_with_address(address: Address) -> Arc { + Arc::new( + ChainSpecBuilder::default() + .chain(MAINNET.chain) + .genesis(Genesis { + alloc: [( + address, + GenesisAccount { balance: U256::from(10 * ETH_TO_WEI), ..Default::default() }, + )] + .into(), + ..MAINNET.genesis.clone() + }) + .paris_activated() + .build(), + ) +} + +/// Creates a block from a spec, executing transactions with the given keypair +fn create_block_from_spec( + spec: &BlockSpec, + block_number: u64, + parent_hash: B256, + chain_spec: &Arc, + key_pair: Keypair, + nonce_counter: &mut u64, +) -> RecoveredBlock { + let transactions: Vec = spec + .txs + .iter() + 
.map(|tx_spec| { + let nonce = tx_spec.nonce.unwrap_or_else(|| { + let current = *nonce_counter; + *nonce_counter += 1; + current + }); + + sign_tx_with_key_pair( + key_pair, + TxEip2930 { + chain_id: chain_spec.chain.id(), + nonce, + gas_limit: MIN_TRANSACTION_GAS, + gas_price: 1_500_000_000, + to: TxKind::Call(tx_spec.to), + value: tx_spec.value, + ..Default::default() + } + .into(), + ) + }) + .collect(); + + let gas_total = transactions.len() as u64 * MIN_TRANSACTION_GAS; + + Block { + header: Header { + parent_hash, + receipts_root: alloy_primitives::b256!( + "0xd3a6acf9a244d78b33831df95d472c4128ea85bf079a1d41e32ed0b7d2244c9e" + ), + difficulty: chain_spec.fork(EthereumHardfork::Paris).ttd().expect("Paris TTD"), + number: block_number, + gas_limit: gas_total.max(MIN_TRANSACTION_GAS), + gas_used: gas_total, + state_root: B256::ZERO, // Will be calculated by executor + ..Default::default() + }, + body: BlockBody { transactions, ..Default::default() }, + } + .try_into_recovered() + .unwrap() +} + +/// Executes a block and returns the updated block with correct state root +fn execute_block( + block: &mut RecoveredBlock, + provider_factory: &ProviderFactory, + chain_spec: &Arc, +) -> eyre::Result> +where + N: ProviderNodeTypes< + Primitives: NodePrimitives, + > + NodeTypesWithDB, +{ + let provider = provider_factory.provider()?; + let db = StateProviderDatabase::new(LatestStateProviderRef::new(&provider)); + let evm_config = EthEvmConfig::ethereum(chain_spec.clone()); + let block_executor = evm_config.batch_executor(db); + + let execution_result = block_executor.execute(block)?; + + let hashed_state = + LatestStateProviderRef::new(&provider).hashed_post_state(&execution_result.state); + let state_root = LatestStateProviderRef::new(&provider).state_root(hashed_state)?; + + block.set_state_root(state_root); + + Ok(execution_result) +} + +/// Commits a block and its execution output to the database +fn commit_block_to_database( + block: &RecoveredBlock, + 
execution_output: &reth_evm::execute::BlockExecutionOutput, + provider_factory: &ProviderFactory, +) -> eyre::Result<()> +where + N: ProviderNodeTypes< + Primitives: NodePrimitives, + > + NodeTypesWithDB, +{ + let execution_outcome = ExecutionOutcome { + bundle: execution_output.state.clone(), + receipts: vec![execution_output.receipts.clone()], + first_block: block.number(), + requests: vec![execution_output.requests.clone()], + }; + + // Calculate hashed state from execution result + let state_provider = provider_factory.provider()?; + let hashed_state = HashedPostStateProvider::hashed_post_state( + &LatestStateProviderRef::new(&state_provider), + &execution_output.state, + ); + + let provider_rw = provider_factory.provider_rw()?; + provider_rw.append_blocks_with_state( + vec![block.clone()], + &execution_outcome, + hashed_state.into_sorted(), + )?; + provider_rw.commit()?; + + Ok(()) +} + +/// Runs a test scenario with the given configuration +fn run_test_scenario( + scenario: TestScenario, + provider_factory: ProviderFactory, + chain_spec: Arc, + key_pair: Keypair, + storage: OpProofsStorage>, +) -> eyre::Result<()> +where + N: ProviderNodeTypes< + Primitives: NodePrimitives, + > + NodeTypesWithDB, +{ + let genesis_hash = chain_spec.genesis_hash(); + let mut nonce_counter = 0u64; + let mut last_block_hash = genesis_hash; + let mut last_block_number = 0u64; + + // Execute blocks before initialization + for (idx, block_spec) in scenario.blocks_before_initialization.iter().enumerate() { + let block_number = idx as u64 + 1; + let mut block = create_block_from_spec( + block_spec, + block_number, + last_block_hash, + &chain_spec, + key_pair, + &mut nonce_counter, + ); + + let execution_output = execute_block(&mut block, &provider_factory, &chain_spec)?; + commit_block_to_database(&block, &execution_output, &provider_factory)?; + + last_block_hash = block.hash(); + last_block_number = block_number; + } + + { + let provider = provider_factory.db_ref(); + let tx = 
provider.tx()?; + let initialization_job = InitializationJob::new(storage.clone(), tx); + initialization_job.run(last_block_number, last_block_hash)?; + } + + // Execute blocks after initialization using live collector + let evm_config = EthEvmConfig::ethereum(chain_spec.clone()); + + for (idx, block_spec) in scenario.blocks_after_initialization.iter().enumerate() { + let block_number = last_block_number + idx as u64 + 1; + let mut block = create_block_from_spec( + block_spec, + block_number, + last_block_hash, + &chain_spec, + key_pair, + &mut nonce_counter, + ); + + // Execute the block to get the correct state root + let execution_output = execute_block(&mut block, &provider_factory, &chain_spec)?; + + // Create a fresh blockchain provider to ensure it sees all committed blocks + let blockchain_db = BlockchainProvider::new(provider_factory.clone())?; + let live_trie_collector = + LiveTrieCollector::new(evm_config.clone(), blockchain_db, &storage); + + // Use the live collector to execute and store trie updates + live_trie_collector.execute_and_store_block_updates(&block)?; + + // Commit the block to the database so subsequent blocks can build on it + commit_block_to_database(&block, &execution_output, &provider_factory)?; + + last_block_hash = block.hash(); + } + + Ok(()) +} + +/// End-to-end test of a single live collector iteration. 
+/// (1) Creates a chain with some state +/// (2) Stores the genesis state into storage via initialization +/// (3) Executes a block and calculates the state root using the stored state +#[test] +fn test_execute_and_store_block_updates() { + let dir = TempDir::new().unwrap(); + let storage = Arc::new(MdbxProofsStorage::new(dir.path()).expect("env")).into(); + + // Create a keypair for signing transactions + let secp = Secp256k1::new(); + let key_pair = Keypair::new(&secp, &mut rng()); + let sender = public_key_to_address(key_pair.public_key()); + + // Create chain spec with the sender address funded in genesis + let chain_spec = chain_spec_with_address(sender); + + // Create test database and provider factory + let provider_factory = create_test_provider_factory_with_chain_spec(chain_spec.clone()); + + // Insert genesis state into the database + init_genesis(&provider_factory).unwrap(); + + // Define the test scenario: + // - No blocks before initialization + // - Initialization to genesis (block 0) + // - Execute one block with a single transaction after initialization + let recipient = Address::repeat_byte(0x42); + let scenario = TestScenario::new( + vec![], + vec![BlockSpec::new(vec![TxSpec::transfer(recipient, U256::from(1))])], + ); + + run_test_scenario(scenario, provider_factory, chain_spec, key_pair, storage).unwrap(); +} + +#[test] +fn test_execute_and_store_block_updates_missing_parent_block() { + let dir = TempDir::new().unwrap(); + let storage: OpProofsStorage> = + Arc::new(MdbxProofsStorage::new(dir.path()).expect("env")).into(); + + let secp = Secp256k1::new(); + let key_pair = Keypair::new(&secp, &mut rng()); + let sender = public_key_to_address(key_pair.public_key()); + + let chain_spec = chain_spec_with_address(sender); + let provider_factory = create_test_provider_factory_with_chain_spec(chain_spec.clone()); + init_genesis(&provider_factory).unwrap(); + + // No blocks before initialization; initialization only inserts genesis. 
+ let scenario = TestScenario::new(vec![], vec![]); + + // Run initialization (block 0 only) + run_test_scenario( + scenario, + provider_factory.clone(), + chain_spec.clone(), + key_pair, + storage.clone(), + ) + .unwrap(); + + // Create a block whose parent block number is missing. + let incorrect_block_number = 2; + let incorrect_parent_hash = B256::repeat_byte(0x11); + + let mut nonce_counter = 0; + let incorrect_block = create_block_from_spec( + &BlockSpec::new(vec![]), + incorrect_block_number, + incorrect_parent_hash, + &chain_spec, + key_pair, + &mut nonce_counter, + ); + + let blockchain_db = BlockchainProvider::new(provider_factory).unwrap(); + let collector = + LiveTrieCollector::new(EthEvmConfig::ethereum(chain_spec.clone()), blockchain_db, &storage); + + // EXPECT: MissingParentBlock + let err = collector.execute_and_store_block_updates(&incorrect_block).unwrap_err(); + + assert!(matches!(err, OpProofsStorageError::MissingParentBlock { .. })); +} + +#[test] +fn test_execute_and_store_block_updates_state_root_mismatch() { + let dir = TempDir::new().unwrap(); + let storage: OpProofsStorage> = + Arc::new(MdbxProofsStorage::new(dir.path()).expect("env")).into(); + + let secp = Secp256k1::new(); + let key_pair = Keypair::new(&secp, &mut rng()); + let sender = public_key_to_address(key_pair.public_key()); + + let chain_spec = chain_spec_with_address(sender); + let provider_factory = create_test_provider_factory_with_chain_spec(chain_spec.clone()); + init_genesis(&provider_factory).unwrap(); + + // Run normal scenario: no blocks before initialization, one block after. 
+ let recipient = Address::repeat_byte(0x42); + let scenario = TestScenario::new( + vec![], + vec![BlockSpec::new(vec![TxSpec::transfer(recipient, U256::from(1))])], + ); + + run_test_scenario( + scenario, + provider_factory.clone(), + chain_spec.clone(), + key_pair, + storage.clone(), + ) + .unwrap(); + + // Generate a second block normally + let blockchain_db = BlockchainProvider::new(provider_factory.clone()).unwrap(); + let collector = + LiveTrieCollector::new(EthEvmConfig::ethereum(chain_spec.clone()), blockchain_db, &storage); + + // Create the next block + let mut nonce_counter = 0; + let last_block_hash = chain_spec.genesis_hash(); // because scenario executes 1 block + let next_number = 2; + + let mut block = create_block_from_spec( + &BlockSpec::new(vec![]), + next_number, + last_block_hash, + &chain_spec, + key_pair, + &mut nonce_counter, + ); + + // Execute it to compute a correct state root + let _ = execute_block(&mut block, &provider_factory, &chain_spec).unwrap(); + + // Change the state root to induce the error + block.header_mut().state_root = B256::repeat_byte(0xAA); + + // EXPECT: StateRootMismatch + let err = collector.execute_and_store_block_updates(&block).unwrap_err(); + + assert!(matches!(err, OpProofsStorageError::StateRootMismatch { .. 
})); +} + +/// Test with multiple blocks before and after initialization +#[test] +fn test_multiple_blocks_before_and_after_initialization() { + let dir = TempDir::new().unwrap(); + let storage = Arc::new(MdbxProofsStorage::new(dir.path()).expect("env")).into(); + + let secp = Secp256k1::new(); + let key_pair = Keypair::new(&secp, &mut rng()); + let sender = public_key_to_address(key_pair.public_key()); + + let chain_spec = chain_spec_with_address(sender); + let provider_factory = create_test_provider_factory_with_chain_spec(chain_spec.clone()); + init_genesis(&provider_factory).unwrap(); + + // Define the test scenario: + // - Execute 3 blocks before initialization (will be committed to db) + // - Initialization to block 3 + // - Execute 2 more blocks using the live collector + let recipient1 = Address::repeat_byte(0x42); + let recipient2 = Address::repeat_byte(0x43); + let recipient3 = Address::repeat_byte(0x44); + + let scenario = TestScenario::new( + vec![ + BlockSpec::new(vec![TxSpec::transfer(recipient1, U256::from(1))]), + BlockSpec::new(vec![TxSpec::transfer(recipient2, U256::from(2))]), + BlockSpec::new(vec![TxSpec::transfer(recipient3, U256::from(3))]), + ], + vec![ + BlockSpec::new(vec![TxSpec::transfer(recipient1, U256::from(4))]), + BlockSpec::new(vec![TxSpec::transfer(recipient2, U256::from(5))]), + ], + ); + + run_test_scenario(scenario, provider_factory, chain_spec, key_pair, storage).unwrap(); +} + +/// Test with blocks containing multiple transactions +#[test] +fn test_blocks_with_multiple_transactions() { + let dir = TempDir::new().unwrap(); + let storage = Arc::new(MdbxProofsStorage::new(dir.path()).expect("env")).into(); + + let secp = Secp256k1::new(); + let key_pair = Keypair::new(&secp, &mut rng()); + let sender = public_key_to_address(key_pair.public_key()); + + let chain_spec = chain_spec_with_address(sender); + let provider_factory = create_test_provider_factory_with_chain_spec(chain_spec.clone()); + 
init_genesis(&provider_factory).unwrap(); + + let recipient1 = Address::repeat_byte(0x42); + let recipient2 = Address::repeat_byte(0x43); + let recipient3 = Address::repeat_byte(0x44); + + // Block with 3 transactions + let scenario = TestScenario::new( + vec![], + vec![BlockSpec::new(vec![ + TxSpec::transfer(recipient1, U256::from(1)), + TxSpec::transfer(recipient2, U256::from(2)), + TxSpec::transfer(recipient3, U256::from(3)), + ])], + ); + + run_test_scenario(scenario, provider_factory, chain_spec, key_pair, storage).unwrap(); +}