diff --git a/CLAUDE.md b/CLAUDE.md index 99282fbf864..c7a709c6713 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -236,6 +236,85 @@ Common refactoring pattern: - Add trait bounds for flexibility - Enable reuse across different chain types (Ethereum, Optimism) +#### When to Comment + +Write comments that remain valuable after the PR is merged. Future readers won't have PR context - they only see the current code. + +##### ✅ DO: Add Value + +**Explain WHY and non-obvious behavior:** +```rust +// Process must handle allocations atomically to prevent race conditions +// between dealloc on drop and concurrent limit checks +unsafe impl GlobalAlloc for LimitedAllocator { ... } + +// Binary search requires sorted input. Panics on unsorted slices. +fn find_index(items: &[Item], target: &Item) -> Option + +// Timeout set to 5s to match EVM block processing limits +const TRACER_TIMEOUT: Duration = Duration::from_secs(5); +``` + +**Document constraints and assumptions:** +```rust +/// Returns heap size estimate. +/// +/// Note: May undercount shared references (Rc/Arc). For precise +/// accounting, combine with an allocator-based approach. +fn deep_size_of(&self) -> usize +``` + +**Explain complex logic:** +```rust +// We reset limits at task start because tokio reuses threads in +// spawn_blocking pool. Without reset, second task inherits first +// task's allocation count and immediately hits limit. 
+THREAD_ALLOCATED.with(|allocated| allocated.set(0)); +``` + +##### ❌ DON'T: Describe Changes +```rust +// ❌ BAD - Describes the change, not the code +// Changed from Vec to HashMap for O(1) lookups + +// ✅ GOOD - Explains the decision +// HashMap provides O(1) symbol lookups during trace replay +``` +```rust +// ❌ BAD - PR-specific context +// Fix for issue #234 where memory wasn't freed + +// ✅ GOOD - Documents the actual behavior +// Explicitly drop allocations before limit check to ensure +// accurate accounting +``` +```rust +// ❌ BAD - States the obvious +// Increment counter +counter += 1; + +// ✅ GOOD - Explains non-obvious purpose +// Track allocations across all threads for global limit enforcement +GLOBAL_COUNTER.fetch_add(1, Ordering::SeqCst); +``` + +✅ **Comment when:** +- Non-obvious behavior or edge cases +- Performance trade-offs +- Safety requirements (unsafe blocks must always be documented) +- Limitations or gotchas +- Why simpler alternatives don't work + +❌ **Don't comment when:** +- Code is self-explanatory +- Just restating the code in English +- Describing what changed in this PR + +##### The Test: "Will this make sense in 6 months?" + +Before adding a comment, ask: Would someone reading just the current code (no PR, no history) find this helpful? 
+ + ### Example Contribution Workflow Let's say you want to fix a bug where external IP resolution fails on startup: diff --git a/Cargo.lock b/Cargo.lock index 9f1dd8930a4..282ee811050 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -112,9 +112,9 @@ dependencies = [ [[package]] name = "alloy-consensus" -version = "1.0.38" +version = "1.0.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a0dd3ed764953a6b20458b2b7abbfdc93d20d14b38babe1a70fe631a443a9f1" +checksum = "b9b151e38e42f1586a01369ec52a6934702731d07e8509a7307331b09f6c46dc" dependencies = [ "alloy-eips", "alloy-primitives", @@ -139,9 +139,9 @@ dependencies = [ [[package]] name = "alloy-consensus-any" -version = "1.0.38" +version = "1.0.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9556182afa73cddffa91e64a5aa9508d5e8c912b3a15f26998d2388a824d2c7b" +checksum = "6e2d5e8668ef6215efdb7dcca6f22277b4e483a5650e05f5de22b2350971f4b8" dependencies = [ "alloy-consensus", "alloy-eips", @@ -154,9 +154,9 @@ dependencies = [ [[package]] name = "alloy-contract" -version = "1.0.37" +version = "1.0.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03df5cb3b428ac96b386ad64c11d5c6e87a5505682cf1fbd6f8f773e9eda04f6" +checksum = "630288cf4f3a34a8c6bc75c03dce1dbd47833138f65f37d53a1661eafc96b83f" dependencies = [ "alloy-consensus", "alloy-dyn-abi", @@ -237,9 +237,9 @@ dependencies = [ [[package]] name = "alloy-eips" -version = "1.0.38" +version = "1.0.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "305fa99b538ca7006b0c03cfed24ec6d82beda67aac857ef4714be24231d15e6" +checksum = "e5434834adaf64fa20a6fb90877bc1d33214c41b055cc49f82189c98614368cc" dependencies = [ "alloy-eip2124", "alloy-eip2930", @@ -285,9 +285,9 @@ dependencies = [ [[package]] name = "alloy-genesis" -version = "1.0.37" +version = "1.0.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"1421f6c9d15e5b86afbfe5865ca84dea3b9f77173a0963c1a2ee4e626320ada9" +checksum = "919a8471cfbed7bcd8cf1197a57dda583ce0e10c6385f6ff4e8b41304b223392" dependencies = [ "alloy-eips", "alloy-primitives", @@ -325,9 +325,9 @@ dependencies = [ [[package]] name = "alloy-json-rpc" -version = "1.0.37" +version = "1.0.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65f763621707fa09cece30b73ecc607eb43fd7a72451fe3b46f645b905086926" +checksum = "d7c69f6c9c68a1287c9d5ff903d0010726934de0dac10989be37b75a29190d55" dependencies = [ "alloy-primitives", "alloy-sol-types", @@ -340,9 +340,9 @@ dependencies = [ [[package]] name = "alloy-network" -version = "1.0.37" +version = "1.0.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f59a869fa4b4c3a7f08b1c8cb79aec61c29febe6e24a24fe0fcfded8a9b5703" +checksum = "8eaf2ae05219e73e0979cb2cf55612aafbab191d130f203079805eaf881cca58" dependencies = [ "alloy-consensus", "alloy-consensus-any", @@ -366,9 +366,9 @@ dependencies = [ [[package]] name = "alloy-network-primitives" -version = "1.0.38" +version = "1.0.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "223612259a080160ce839a4e5df0125ca403a1d5e7206cc911cea54af5d769aa" +checksum = "e58f4f345cef483eab7374f2b6056973c7419ffe8ad35e994b7a7f5d8e0c7ba4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -440,9 +440,9 @@ dependencies = [ [[package]] name = "alloy-provider" -version = "1.0.37" +version = "1.0.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77818b7348bd5486491a5297579dbfe5f706a81f8e1f5976393025f1e22a7c7d" +checksum = "de2597751539b1cc8fe4204e5325f9a9ed83fcacfb212018dfcfa7877e76de21" dependencies = [ "alloy-chains", "alloy-consensus", @@ -485,9 +485,9 @@ dependencies = [ [[package]] name = "alloy-pubsub" -version = "1.0.37" +version = "1.0.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"249b45103a66c9ad60ad8176b076106d03a2399a37f0ee7b0e03692e6b354cb9" +checksum = "06e45a68423e732900a0c824b8e22237db461b79d2e472dd68b7547c16104427" dependencies = [ "alloy-json-rpc", "alloy-primitives", @@ -529,9 +529,9 @@ dependencies = [ [[package]] name = "alloy-rpc-client" -version = "1.0.37" +version = "1.0.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2430d5623e428dd012c6c2156ae40b7fe638d6fca255e3244e0fba51fa698e93" +checksum = "edf8eb8be597cfa8c312934d2566ec4516f066d69164f9212d7a148979fdcfd8" dependencies = [ "alloy-json-rpc", "alloy-primitives", @@ -555,9 +555,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types" -version = "1.0.37" +version = "1.0.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9e131624d08a25cfc40557041e7dc42e1182fa1153e7592d120f769a1edce56" +checksum = "339af7336571dd39ae3a15bde08ae6a647e62f75350bd415832640268af92c06" dependencies = [ "alloy-primitives", "alloy-rpc-types-engine", @@ -568,9 +568,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-admin" -version = "1.0.37" +version = "1.0.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c59407723b1850ebaa49e46d10c2ba9c10c10b3aedf2f7e97015ee23c3f4e639" +checksum = "19b33cdc0483d236cdfff763dae799ccef9646e94fb549a74f7adac6a7f7bb86" dependencies = [ "alloy-genesis", "alloy-primitives", @@ -580,9 +580,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-anvil" -version = "1.0.37" +version = "1.0.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d65e3266095e6d8e8028aab5f439c6b8736c5147314f7e606c61597e014cb8a0" +checksum = "83d98fb386a462e143f5efa64350860af39950c49e7c0cbdba419c16793116ef" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -592,9 +592,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-any" -version = "1.0.37" +version = "1.0.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"07429a1099cd17227abcddb91b5e38c960aaeb02a6967467f5bb561fbe716ac6" +checksum = "fbde0801a32d21c5f111f037bee7e22874836fba7add34ed4a6919932dd7cf23" dependencies = [ "alloy-consensus-any", "alloy-rpc-types-eth", @@ -603,13 +603,14 @@ dependencies = [ [[package]] name = "alloy-rpc-types-beacon" -version = "1.0.37" +version = "1.0.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59e0e876b20eb9debf316d3e875536f389070635250f22b5a678cf4632a3e0cf" +checksum = "55c8d51ebb7c5fa8be8ea739a3933c5bfea08777d2d662b30b2109ac5ca71e6b" dependencies = [ "alloy-eips", "alloy-primitives", "alloy-rpc-types-engine", + "derive_more", "ethereum_ssz", "ethereum_ssz_derive", "serde", @@ -622,9 +623,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-debug" -version = "1.0.37" +version = "1.0.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aeff305b7d10cc1c888456d023e7bb8a5ea82e9e42b951e37619b88cc1a1486d" +checksum = "388cf910e66bd4f309a81ef746dcf8f9bca2226e3577890a8d56c5839225cf46" dependencies = [ "alloy-primitives", "derive_more", @@ -634,9 +635,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-engine" -version = "1.0.37" +version = "1.0.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "222ecadcea6aac65e75e32b6735635ee98517aa63b111849ee01ae988a71d685" +checksum = "605ec375d91073851f566a3082548af69a28dca831b27a8be7c1b4c49f5c6ca2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -655,9 +656,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-eth" -version = "1.0.38" +version = "1.0.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d7d47bca1a2a1541e4404aa38b7e262bb4dffd9ac23b4f178729a4ddc5a5caa" +checksum = "361cd87ead4ba7659bda8127902eda92d17fa7ceb18aba1676f7be10f7222487" dependencies = [ "alloy-consensus", "alloy-consensus-any", @@ -668,7 +669,7 @@ dependencies = [ "alloy-serde", "alloy-sol-types", "arbitrary", - "itertools 0.14.0", + 
"itertools 0.13.0", "serde", "serde_json", "serde_with", @@ -677,9 +678,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-mev" -version = "1.0.37" +version = "1.0.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "791a60d4baadd3f278faa4e2305cca095dfd4ab286e071b768ff09181d8ae215" +checksum = "1397926d8d06a2531578bafc3e0ec78f97a02f0e6d1631c67d80d22af6a3af02" dependencies = [ "alloy-consensus", "alloy-eips", @@ -692,9 +693,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-trace" -version = "1.0.38" +version = "1.0.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c331c8e48665607682e8a9549a2347c13674d4fbcbdc342e7032834eba2424f4" +checksum = "de4e95fb0572b97b17751d0fdf5cdc42b0050f9dd9459eddd1bf2e2fbfed0a33" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -706,9 +707,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-txpool" -version = "1.0.37" +version = "1.0.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "864f41befa90102d4e02327679699a7e9510930e2924c529e31476086609fa89" +checksum = "cddde1bbd4feeb0d363ae7882af1e2e7955ef77c17f933f31402aad9343b57c5" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -718,9 +719,9 @@ dependencies = [ [[package]] name = "alloy-serde" -version = "1.0.38" +version = "1.0.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a8468f1a7f9ee3bae73c24eead0239abea720dbf7779384b9c7e20d51bfb6b0" +checksum = "64600fc6c312b7e0ba76f73a381059af044f4f21f43e07f51f1fa76c868fe302" dependencies = [ "alloy-primitives", "arbitrary", @@ -730,9 +731,9 @@ dependencies = [ [[package]] name = "alloy-signer" -version = "1.0.37" +version = "1.0.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53410a18a61916e2c073a6519499514e027b01e77eeaf96acd1df7cf96ef6bb2" +checksum = "5772858492b26f780468ae693405f895d6a27dea6e3eab2c36b6217de47c2647" dependencies = [ 
"alloy-primitives", "async-trait", @@ -745,9 +746,9 @@ dependencies = [ [[package]] name = "alloy-signer-local" -version = "1.0.37" +version = "1.0.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6006c4cbfa5d08cadec1fcabea6cb56dc585a30a9fce40bcf81e307d6a71c8e" +checksum = "f4195b803d0a992d8dbaab2ca1986fc86533d4bc80967c0cce7668b26ad99ef9" dependencies = [ "alloy-consensus", "alloy-network", @@ -834,9 +835,9 @@ dependencies = [ [[package]] name = "alloy-transport" -version = "1.0.37" +version = "1.0.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d94ee404368a3d9910dfe61b203e888c6b0e151a50e147f95da8baff9f9c7763" +checksum = "025a940182bddaeb594c26fe3728525ae262d0806fe6a4befdf5d7bc13d54bce" dependencies = [ "alloy-json-rpc", "alloy-primitives", @@ -858,9 +859,9 @@ dependencies = [ [[package]] name = "alloy-transport-http" -version = "1.0.37" +version = "1.0.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2f8a6338d594f6c6481292215ee8f2fd7b986c80aba23f3f44e761a8658de78" +checksum = "e3b5064d1e1e1aabc918b5954e7fb8154c39e77ec6903a581b973198b26628fa" dependencies = [ "alloy-json-rpc", "alloy-transport", @@ -873,9 +874,9 @@ dependencies = [ [[package]] name = "alloy-transport-ipc" -version = "1.0.37" +version = "1.0.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17a37a8ca18006fa0a58c7489645619ff58cfa073f2b29c4e052c9bd114b123a" +checksum = "d47962f3f1d9276646485458dc842b4e35675f42111c9d814ae4711c664c8300" dependencies = [ "alloy-json-rpc", "alloy-pubsub", @@ -893,9 +894,9 @@ dependencies = [ [[package]] name = "alloy-transport-ws" -version = "1.0.37" +version = "1.0.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "679b0122b7bca9d4dc5eb2c0549677a3c53153f6e232f23f4b3ba5575f74ebde" +checksum = "9476a36a34e2fb51b6746d009c53d309a186a825aa95435407f0e07149f4ad2d" dependencies = [ "alloy-pubsub", "alloy-transport", 
@@ -931,9 +932,9 @@ dependencies = [ [[package]] name = "alloy-tx-macros" -version = "1.0.38" +version = "1.0.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7bf39928a5e70c9755d6811a2928131b53ba785ad37c8bf85c90175b5d43b818" +checksum = "f8e52276fdb553d3c11563afad2898f4085165e4093604afe3d78b69afbf408f" dependencies = [ "alloy-primitives", "darling 0.21.3", @@ -7481,6 +7482,7 @@ dependencies = [ "serde", "serde_json", "tar", + "tempfile", "tokio", "tokio-stream", "toml", diff --git a/Cargo.toml b/Cargo.toml index a781e3b6047..7d75c8da560 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -494,33 +494,33 @@ alloy-trie = { version = "0.9.1", default-features = false } alloy-hardforks = "0.4.0" -alloy-consensus = { version = "1.0.37", default-features = false } -alloy-contract = { version = "1.0.37", default-features = false } -alloy-eips = { version = "1.0.37", default-features = false } -alloy-genesis = { version = "1.0.37", default-features = false } -alloy-json-rpc = { version = "1.0.37", default-features = false } -alloy-network = { version = "1.0.37", default-features = false } -alloy-network-primitives = { version = "1.0.37", default-features = false } -alloy-provider = { version = "1.0.37", features = ["reqwest"], default-features = false } -alloy-pubsub = { version = "1.0.37", default-features = false } -alloy-rpc-client = { version = "1.0.37", default-features = false } -alloy-rpc-types = { version = "1.0.37", features = ["eth"], default-features = false } -alloy-rpc-types-admin = { version = "1.0.37", default-features = false } -alloy-rpc-types-anvil = { version = "1.0.37", default-features = false } -alloy-rpc-types-beacon = { version = "1.0.37", default-features = false } -alloy-rpc-types-debug = { version = "1.0.37", default-features = false } -alloy-rpc-types-engine = { version = "1.0.37", default-features = false } -alloy-rpc-types-eth = { version = "1.0.37", default-features = false } -alloy-rpc-types-mev = { version = 
"1.0.37", default-features = false } -alloy-rpc-types-trace = { version = "1.0.37", default-features = false } -alloy-rpc-types-txpool = { version = "1.0.37", default-features = false } -alloy-serde = { version = "1.0.37", default-features = false } -alloy-signer = { version = "1.0.37", default-features = false } -alloy-signer-local = { version = "1.0.37", default-features = false } -alloy-transport = { version = "1.0.37" } -alloy-transport-http = { version = "1.0.37", features = ["reqwest-rustls-tls"], default-features = false } -alloy-transport-ipc = { version = "1.0.37", default-features = false } -alloy-transport-ws = { version = "1.0.37", default-features = false } +alloy-consensus = { version = "1.0.41", default-features = false } +alloy-contract = { version = "1.0.41", default-features = false } +alloy-eips = { version = "1.0.41", default-features = false } +alloy-genesis = { version = "1.0.41", default-features = false } +alloy-json-rpc = { version = "1.0.41", default-features = false } +alloy-network = { version = "1.0.41", default-features = false } +alloy-network-primitives = { version = "1.0.41", default-features = false } +alloy-provider = { version = "1.0.41", features = ["reqwest"], default-features = false } +alloy-pubsub = { version = "1.0.41", default-features = false } +alloy-rpc-client = { version = "1.0.41", default-features = false } +alloy-rpc-types = { version = "1.0.41", features = ["eth"], default-features = false } +alloy-rpc-types-admin = { version = "1.0.41", default-features = false } +alloy-rpc-types-anvil = { version = "1.0.41", default-features = false } +alloy-rpc-types-beacon = { version = "1.0.41", default-features = false } +alloy-rpc-types-debug = { version = "1.0.41", default-features = false } +alloy-rpc-types-engine = { version = "1.0.41", default-features = false } +alloy-rpc-types-eth = { version = "1.0.41", default-features = false } +alloy-rpc-types-mev = { version = "1.0.41", default-features = false } 
+alloy-rpc-types-trace = { version = "1.0.41", default-features = false } +alloy-rpc-types-txpool = { version = "1.0.41", default-features = false } +alloy-serde = { version = "1.0.41", default-features = false } +alloy-signer = { version = "1.0.41", default-features = false } +alloy-signer-local = { version = "1.0.41", default-features = false } +alloy-transport = { version = "1.0.41" } +alloy-transport-http = { version = "1.0.41", features = ["reqwest-rustls-tls"], default-features = false } +alloy-transport-ipc = { version = "1.0.41", default-features = false } +alloy-transport-ws = { version = "1.0.41", default-features = false } # op alloy-op-evm = { version = "0.22.0", default-features = false } diff --git a/crates/chain-state/Cargo.toml b/crates/chain-state/Cargo.toml index cba12995015..d21c83ae7c4 100644 --- a/crates/chain-state/Cargo.toml +++ b/crates/chain-state/Cargo.toml @@ -54,6 +54,7 @@ reth-testing-utils.workspace = true alloy-signer.workspace = true alloy-signer-local.workspace = true rand.workspace = true +revm-state.workspace = true criterion.workspace = true [features] diff --git a/crates/chain-state/src/chain_info.rs b/crates/chain-state/src/chain_info.rs index a8a08430566..dd6afc8db1a 100644 --- a/crates/chain-state/src/chain_info.rs +++ b/crates/chain-state/src/chain_info.rs @@ -77,22 +77,22 @@ where self.inner.finalized_block.borrow().clone() } - /// Returns the canonical head of the chain. + /// Returns the `BlockNumHash` of the canonical head. pub fn get_canonical_num_hash(&self) -> BlockNumHash { self.inner.canonical_head.read().num_hash() } - /// Returns the canonical head of the chain. + /// Returns the block number of the canonical head. pub fn get_canonical_block_number(&self) -> BlockNumber { self.inner.canonical_head_number.load(Ordering::Relaxed) } - /// Returns the safe header of the chain. + /// Returns the `BlockNumHash` of the safe header. 
pub fn get_safe_num_hash(&self) -> Option { self.inner.safe_block.borrow().as_ref().map(SealedHeader::num_hash) } - /// Returns the finalized header of the chain. + /// Returns the `BlockNumHash` of the finalized header. pub fn get_finalized_num_hash(&self) -> Option { self.inner.finalized_block.borrow().as_ref().map(SealedHeader::num_hash) } diff --git a/crates/chain-state/src/in_memory.rs b/crates/chain-state/src/in_memory.rs index 5b2f666657b..a6c85538107 100644 --- a/crates/chain-state/src/in_memory.rs +++ b/crates/chain-state/src/in_memory.rs @@ -570,7 +570,7 @@ pub struct BlockState { /// The executed block that determines the state after this block has been executed. block: ExecutedBlock, /// The block's parent block if it exists. - parent: Option>>, + parent: Option>, } impl BlockState { diff --git a/crates/chain-state/src/memory_overlay.rs b/crates/chain-state/src/memory_overlay.rs index 2e1efd1ed1b..254edb248b4 100644 --- a/crates/chain-state/src/memory_overlay.rs +++ b/crates/chain-state/src/memory_overlay.rs @@ -61,6 +61,13 @@ impl<'a, N: NodePrimitives> MemoryOverlayStateProviderRef<'a, N> { ) }) } + + fn merged_hashed_storage(&self, address: Address, storage: HashedStorage) -> HashedStorage { + let state = &self.trie_input().state; + let mut hashed = state.storages.get(&keccak256(address)).cloned().unwrap_or_default(); + hashed.extend(&storage); + hashed + } } impl BlockHashReader for MemoryOverlayStateProviderRef<'_, N> { @@ -145,11 +152,8 @@ impl StateRootProvider for MemoryOverlayStateProviderRef<'_, impl StorageRootProvider for MemoryOverlayStateProviderRef<'_, N> { // TODO: Currently this does not reuse available in-memory trie nodes. 
fn storage_root(&self, address: Address, storage: HashedStorage) -> ProviderResult { - let state = &self.trie_input().state; - let mut hashed_storage = - state.storages.get(&keccak256(address)).cloned().unwrap_or_default(); - hashed_storage.extend(&storage); - self.historical.storage_root(address, hashed_storage) + let merged = self.merged_hashed_storage(address, storage); + self.historical.storage_root(address, merged) } // TODO: Currently this does not reuse available in-memory trie nodes. @@ -159,11 +163,8 @@ impl StorageRootProvider for MemoryOverlayStateProviderRef<'_ slot: B256, storage: HashedStorage, ) -> ProviderResult { - let state = &self.trie_input().state; - let mut hashed_storage = - state.storages.get(&keccak256(address)).cloned().unwrap_or_default(); - hashed_storage.extend(&storage); - self.historical.storage_proof(address, slot, hashed_storage) + let merged = self.merged_hashed_storage(address, storage); + self.historical.storage_proof(address, slot, merged) } // TODO: Currently this does not reuse available in-memory trie nodes. 
@@ -173,11 +174,8 @@ impl StorageRootProvider for MemoryOverlayStateProviderRef<'_ slots: &[B256], storage: HashedStorage, ) -> ProviderResult { - let state = &self.trie_input().state; - let mut hashed_storage = - state.storages.get(&keccak256(address)).cloned().unwrap_or_default(); - hashed_storage.extend(&storage); - self.historical.storage_multiproof(address, slots, hashed_storage) + let merged = self.merged_hashed_storage(address, storage); + self.historical.storage_multiproof(address, slots, merged) } } diff --git a/crates/cli/commands/Cargo.toml b/crates/cli/commands/Cargo.toml index 242cc6d5d9d..da1a5318f25 100644 --- a/crates/cli/commands/Cargo.toml +++ b/crates/cli/commands/Cargo.toml @@ -99,6 +99,8 @@ proptest-arbitrary-interop = { workspace = true, optional = true } [dev-dependencies] reth-ethereum-cli.workspace = true +reth-provider = { workspace = true, features = ["test-utils"] } +tempfile.workspace = true [features] default = [] diff --git a/crates/cli/commands/src/import_core.rs b/crates/cli/commands/src/import_core.rs index 2370ebaa039..98f888bb9e3 100644 --- a/crates/cli/commands/src/import_core.rs +++ b/crates/cli/commands/src/import_core.rs @@ -102,6 +102,9 @@ where .sealed_header(provider_factory.last_block_number()?)? .expect("should have genesis"); + let static_file_producer = + StaticFileProducer::new(provider_factory.clone(), PruneModes::default()); + while let Some(file_client) = reader.next_chunk::>(consensus.clone(), Some(sealed_header)).await? 
{ @@ -121,7 +124,7 @@ where provider_factory.clone(), &consensus, Arc::new(file_client), - StaticFileProducer::new(provider_factory.clone(), PruneModes::default()), + static_file_producer.clone(), import_config.no_state, executor.clone(), )?; diff --git a/crates/cli/commands/src/init_state/mod.rs b/crates/cli/commands/src/init_state/mod.rs index 68618361e7f..4b5c51585b3 100644 --- a/crates/cli/commands/src/init_state/mod.rs +++ b/crates/cli/commands/src/init_state/mod.rs @@ -2,7 +2,7 @@ use crate::common::{AccessRights, CliHeader, CliNodeTypes, Environment, EnvironmentArgs}; use alloy_consensus::BlockHeader as AlloyBlockHeader; -use alloy_primitives::{B256, U256}; +use alloy_primitives::{Sealable, B256}; use clap::Parser; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_cli::chainspec::ChainSpecParser; @@ -13,7 +13,7 @@ use reth_provider::{ BlockNumReader, DBProvider, DatabaseProviderFactory, StaticFileProviderFactory, StaticFileWriter, }; -use std::{io::BufReader, path::PathBuf, str::FromStr, sync::Arc}; +use std::{io::BufReader, path::PathBuf, sync::Arc}; use tracing::info; pub mod without_evm; @@ -58,13 +58,9 @@ pub struct InitStateCommand { #[arg(long, value_name = "HEADER_FILE", verbatim_doc_comment)] pub header: Option, - /// Total difficulty of the header. - #[arg(long, value_name = "TOTAL_DIFFICULTY", verbatim_doc_comment)] - pub total_difficulty: Option, - /// Hash of the header. 
#[arg(long, value_name = "HEADER_HASH", verbatim_doc_comment)] - pub header_hash: Option, + pub header_hash: Option, } impl> InitStateCommand { @@ -88,16 +84,9 @@ impl> InitStateC let header = self.header.ok_or_else(|| eyre::eyre!("Header file must be provided"))?; let header = without_evm::read_header_from_file::< ::BlockHeader, - >(header)?; - - let header_hash = - self.header_hash.ok_or_else(|| eyre::eyre!("Header hash must be provided"))?; - let header_hash = B256::from_str(&header_hash)?; + >(&header)?; - let total_difficulty = self - .total_difficulty - .ok_or_else(|| eyre::eyre!("Total difficulty must be provided"))?; - let total_difficulty = U256::from_str(&total_difficulty)?; + let header_hash = self.header_hash.unwrap_or_else(|| header.hash_slow()); let last_block_number = provider_rw.last_block_number()?; @@ -105,7 +94,6 @@ impl> InitStateC without_evm::setup_without_evm( &provider_rw, SealedHeader::new(header, header_hash), - total_difficulty, |number| { let mut header = <::BlockHeader>::default(); @@ -146,3 +134,32 @@ impl InitStateCommand { Some(&self.env.chain) } } + +#[cfg(test)] +mod tests { + use super::*; + use alloy_primitives::b256; + use reth_ethereum_cli::chainspec::EthereumChainSpecParser; + + #[test] + fn parse_init_state_command_with_without_evm() { + let cmd: InitStateCommand = InitStateCommand::parse_from([ + "reth", + "--chain", + "sepolia", + "--without-evm", + "--header", + "header.rlp", + "--header-hash", + "0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef", + "state.jsonl", + ]); + assert_eq!(cmd.state.to_str().unwrap(), "state.jsonl"); + assert!(cmd.without_evm); + assert_eq!(cmd.header.unwrap().to_str().unwrap(), "header.rlp"); + assert_eq!( + cmd.header_hash.unwrap(), + b256!("0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef") + ); + } +} diff --git a/crates/cli/commands/src/init_state/without_evm.rs b/crates/cli/commands/src/init_state/without_evm.rs index 09711d45880..de6320fc86e 100644 --- 
a/crates/cli/commands/src/init_state/without_evm.rs +++ b/crates/cli/commands/src/init_state/without_evm.rs @@ -10,16 +10,22 @@ use reth_provider::{ }; use reth_stages::{StageCheckpoint, StageId}; use reth_static_file_types::StaticFileSegment; -use std::{fs::File, io::Read, path::PathBuf}; +use std::path::Path; use tracing::info; + /// Reads the header RLP from a file and returns the Header. -pub(crate) fn read_header_from_file(path: PathBuf) -> Result +/// +/// This supports both raw rlp bytes and rlp hex string. +pub(crate) fn read_header_from_file(path: &Path) -> Result where H: Decodable, { - let mut file = File::open(path)?; - let mut buf = Vec::new(); - file.read_to_end(&mut buf)?; + let buf = if let Ok(content) = reth_fs_util::read_to_string(path) { + alloy_primitives::hex::decode(content.trim())? + } else { + // If UTF-8 decoding fails, read as raw bytes + reth_fs_util::read(path)? + }; let header = H::decode(&mut &buf[..])?; Ok(header) @@ -30,7 +36,6 @@ where pub fn setup_without_evm( provider_rw: &Provider, header: SealedHeader<::BlockHeader>, - total_difficulty: U256, header_factory: F, ) -> ProviderResult<()> where @@ -50,7 +55,7 @@ where info!(target: "reth::cli", "Appending first valid block."); - append_first_block(provider_rw, &header, total_difficulty)?; + append_first_block(provider_rw, &header)?; for stage in StageId::ALL { provider_rw.save_stage_checkpoint(stage, StageCheckpoint::new(header.number()))?; @@ -68,7 +73,6 @@ where fn append_first_block( provider_rw: &Provider, header: &SealedHeaderFor, - total_difficulty: U256, ) -> ProviderResult<()> where Provider: BlockWriter::Block> @@ -85,16 +89,8 @@ where let sf_provider = provider_rw.static_file_provider(); - sf_provider.latest_writer(StaticFileSegment::Headers)?.append_header( - header, - total_difficulty, - &header.hash(), - )?; - sf_provider.latest_writer(StaticFileSegment::Receipts)?.increment_block(header.number())?; - 
sf_provider.latest_writer(StaticFileSegment::Transactions)?.increment_block(header.number())?; - Ok(()) } @@ -167,3 +163,85 @@ where Ok(()) } + +#[cfg(test)] +mod tests { + use super::*; + use alloy_consensus::Header; + use alloy_primitives::{address, b256}; + use reth_db_common::init::init_genesis; + use reth_provider::{test_utils::create_test_provider_factory, DatabaseProviderFactory}; + use std::io::Write; + use tempfile::NamedTempFile; + + #[test] + fn test_read_header_from_file_hex_string() { + let header_rlp = "0xf90212a00d84d79f59fc384a1f6402609a5b7253b4bfe7a4ae12608ed107273e5422b6dda01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d493479471562b71999873db5b286df957af199ec94617f7a0f496f3d199c51a1aaee67dac95f24d92ac13c60d25181e1eecd6eca5ddf32ac0a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000808206a4840365908a808468e975f09ad983011003846765746888676f312e32352e308664617277696ea06f485a167165ec12e0ab3e6ab59a7b88560b90306ac98a26eb294abf95a8c59b88000000000000000007"; + + let mut temp_file = NamedTempFile::new().unwrap(); + temp_file.write_all(header_rlp.as_bytes()).unwrap(); + temp_file.flush().unwrap(); + + let header: Header = read_header_from_file(temp_file.path()).unwrap(); + + assert_eq!(header.number, 1700); + assert_eq!( + header.parent_hash, + b256!("0d84d79f59fc384a1f6402609a5b7253b4bfe7a4ae12608ed107273e5422b6dd") + ); + assert_eq!(header.beneficiary, 
address!("71562b71999873db5b286df957af199ec94617f7")); + } + + #[test] + fn test_read_header_from_file_raw_bytes() { + let header_rlp = "0xf90212a00d84d79f59fc384a1f6402609a5b7253b4bfe7a4ae12608ed107273e5422b6dda01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d493479471562b71999873db5b286df957af199ec94617f7a0f496f3d199c51a1aaee67dac95f24d92ac13c60d25181e1eecd6eca5ddf32ac0a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000808206a4840365908a808468e975f09ad983011003846765746888676f312e32352e308664617277696ea06f485a167165ec12e0ab3e6ab59a7b88560b90306ac98a26eb294abf95a8c59b88000000000000000007"; + let header_bytes = + alloy_primitives::hex::decode(header_rlp.trim_start_matches("0x")).unwrap(); + + let mut temp_file = NamedTempFile::new().unwrap(); + temp_file.write_all(&header_bytes).unwrap(); + temp_file.flush().unwrap(); + + let header: Header = read_header_from_file(temp_file.path()).unwrap(); + + assert_eq!(header.number, 1700); + assert_eq!( + header.parent_hash, + b256!("0d84d79f59fc384a1f6402609a5b7253b4bfe7a4ae12608ed107273e5422b6dd") + ); + assert_eq!(header.beneficiary, address!("71562b71999873db5b286df957af199ec94617f7")); + } + + #[test] + fn test_setup_without_evm_succeeds() { + let header_rlp = 
"0xf90212a00d84d79f59fc384a1f6402609a5b7253b4bfe7a4ae12608ed107273e5422b6dda01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d493479471562b71999873db5b286df957af199ec94617f7a0f496f3d199c51a1aaee67dac95f24d92ac13c60d25181e1eecd6eca5ddf32ac0a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000808206a4840365908a808468e975f09ad983011003846765746888676f312e32352e308664617277696ea06f485a167165ec12e0ab3e6ab59a7b88560b90306ac98a26eb294abf95a8c59b88000000000000000007"; + let header_bytes = + alloy_primitives::hex::decode(header_rlp.trim_start_matches("0x")).unwrap(); + + let mut temp_file = NamedTempFile::new().unwrap(); + temp_file.write_all(&header_bytes).unwrap(); + temp_file.flush().unwrap(); + + let header: Header = read_header_from_file(temp_file.path()).unwrap(); + let header_hash = b256!("4f05e4392969fc82e41f6d6a8cea379323b0b2d3ddf7def1a33eec03883e3a33"); + + let provider_factory = create_test_provider_factory(); + + init_genesis(&provider_factory).unwrap(); + + let provider_rw = provider_factory.database_provider_rw().unwrap(); + + setup_without_evm(&provider_rw, SealedHeader::new(header, header_hash), |number| Header { + number, + ..Default::default() + }) + .unwrap(); + + let static_files = provider_factory.static_file_provider(); + let writer = static_files.latest_writer(StaticFileSegment::Headers).unwrap(); + let actual_next_height = writer.next_block_number(); + 
let expected_next_height = 1701; + + assert_eq!(actual_next_height, expected_next_height); + } +} diff --git a/crates/cli/runner/src/lib.rs b/crates/cli/runner/src/lib.rs index 4f8e13ce8cb..79dc6b21142 100644 --- a/crates/cli/runner/src/lib.rs +++ b/crates/cli/runner/src/lib.rs @@ -36,11 +36,15 @@ impl CliRunner { pub const fn from_runtime(tokio_runtime: tokio::runtime::Runtime) -> Self { Self { tokio_runtime } } -} -// === impl CliRunner === + /// Executes an async block on the runtime and blocks until completion. + pub fn block_on(&self, fut: F) -> T + where + F: Future, + { + self.tokio_runtime.block_on(fut) + } -impl CliRunner { /// Executes the given _async_ command on the tokio runtime until the command future resolves or /// until the process receives a `SIGINT` or `SIGTERM` signal. /// diff --git a/crates/e2e-test-utils/src/rpc.rs b/crates/e2e-test-utils/src/rpc.rs index 96dda811735..ff030c390b9 100644 --- a/crates/e2e-test-utils/src/rpc.rs +++ b/crates/e2e-test-utils/src/rpc.rs @@ -1,4 +1,5 @@ -use alloy_consensus::TxEnvelope; +use alloy_consensus::{EthereumTxEnvelope, TxEip4844Variant}; +use alloy_eips::eip7594::BlobTransactionSidecarVariant; use alloy_network::eip2718::Decodable2718; use alloy_primitives::{Bytes, B256}; use reth_chainspec::EthereumHardforks; @@ -30,9 +31,12 @@ where } /// Retrieves a transaction envelope by its hash - pub async fn envelope_by_hash(&self, hash: B256) -> eyre::Result { + pub async fn envelope_by_hash( + &self, + hash: B256, + ) -> eyre::Result>> { let tx = self.inner.debug_api().raw_transaction(hash).await?.unwrap(); let tx = tx.to_vec(); - Ok(TxEnvelope::decode_2718(&mut tx.as_ref()).unwrap()) + Ok(EthereumTxEnvelope::decode_2718(&mut tx.as_ref()).unwrap()) } } diff --git a/crates/e2e-test-utils/src/transaction.rs b/crates/e2e-test-utils/src/transaction.rs index 54f98469242..dd49ac76195 100644 --- a/crates/e2e-test-utils/src/transaction.rs +++ b/crates/e2e-test-utils/src/transaction.rs @@ -1,5 +1,7 @@ -use 
alloy_consensus::{EnvKzgSettings, SidecarBuilder, SimpleCoder, TxEip4844Variant, TxEnvelope}; -use alloy_eips::eip7702::SignedAuthorization; +use alloy_consensus::{ + EnvKzgSettings, EthereumTxEnvelope, SidecarBuilder, SimpleCoder, TxEip4844Variant, TxEnvelope, +}; +use alloy_eips::{eip7594::BlobTransactionSidecarVariant, eip7702::SignedAuthorization}; use alloy_network::{ eip2718::Encodable2718, Ethereum, EthereumWallet, TransactionBuilder, TransactionBuilder4844, }; @@ -146,11 +148,13 @@ impl TransactionTestContext { /// Validates the sidecar of a given tx envelope and returns the versioned hashes #[track_caller] - pub fn validate_sidecar(tx: TxEnvelope) -> Vec { + pub fn validate_sidecar( + tx: EthereumTxEnvelope>, + ) -> Vec { let proof_setting = EnvKzgSettings::Default; match tx { - TxEnvelope::Eip4844(signed) => match signed.tx() { + EthereumTxEnvelope::Eip4844(signed) => match signed.tx() { TxEip4844Variant::TxEip4844WithSidecar(tx) => { tx.validate_blob(proof_setting.get()).unwrap(); tx.sidecar.versioned_hashes().collect() diff --git a/crates/engine/tree/src/chain.rs b/crates/engine/tree/src/chain.rs index e2893bb976a..d1e63a6b3d9 100644 --- a/crates/engine/tree/src/chain.rs +++ b/crates/engine/tree/src/chain.rs @@ -71,7 +71,7 @@ where /// Internal function used to advance the chain. /// /// Polls the `ChainOrchestrator` for the next event. 
- #[tracing::instrument(level = "debug", name = "ChainOrchestrator::poll", skip(self, cx))] + #[tracing::instrument(name = "ChainOrchestrator::poll", skip(self, cx))] fn poll_next_event(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let this = self.get_mut(); diff --git a/crates/engine/tree/src/tree/cached_state.rs b/crates/engine/tree/src/tree/cached_state.rs index 8553a9fe63c..3e9cda38f13 100644 --- a/crates/engine/tree/src/tree/cached_state.rs +++ b/crates/engine/tree/src/tree/cached_state.rs @@ -18,7 +18,7 @@ use reth_trie::{ MultiProofTargets, StorageMultiProof, StorageProof, TrieInput, }; use std::{sync::Arc, time::Duration}; -use tracing::trace; +use tracing::{debug_span, instrument, trace}; pub(crate) type Cache = mini_moka::sync::Cache; @@ -354,6 +354,7 @@ impl ExecutionCache { } /// Invalidates the storage for all addresses in the set + #[instrument(level = "debug", target = "engine::tree", skip_all, fields(accounts = addresses.len()))] pub(crate) fn invalidate_storages(&self, addresses: HashSet<&Address>) { // NOTE: this must collect because the invalidate function should not be called while we // hold an iter for it @@ -385,12 +386,25 @@ impl ExecutionCache { /// ## Error Handling /// /// Returns an error if the state updates are inconsistent and should be discarded. 
+ #[instrument(level = "debug", target = "engine::tree", skip_all)] pub(crate) fn insert_state(&self, state_updates: &BundleState) -> Result<(), ()> { + let _enter = + debug_span!(target: "engine::tree", "contracts", len = state_updates.contracts.len()) + .entered(); // Insert bytecodes for (code_hash, bytecode) in &state_updates.contracts { self.code_cache.insert(*code_hash, Some(Bytecode(bytecode.clone()))); } - + drop(_enter); + + let _enter = debug_span!( + target: "engine::tree", + "accounts", + accounts = state_updates.state.len(), + storages = + state_updates.state.values().map(|account| account.storage.len()).sum::() + ) + .entered(); let mut invalidated_accounts = HashSet::default(); for (addr, account) in &state_updates.state { // If the account was not modified, as in not changed and not destroyed, then we have @@ -474,9 +488,9 @@ impl ExecutionCacheBuilder { .build_with_hasher(DefaultHashBuilder::default()); let account_cache = CacheBuilder::new(self.account_cache_entries) - .weigher(|_key: &Address, _value: &Option| -> u32 { + .weigher(|_key: &Address, value: &Option| -> u32 { // Account has a fixed size (none, balance,code_hash) - size_of::>() as u32 + 20 + size_of_val(value) as u32 }) .max_capacity(account_cache_size) .time_to_live(EXPIRY_TIME) @@ -485,13 +499,19 @@ impl ExecutionCacheBuilder { let code_cache = CacheBuilder::new(self.code_cache_entries) .weigher(|_key: &B256, value: &Option| -> u32 { - match value { + let code_size = match value { Some(bytecode) => { - // base weight + actual bytecode size - (40 + bytecode.len()) as u32 + // base weight + actual (padded) bytecode size + size of the jump table + (size_of_val(value) + + bytecode.bytecode().len() + + bytecode + .legacy_jump_table() + .map(|table| table.as_slice().len()) + .unwrap_or_default()) as u32 } - None => 8, // size of None variant - } + None => size_of_val(value) as u32, + }; + 32 + code_size }) .max_capacity(code_cache_size) .time_to_live(EXPIRY_TIME) diff --git 
a/crates/engine/tree/src/tree/metrics.rs b/crates/engine/tree/src/tree/metrics.rs index c014d8ba15e..1d1e208b0a6 100644 --- a/crates/engine/tree/src/tree/metrics.rs +++ b/crates/engine/tree/src/tree/metrics.rs @@ -79,7 +79,7 @@ impl EngineApiMetrics { for tx in transactions { let tx = tx?; let span = - debug_span!(target: "engine::tree", "execute_tx", tx_hash=?tx.tx().tx_hash()); + debug_span!(target: "engine::tree", "execute tx", tx_hash=?tx.tx().tx_hash()); let _enter = span.enter(); trace!(target: "engine::tree", "Executing transaction"); executor.execute_transaction(tx)?; diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index e66b2a8892e..a189b643f98 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -496,7 +496,12 @@ where /// /// This returns a [`PayloadStatus`] that represents the outcome of a processed new payload and /// returns an error if an internal error occurred. - #[instrument(level = "trace", skip_all, fields(block_hash = %payload.block_hash(), block_num = %payload.block_number(),), target = "engine::tree")] + #[instrument( + level = "debug", + target = "engine::tree", + skip_all, + fields(block_hash = %payload.block_hash(), block_num = %payload.block_number()), + )] fn on_new_payload( &mut self, payload: T::ExecutionData, @@ -577,6 +582,7 @@ where /// - `Valid`: Payload successfully validated and inserted /// - `Syncing`: Parent missing, payload buffered for later /// - Error status: Payload is invalid + #[instrument(level = "debug", target = "engine::tree", skip_all)] fn try_insert_payload( &mut self, payload: T::ExecutionData, @@ -970,7 +976,7 @@ where /// `engine_forkchoiceUpdated`](https://github.com/ethereum/execution-apis/blob/main/src/engine/paris.md#specification-1). /// /// Returns an error if an internal error occurred like a database error. 
- #[instrument(level = "trace", skip_all, fields(head = % state.head_block_hash, safe = % state.safe_block_hash,finalized = % state.finalized_block_hash), target = "engine::tree")] + #[instrument(level = "debug", target = "engine::tree", skip_all, fields(head = % state.head_block_hash, safe = % state.safe_block_hash,finalized = % state.finalized_block_hash))] fn on_forkchoice_updated( &mut self, state: ForkchoiceState, @@ -1972,7 +1978,7 @@ where } /// Attempts to connect any buffered blocks that are connected to the given parent hash. - #[instrument(level = "trace", skip(self), target = "engine::tree")] + #[instrument(level = "debug", target = "engine::tree", skip(self))] fn try_connect_buffered_blocks( &mut self, parent: BlockNumHash, @@ -2281,7 +2287,7 @@ where /// Returns an event with the appropriate action to take, such as: /// - download more missing blocks /// - try to canonicalize the target if the `block` is the tracked target (head) block. - #[instrument(level = "trace", skip_all, fields(block_hash = %block.hash(), block_num = %block.number(),), target = "engine::tree")] + #[instrument(level = "debug", target = "engine::tree", skip_all, fields(block_hash = %block.hash(), block_num = %block.number()))] fn on_downloaded_block( &mut self, block: RecoveredBlock, @@ -2387,6 +2393,7 @@ where /// Returns `InsertPayloadOk::Inserted(BlockStatus::Valid)` on successful execution, /// `InsertPayloadOk::AlreadySeen` if the block already exists, or /// `InsertPayloadOk::Inserted(BlockStatus::Disconnected)` if parent state is missing. 
+ #[instrument(level = "debug", target = "engine::tree", skip_all, fields(block_id))] fn insert_block_or_payload( &mut self, block_id: BlockWithParent, diff --git a/crates/engine/tree/src/tree/payload_processor/configured_sparse_trie.rs b/crates/engine/tree/src/tree/payload_processor/configured_sparse_trie.rs index 176cffcd8fa..90e8928dba2 100644 --- a/crates/engine/tree/src/tree/payload_processor/configured_sparse_trie.rs +++ b/crates/engine/tree/src/tree/payload_processor/configured_sparse_trie.rs @@ -172,4 +172,18 @@ impl SparseTrieInterface for ConfiguredSparseTrie { Self::Parallel(trie) => trie.updates_ref(), } } + + fn node_capacity(&self) -> usize { + match self { + Self::Serial(trie) => trie.node_capacity(), + Self::Parallel(trie) => trie.node_capacity(), + } + } + + fn value_capacity(&self) -> usize { + match self { + Self::Serial(trie) => trie.value_capacity(), + Self::Parallel(trie) => trie.value_capacity(), + } + } } diff --git a/crates/engine/tree/src/tree/payload_processor/executor.rs b/crates/engine/tree/src/tree/payload_processor/executor.rs index 3013c5e1c72..28165d5e8f2 100644 --- a/crates/engine/tree/src/tree/payload_processor/executor.rs +++ b/crates/engine/tree/src/tree/payload_processor/executor.rs @@ -1,10 +1,6 @@ //! Executor for mixed I/O and CPU workloads. -use rayon::ThreadPool as RayonPool; -use std::{ - sync::{Arc, OnceLock}, - time::Duration, -}; +use std::{sync::OnceLock, time::Duration}; use tokio::{ runtime::{Builder, Handle, Runtime}, task::JoinHandle, @@ -12,9 +8,8 @@ use tokio::{ /// An executor for mixed I/O and CPU workloads. /// -/// This type has access to its own rayon pool and uses tokio to spawn blocking tasks. -/// -/// It will reuse an existing tokio runtime if available or create its own. +/// This type uses tokio to spawn blocking tasks and will reuse an existing tokio +/// runtime if available or create its own. 
#[derive(Debug, Clone)] pub struct WorkloadExecutor { inner: WorkloadExecutorInner, @@ -22,21 +17,11 @@ pub struct WorkloadExecutor { impl Default for WorkloadExecutor { fn default() -> Self { - Self { inner: WorkloadExecutorInner::new(rayon::ThreadPoolBuilder::new().build().unwrap()) } + Self { inner: WorkloadExecutorInner::new() } } } impl WorkloadExecutor { - /// Creates a new executor with the given number of threads for cpu bound work (rayon). - #[expect(unused)] - pub(super) fn with_num_cpu_threads(cpu_threads: usize) -> Self { - Self { - inner: WorkloadExecutorInner::new( - rayon::ThreadPoolBuilder::new().num_threads(cpu_threads).build().unwrap(), - ), - } - } - /// Returns the handle to the tokio runtime pub(super) const fn handle(&self) -> &Handle { &self.inner.handle @@ -51,22 +36,15 @@ impl WorkloadExecutor { { self.inner.handle.spawn_blocking(func) } - - /// Returns access to the rayon pool - #[expect(unused)] - pub(super) const fn rayon_pool(&self) -> &Arc { - &self.inner.rayon_pool - } } #[derive(Debug, Clone)] struct WorkloadExecutorInner { handle: Handle, - rayon_pool: Arc, } impl WorkloadExecutorInner { - fn new(rayon_pool: rayon::ThreadPool) -> Self { + fn new() -> Self { fn get_runtime_handle() -> Handle { Handle::try_current().unwrap_or_else(|_| { // Create a new runtime if no runtime is available @@ -90,6 +68,6 @@ impl WorkloadExecutorInner { }) } - Self { handle: get_runtime_handle(), rayon_pool: Arc::new(rayon_pool) } + Self { handle: get_runtime_handle() } } } diff --git a/crates/engine/tree/src/tree/payload_processor/mod.rs b/crates/engine/tree/src/tree/payload_processor/mod.rs index d2e48a49899..8d6230dd82f 100644 --- a/crates/engine/tree/src/tree/payload_processor/mod.rs +++ b/crates/engine/tree/src/tree/payload_processor/mod.rs @@ -45,7 +45,7 @@ use std::sync::{ mpsc::{self, channel, Sender}, Arc, }; -use tracing::{debug, instrument, warn}; +use tracing::{debug, debug_span, instrument, warn}; mod configured_sparse_trie; pub mod executor; 
@@ -167,6 +167,12 @@ where /// This returns a handle to await the final state root and to interact with the tasks (e.g. /// canceling) #[allow(clippy::type_complexity)] + #[instrument( + level = "debug", + target = "engine::tree::payload_processor", + name = "payload processor", + skip_all + )] pub fn spawn>( &mut self, env: ExecutionEnv, @@ -236,7 +242,9 @@ where ); // spawn multi-proof task + let span = tracing::Span::current(); self.executor.spawn_blocking(move || { + let _enter = span.entered(); multi_proof_task.run(); }); @@ -257,6 +265,7 @@ where /// Spawns a task that exclusively handles cache prewarming for transaction execution. /// /// Returns a [`PayloadHandle`] to communicate with the task. + #[instrument(level = "debug", target = "engine::tree::payload_processor", skip_all)] pub(super) fn spawn_cache_exclusive>( &self, env: ExecutionEnv, @@ -353,7 +362,9 @@ where // spawn pre-warm task { let to_prewarm_task = to_prewarm_task.clone(); + let span = debug_span!(target: "engine::tree::payload_processor", "prewarm task"); self.executor.spawn_blocking(move || { + let _enter = span.entered(); prewarm_task.run(transactions, to_prewarm_task); }); } @@ -370,7 +381,7 @@ where /// /// If the given hash is different then what is recently cached, then this will create a new /// instance. - #[instrument(target = "engine::caching", skip(self))] + #[instrument(level = "debug", target = "engine::caching", skip(self))] fn cache_for(&self, parent_hash: B256) -> SavedCache { if let Some(cache) = self.execution_cache.get_cache_for(parent_hash) { debug!("reusing execution cache"); @@ -383,6 +394,7 @@ where } /// Spawns the [`SparseTrieTask`] for this payload processor. 
+ #[instrument(level = "debug", target = "engine::tree::payload_processor", skip_all)] fn spawn_sparse_trie_task( &self, sparse_trie_rx: mpsc::Receiver, @@ -421,13 +433,18 @@ where sparse_state_trie, ); + let span = tracing::Span::current(); self.executor.spawn_blocking(move || { + let _enter = span.entered(); + let (result, trie) = task.run(); // Send state root computation result let _ = state_root_tx.send(result); - // Clear the SparseStateTrie and replace it back into the mutex _after_ sending results - // to the next step, so that time spent clearing doesn't block the step after this one. + // Clear the SparseStateTrie and replace it back into the mutex _after_ sending + // results to the next step, so that time spent clearing doesn't block the step after + // this one. + let _enter = debug_span!(target: "engine::tree::payload_processor", "clear").entered(); cleared_sparse_trie.lock().replace(ClearedSparseStateTrie::from_state_trie(trie)); }); } @@ -452,6 +469,7 @@ impl PayloadHandle { /// # Panics /// /// If payload processing was started without background tasks. + #[instrument(level = "debug", target = "engine::tree::payload_processor", skip_all)] pub fn state_root(&mut self) -> Result { self.state_root .take() diff --git a/crates/engine/tree/src/tree/payload_processor/multiproof.rs b/crates/engine/tree/src/tree/payload_processor/multiproof.rs index a528b759570..163714483fd 100644 --- a/crates/engine/tree/src/tree/payload_processor/multiproof.rs +++ b/crates/engine/tree/src/tree/payload_processor/multiproof.rs @@ -32,7 +32,7 @@ use std::{ }, time::{Duration, Instant}, }; -use tracing::{debug, error, trace}; +use tracing::{debug, error, instrument, trace}; /// A trie update that can be applied to sparse trie alongside the proofs for touched parts of the /// state. @@ -718,6 +718,7 @@ impl MultiProofTask { /// Handles request for proof prefetch. /// /// Returns a number of proofs that were spawned. 
+ #[instrument(level = "debug", target = "engine::tree::payload_processor::multiproof", skip_all, fields(accounts = targets.len()))] fn on_prefetch_proof(&mut self, targets: MultiProofTargets) -> u64 { let proof_targets = self.get_prefetch_proof_targets(targets); self.fetched_proof_targets.extend_ref(&proof_targets); @@ -844,6 +845,7 @@ impl MultiProofTask { /// Handles state updates. /// /// Returns a number of proofs that were spawned. + #[instrument(level = "debug", target = "engine::tree::payload_processor::multiproof", skip(self, update), fields(accounts = update.len()))] fn on_state_update(&mut self, source: StateChangeSource, update: EvmState) -> u64 { let hashed_state_update = evm_state_to_hashed_post_state(update); @@ -973,6 +975,7 @@ impl MultiProofTask { /// currently being calculated, or if there are any pending proofs in the proof sequencer /// left to be revealed by checking the pending tasks. /// 6. This task exits after all pending proofs are processed. + #[instrument(level = "debug", target = "engine::tree::payload_processor::multiproof", skip_all)] pub(crate) fn run(mut self) { // TODO convert those into fields let mut prefetch_proofs_requested = 0; diff --git a/crates/engine/tree/src/tree/payload_processor/prewarm.rs b/crates/engine/tree/src/tree/payload_processor/prewarm.rs index 44293614d3d..de8a88a167b 100644 --- a/crates/engine/tree/src/tree/payload_processor/prewarm.rs +++ b/crates/engine/tree/src/tree/payload_processor/prewarm.rs @@ -39,7 +39,7 @@ use std::{ }, time::Instant, }; -use tracing::{debug, trace, warn}; +use tracing::{debug, debug_span, instrument, trace, warn}; /// A wrapper for transactions that includes their index in the block. 
#[derive(Clone)] @@ -139,8 +139,11 @@ where let ctx = self.ctx.clone(); let max_concurrency = self.max_concurrency; let transaction_count_hint = self.transaction_count_hint; + let span = tracing::Span::current(); self.executor.spawn_blocking(move || { + let _enter = debug_span!(target: "engine::tree::payload_processor::prewarm", parent: span, "spawn_all").entered(); + let (done_tx, done_rx) = mpsc::channel(); let mut executing = 0usize; @@ -157,8 +160,8 @@ where }; // Only spawn initial workers as needed - for _ in 0..workers_needed { - handles.push(ctx.spawn_worker(&executor, actions_tx.clone(), done_tx.clone())); + for i in 0..workers_needed { + handles.push(ctx.spawn_worker(i, &executor, actions_tx.clone(), done_tx.clone())); } let mut tx_index = 0usize; @@ -248,6 +251,7 @@ where /// the new, warmed cache to be inserted. /// /// This method is called from `run()` only after all execution tasks are complete. + #[instrument(level = "debug", target = "engine::tree::payload_processor::prewarm", skip_all)] fn save_cache(self, state: BundleState) { let start = Instant::now(); @@ -284,6 +288,12 @@ where /// /// This will execute the transactions until all transactions have been processed or the task /// was cancelled. + #[instrument( + level = "debug", + target = "engine::tree::payload_processor::prewarm", + name = "prewarm", + skip_all + )] pub(super) fn run( self, pending: mpsc::Receiver + Clone + Send + 'static>, @@ -364,6 +374,7 @@ where { /// Splits this context into an evm, an evm config, metrics, and the atomic bool for terminating /// execution. 
+ #[instrument(level = "debug", target = "engine::tree::payload_processor::prewarm", skip_all)] fn evm_for_ctx(self) -> Option<(EvmFor, PrewarmMetrics, Arc)> { let Self { env, @@ -380,7 +391,7 @@ where Ok(provider) => provider, Err(err) => { trace!( - target: "engine::tree", + target: "engine::tree::payload_processor::prewarm", %err, "Failed to build state provider in prewarm thread" ); @@ -429,6 +440,7 @@ where /// /// Note: There are no ordering guarantees; this does not reflect the state produced by /// sequential execution. + #[instrument(level = "debug", target = "engine::tree::payload_processor::prewarm", skip_all)] fn transact_batch( self, txs: mpsc::Receiver>, @@ -439,7 +451,15 @@ where { let Some((mut evm, metrics, terminate_execution)) = self.evm_for_ctx() else { return }; - while let Ok(IndexedTransaction { index, tx }) = txs.recv() { + while let Ok(IndexedTransaction { index, tx }) = { + let _enter = debug_span!(target: "engine::tree::payload_processor::prewarm", "recv tx") + .entered(); + txs.recv() + } { + let _enter = + debug_span!(target: "engine::tree::payload_processor::prewarm", "prewarm tx", index, tx_hash=%tx.tx().tx_hash()) + .entered(); + // If the task was cancelled, stop execution, send an empty result to notify the task, // and exit. 
if terminate_execution.load(Ordering::Relaxed) { @@ -467,12 +487,18 @@ where }; metrics.execution_duration.record(start.elapsed()); + drop(_enter); + // Only send outcome for transactions after the first txn // as the main execution will be just as fast if index > 0 { + let _enter = + debug_span!(target: "engine::tree::payload_processor::prewarm", "prewarm outcome", index, tx_hash=%tx.tx().tx_hash()) + .entered(); let (targets, storage_targets) = multiproof_targets_from_state(res.state); metrics.prefetch_storage_targets.record(storage_targets as f64); let _ = sender.send(PrewarmTaskEvent::Outcome { proof_targets: Some(targets) }); + drop(_enter); } metrics.total_runtime.record(start.elapsed()); @@ -485,6 +511,7 @@ where /// Spawns a worker task for transaction execution and returns its sender channel. fn spawn_worker( &self, + idx: usize, executor: &WorkloadExecutor, actions_tx: Sender, done_tx: Sender<()>, @@ -494,8 +521,11 @@ where { let (tx, rx) = mpsc::channel(); let ctx = self.clone(); + let span = + debug_span!(target: "engine::tree::payload_processor::prewarm", "prewarm worker", idx); executor.spawn_blocking(move || { + let _enter = span.entered(); ctx.transact_batch(rx, actions_tx, done_tx); }); diff --git a/crates/engine/tree/src/tree/payload_processor/sparse_trie.rs b/crates/engine/tree/src/tree/payload_processor/sparse_trie.rs index c16f7b6e4f4..6302abde5fb 100644 --- a/crates/engine/tree/src/tree/payload_processor/sparse_trie.rs +++ b/crates/engine/tree/src/tree/payload_processor/sparse_trie.rs @@ -15,7 +15,7 @@ use std::{ sync::mpsc, time::{Duration, Instant}, }; -use tracing::{debug, trace, trace_span}; +use tracing::{debug, debug_span, instrument, trace}; /// A task responsible for populating the sparse trie. pub(super) struct SparseTrieTask @@ -61,6 +61,11 @@ where /// /// - State root computation outcome. /// - `SparseStateTrie` that needs to be cleared and reused to avoid reallocations. 
+ #[instrument( + level = "debug", + target = "engine::tree::payload_processor::sparse_trie", + skip_all + )] pub(super) fn run( mut self, ) -> (Result, SparseStateTrie) { @@ -80,10 +85,14 @@ where while let Ok(mut update) = self.updates.recv() { num_iterations += 1; let mut num_updates = 1; + let _enter = + debug_span!(target: "engine::tree::payload_processor::sparse_trie", "drain updates") + .entered(); while let Ok(next) = self.updates.try_recv() { update.extend(next); num_updates += 1; } + drop(_enter); debug!( target: "engine::root", @@ -130,6 +139,7 @@ pub struct StateRootComputeOutcome { } /// Updates the sparse trie with the given proofs and state, and returns the elapsed time. +#[instrument(level = "debug", target = "engine::tree::payload_processor::sparse_trie", skip_all)] pub(crate) fn update_sparse_trie( trie: &mut SparseStateTrie, SparseTrieUpdate { mut state, multiproof }: SparseTrieUpdate, @@ -155,6 +165,7 @@ where ); // Update storage slots with new values and calculate storage roots. 
+ let span = tracing::Span::current(); let (tx, rx) = mpsc::channel(); state .storages @@ -162,14 +173,16 @@ where .map(|(address, storage)| (address, storage, trie.take_storage_trie(&address))) .par_bridge() .map(|(address, storage, storage_trie)| { - let span = trace_span!(target: "engine::root::sparse", "Storage trie", ?address); - let _enter = span.enter(); - trace!(target: "engine::root::sparse", "Updating storage"); + let _enter = + debug_span!(target: "engine::tree::payload_processor::sparse_trie", parent: span.clone(), "storage trie", ?address) + .entered(); + + trace!(target: "engine::tree::payload_processor::sparse_trie", "Updating storage"); let storage_provider = blinded_provider_factory.storage_node_provider(address); let mut storage_trie = storage_trie.ok_or(SparseTrieErrorKind::Blind)?; if storage.wiped { - trace!(target: "engine::root::sparse", "Wiping storage"); + trace!(target: "engine::tree::payload_processor::sparse_trie", "Wiping storage"); storage_trie.wipe()?; } @@ -187,7 +200,7 @@ where continue; } - trace!(target: "engine::root::sparse", ?slot_nibbles, "Updating storage slot"); + trace!(target: "engine::tree::payload_processor::sparse_trie", ?slot_nibbles, "Updating storage slot"); storage_trie.update_leaf( slot_nibbles, alloy_rlp::encode_fixed_size(&value).to_vec(), @@ -219,6 +232,9 @@ where let mut removed_accounts = Vec::new(); // Update account storage roots + let _enter = + tracing::debug_span!(target: "engine::tree::payload_processor::sparse_trie", "account trie") + .entered(); for result in rx { let (address, storage_trie) = result?; trie.insert_storage_trie(address, storage_trie); diff --git a/crates/engine/tree/src/tree/payload_validator.rs b/crates/engine/tree/src/tree/payload_validator.rs index 4a3d45af8fd..253c6c0e183 100644 --- a/crates/engine/tree/src/tree/payload_validator.rs +++ b/crates/engine/tree/src/tree/payload_validator.rs @@ -44,9 +44,8 @@ use reth_trie::{ }; use reth_trie_db::DatabaseHashedPostState; use 
reth_trie_parallel::root::{ParallelStateRoot, ParallelStateRootError}; -use revm::context::Block; use std::{collections::HashMap, sync::Arc, time::Instant}; -use tracing::{debug, debug_span, error, info, trace, warn}; +use tracing::{debug, debug_span, error, info, instrument, trace, warn}; /// Context providing access to tree state during validation. /// @@ -289,7 +288,7 @@ where V: PayloadValidator, { debug!( - target: "engine::tree", + target: "engine::tree::payload_validator", ?execution_err, block = ?input.num_hash(), "Block execution failed, checking for header validation errors" @@ -324,6 +323,15 @@ where /// - Block execution /// - State root computation /// - Fork detection + #[instrument( + level = "debug", + target = "engine::tree::payload_validator", + skip_all, + fields( + parent = ?input.parent_hash(), + block_num_hash = ?input.num_hash() + ) + )] pub fn validate_block_with_state>>( &mut self, input: BlockOrPayload, @@ -366,7 +374,9 @@ where let parent_hash = input.parent_hash(); let block_num_hash = input.num_hash(); - trace!(target: "engine::tree", block=?block_num_hash, parent=?parent_hash, "Fetching block state provider"); + trace!(target: "engine::tree::payload_validator", "Fetching block state provider"); + let _enter = + debug_span!(target: "engine::tree::payload_validator", "state provider").entered(); let Some(provider_builder) = ensure_ok!(self.state_provider_builder(parent_hash, ctx.state())) else { @@ -377,8 +387,8 @@ where ) .into()) }; - let state_provider = ensure_ok!(provider_builder.build()); + drop(_enter); // fetch parent block let Some(parent_block) = ensure_ok!(self.sealed_header_by_hash(parent_hash, ctx.state())) @@ -390,7 +400,9 @@ where .into()) }; - let evm_env = self.evm_env_for(&input).map_err(NewPayloadError::other)?; + let evm_env = debug_span!(target: "engine::tree::payload_validator", "evm env") + .in_scope(|| self.evm_env_for(&input)) + .map_err(NewPayloadError::other)?; let env = ExecutionEnv { evm_env, hash: 
input.hash(), parent_hash: input.parent_hash() }; @@ -400,8 +412,7 @@ where let strategy = state_root_plan.strategy; debug!( - target: "engine::tree", - block=?block_num_hash, + target: "engine::tree::payload_validator", ?strategy, "Deciding which state root algorithm to run" ); @@ -417,7 +428,6 @@ where persisting_kind, parent_hash, ctx.state(), - block_num_hash, strategy, )); @@ -452,7 +462,7 @@ where block ); - debug!(target: "engine::tree", block=?block_num_hash, "Calculating block state root"); + debug!(target: "engine::tree::payload_validator", "Calculating block state root"); let root_time = Instant::now(); @@ -460,17 +470,17 @@ where match strategy { StateRootStrategy::StateRootTask => { - debug!(target: "engine::tree", block=?block_num_hash, "Using sparse trie state root algorithm"); + debug!(target: "engine::tree::payload_validator", "Using sparse trie state root algorithm"); match handle.state_root() { Ok(StateRootComputeOutcome { state_root, trie_updates }) => { let elapsed = root_time.elapsed(); - info!(target: "engine::tree", ?state_root, ?elapsed, "State root task finished"); + info!(target: "engine::tree::payload_validator", ?state_root, ?elapsed, "State root task finished"); // we double check the state root here for good measure if state_root == block.header().state_root() { maybe_state_root = Some((state_root, trie_updates, elapsed)) } else { warn!( - target: "engine::tree", + target: "engine::tree::payload_validator", ?state_root, block_state_root = ?block.header().state_root(), "State root task returned incorrect state root" @@ -478,12 +488,12 @@ where } } Err(error) => { - debug!(target: "engine::tree", %error, "State root task failed"); + debug!(target: "engine::tree::payload_validator", %error, "State root task failed"); } } } StateRootStrategy::Parallel => { - debug!(target: "engine::tree", block=?block_num_hash, "Using parallel state root algorithm"); + debug!(target: "engine::tree::payload_validator", "Using parallel state root 
algorithm"); match self.compute_state_root_parallel( persisting_kind, block.parent_hash(), @@ -493,8 +503,7 @@ where Ok(result) => { let elapsed = root_time.elapsed(); info!( - target: "engine::tree", - block = ?block_num_hash, + target: "engine::tree::payload_validator", regular_state_root = ?result.0, ?elapsed, "Regular root task finished" @@ -502,7 +511,7 @@ where maybe_state_root = Some((result.0, result.1, elapsed)); } Err(error) => { - debug!(target: "engine::tree", %error, "Parallel state root computation failed"); + debug!(target: "engine::tree::payload_validator", %error, "Parallel state root computation failed"); } } } @@ -519,9 +528,9 @@ where } else { // fallback is to compute the state root regularly in sync if self.config.state_root_fallback() { - debug!(target: "engine::tree", block=?block_num_hash, "Using state root fallback for testing"); + debug!(target: "engine::tree::payload_validator", "Using state root fallback for testing"); } else { - warn!(target: "engine::tree", block=?block_num_hash, ?persisting_kind, "Failed to compute state root in parallel"); + warn!(target: "engine::tree::payload_validator", ?persisting_kind, "Failed to compute state root in parallel"); self.metrics.block_validation.state_root_parallel_fallback_total.increment(1); } @@ -533,7 +542,7 @@ where }; self.metrics.block_validation.record_state_root(&trie_output, root_elapsed.as_secs_f64()); - debug!(target: "engine::tree", ?root_elapsed, block=?block_num_hash, "Calculated state root"); + debug!(target: "engine::tree::payload_validator", ?root_elapsed, "Calculated state root"); // ensure state root matches if state_root != block.header().state_root() { @@ -587,12 +596,12 @@ where /// and block body itself. 
fn validate_block_inner(&self, block: &RecoveredBlock) -> Result<(), ConsensusError> { if let Err(e) = self.consensus.validate_header(block.sealed_header()) { - error!(target: "engine::tree", ?block, "Failed to validate header {}: {e}", block.hash()); + error!(target: "engine::tree::payload_validator", ?block, "Failed to validate header {}: {e}", block.hash()); return Err(e) } if let Err(e) = self.consensus.validate_block_pre_execution(block.sealed_block()) { - error!(target: "engine::tree", ?block, "Failed to validate block {}: {e}", block.hash()); + error!(target: "engine::tree::payload_validator", ?block, "Failed to validate block {}: {e}", block.hash()); return Err(e) } @@ -600,6 +609,7 @@ where } /// Executes a block with the given state provider + #[instrument(level = "debug", target = "engine::tree::payload_validator", skip_all)] fn execute_block( &mut self, state_provider: S, @@ -614,11 +624,7 @@ where T: PayloadTypes>, Evm: ConfigureEngineEvm, { - let num_hash = NumHash::new(env.evm_env.block_env.number().to(), env.hash); - - let span = debug_span!(target: "engine::tree", "execute_block", num = ?num_hash.number, hash = ?num_hash.hash); - let _enter = span.enter(); - debug!(target: "engine::tree", "Executing block"); + debug!(target: "engine::tree::payload_validator", "Executing block"); let mut db = State::builder() .with_database(StateProviderDatabase::new(&state_provider)) @@ -657,7 +663,7 @@ where )?; let execution_finish = Instant::now(); let execution_time = execution_finish.duration_since(execution_start); - debug!(target: "engine::tree", elapsed = ?execution_time, number=?num_hash.number, "Executed block"); + debug!(target: "engine::tree::payload_validator", elapsed = ?execution_time, "Executed block"); Ok(output) } @@ -669,6 +675,7 @@ where /// Returns `Err(_)` if error was encountered during computation. /// `Err(ProviderError::ConsistentView(_))` can be safely ignored and fallback computation /// should be used instead. 
+ #[instrument(level = "debug", target = "engine::tree::payload_validator", skip_all)] fn compute_state_root_parallel( &self, persisting_kind: PersistingKind, @@ -709,7 +716,7 @@ where { let start = Instant::now(); - trace!(target: "engine::tree", block=?block.num_hash(), "Validating block consensus"); + trace!(target: "engine::tree::payload_validator", block=?block.num_hash(), "Validating block consensus"); // validate block consensus rules if let Err(e) = self.validate_block_inner(block) { return Err(e.into()) @@ -719,7 +726,7 @@ where if let Err(e) = self.consensus.validate_header_against_parent(block.sealed_header(), parent_block) { - warn!(target: "engine::tree", ?block, "Failed to validate header {} against parent: {e}", block.hash()); + warn!(target: "engine::tree::payload_validator", ?block, "Failed to validate header {} against parent: {e}", block.hash()); return Err(e.into()) } @@ -759,6 +766,12 @@ where /// The method handles strategy fallbacks if the preferred approach fails, ensuring /// block execution always completes with a valid state root. 
#[allow(clippy::too_many_arguments)] + #[instrument( + level = "debug", + target = "engine::tree::payload_validator", + skip_all, + fields(strategy) + )] fn spawn_payload_processor>( &mut self, env: ExecutionEnv, @@ -767,7 +780,6 @@ where persisting_kind: PersistingKind, parent_hash: B256, state: &EngineApiTreeState, - block_num_hash: NumHash, strategy: StateRootStrategy, ) -> Result< ( @@ -821,8 +833,7 @@ where Err((error, txs, env, provider_builder)) => { // Failed to spawn proof workers, fallback to parallel state root error!( - target: "engine::tree", - block=?block_num_hash, + target: "engine::tree::payload_validator", ?error, "Failed to spawn proof workers, falling back to parallel state root" ); @@ -840,8 +851,7 @@ where // prewarming for transaction execution } else { debug!( - target: "engine::tree", - block=?block_num_hash, + target: "engine::tree::payload_validator", "Disabling state root task due to non-empty prefix sets" ); ( @@ -884,7 +894,7 @@ where state: &EngineApiTreeState, ) -> ProviderResult>> { if let Some((historical, blocks)) = state.tree_state.blocks_by_hash(hash) { - debug!(target: "engine::tree", %hash, %historical, "found canonical state for block in memory, creating provider builder"); + debug!(target: "engine::tree::payload_validator", %hash, %historical, "found canonical state for block in memory, creating provider builder"); // the block leads back to the canonical chain return Ok(Some(StateProviderBuilder::new( self.provider.clone(), @@ -895,17 +905,18 @@ where // Check if the block is persisted if let Some(header) = self.provider.header(hash)? 
{ - debug!(target: "engine::tree", %hash, number = %header.number(), "found canonical state for block in database, creating provider builder"); + debug!(target: "engine::tree::payload_validator", %hash, number = %header.number(), "found canonical state for block in database, creating provider builder"); // For persisted blocks, we create a builder that will fetch state directly from the // database return Ok(Some(StateProviderBuilder::new(self.provider.clone(), hash, None))) } - debug!(target: "engine::tree", %hash, "no canonical state found for block"); + debug!(target: "engine::tree::payload_validator", %hash, "no canonical state found for block"); Ok(None) } /// Determines the state root computation strategy based on persistence state and configuration. + #[instrument(level = "debug", target = "engine::tree::payload_validator", skip_all)] fn plan_state_root_computation>>( &self, input: &BlockOrPayload, @@ -939,7 +950,7 @@ where }; debug!( - target: "engine::tree", + target: "engine::tree::payload_validator", block=?input.num_hash(), ?strategy, "Planned state root computation strategy" @@ -979,6 +990,12 @@ where /// block. /// 3. Once in-memory blocks are collected and optionally filtered, we compute the /// [`HashedPostState`] from them. + #[instrument( + level = "debug", + target = "engine::tree::payload_validator", + skip_all, + fields(persisting_kind, parent_hash) + )] fn compute_trie_input( &self, persisting_kind: PersistingKind, @@ -999,6 +1016,9 @@ where // If the current block is a descendant of the currently persisting blocks, then we need to // filter in-memory blocks, so that none of them are already persisted in the database. + let _enter = + debug_span!(target: "engine::tree::payload_validator", "filter in-memory blocks", len = blocks.len()) + .entered(); if persisting_kind.is_descendant() { // Iterate over the blocks from oldest to newest. 
while let Some(block) = blocks.last() { @@ -1023,11 +1043,13 @@ where parent_hash.into() }; } + drop(_enter); - if blocks.is_empty() { - debug!(target: "engine::tree", %parent_hash, "Parent found on disk"); + let blocks_empty = blocks.is_empty(); + if blocks_empty { + debug!(target: "engine::tree::payload_validator", "Parent found on disk"); } else { - debug!(target: "engine::tree", %parent_hash, %historical, blocks = blocks.len(), "Parent found in memory"); + debug!(target: "engine::tree::payload_validator", %historical, blocks = blocks.len(), "Parent found in memory"); } // Convert the historical block to the block number. @@ -1035,12 +1057,15 @@ where .convert_hash_or_number(historical)? .ok_or_else(|| ProviderError::BlockHashNotFound(historical.as_hash().unwrap()))?; + let _enter = + debug_span!(target: "engine::tree::payload_validator", "revert state", blocks_empty) + .entered(); // Retrieve revert state for historical block. let (revert_state, revert_trie) = if block_number == best_block_number { // We do not check against the `last_block_number` here because // `HashedPostState::from_reverts` / `trie_reverts` only use the database tables, and // not static files. 
- debug!(target: "engine::tree", block_number, best_block_number, "Empty revert state"); + debug!(target: "engine::tree::payload_validator", block_number, best_block_number, "Empty revert state"); (HashedPostState::default(), TrieUpdatesSorted::default()) } else { let revert_state = HashedPostState::from_reverts::( @@ -1050,7 +1075,7 @@ where .map_err(ProviderError::from)?; let revert_trie = provider.trie_reverts(block_number + 1)?; debug!( - target: "engine::tree", + target: "engine::tree::payload_validator", block_number, best_block_number, accounts = revert_state.accounts.len(), diff --git a/crates/ethereum/cli/src/app.rs b/crates/ethereum/cli/src/app.rs index c0e2e4662ca..ab3682be6dc 100644 --- a/crates/ethereum/cli/src/app.rs +++ b/crates/ethereum/cli/src/app.rs @@ -82,10 +82,7 @@ where ) -> Result<()>, ) -> Result<()> where - N: CliNodeTypes< - Primitives: NodePrimitives, - ChainSpec: Hardforks + EthChainSpec, - >, + N: CliNodeTypes, ChainSpec: Hardforks>, C: ChainSpecParser, { let runner = match self.runner.take() { diff --git a/crates/ethereum/node/tests/e2e/blobs.rs b/crates/ethereum/node/tests/e2e/blobs.rs index 8fd9d08d2dc..1c088e33da6 100644 --- a/crates/ethereum/node/tests/e2e/blobs.rs +++ b/crates/ethereum/node/tests/e2e/blobs.rs @@ -1,15 +1,21 @@ use crate::utils::eth_payload_attributes; +use alloy_eips::Decodable2718; use alloy_genesis::Genesis; use reth_chainspec::{ChainSpecBuilder, MAINNET}; use reth_e2e_test_utils::{ node::NodeTestContext, transaction::TransactionTestContext, wallet::Wallet, }; +use reth_ethereum_engine_primitives::BlobSidecars; +use reth_ethereum_primitives::PooledTransactionVariant; use reth_node_builder::{NodeBuilder, NodeHandle}; use reth_node_core::{args::RpcServerArgs, node_config::NodeConfig}; use reth_node_ethereum::EthereumNode; use reth_tasks::TaskManager; use reth_transaction_pool::TransactionPool; -use std::sync::Arc; +use std::{ + sync::Arc, + time::{Duration, SystemTime, UNIX_EPOCH}, +}; #[tokio::test] async fn 
can_handle_blobs() -> eyre::Result<()> { @@ -82,3 +88,165 @@ async fn can_handle_blobs() -> eyre::Result<()> { Ok(()) } + +#[tokio::test] +async fn can_send_legacy_sidecar_post_activation() -> eyre::Result<()> { + reth_tracing::init_test_tracing(); + let tasks = TaskManager::current(); + let exec = tasks.executor(); + + let genesis: Genesis = serde_json::from_str(include_str!("../assets/genesis.json")).unwrap(); + let chain_spec = Arc::new( + ChainSpecBuilder::default().chain(MAINNET.chain).genesis(genesis).osaka_activated().build(), + ); + let genesis_hash = chain_spec.genesis_hash(); + let node_config = NodeConfig::test() + .with_chain(chain_spec) + .with_unused_ports() + .with_rpc(RpcServerArgs::default().with_unused_ports().with_http()); + let NodeHandle { node, node_exit_future: _ } = NodeBuilder::new(node_config.clone()) + .testing_node(exec.clone()) + .node(EthereumNode::default()) + .launch() + .await?; + + let mut node = NodeTestContext::new(node, eth_payload_attributes).await?; + + let wallets = Wallet::new(2).wallet_gen(); + let blob_wallet = wallets.first().unwrap(); + + // build blob tx + let blob_tx = TransactionTestContext::tx_with_blobs_bytes(1, blob_wallet.clone()).await?; + + let tx = PooledTransactionVariant::decode_2718_exact(&blob_tx).unwrap(); + assert!(tx.as_eip4844().unwrap().tx().sidecar.is_eip4844()); + + // inject blob tx to the pool + let blob_tx_hash = node.rpc.inject_tx(blob_tx).await?; + // fetch it from rpc + let envelope = node.rpc.envelope_by_hash(blob_tx_hash).await?; + // assert that sidecar was converted to eip7594 + assert!(envelope.as_eip4844().unwrap().tx().sidecar().unwrap().is_eip7594()); + // validate sidecar + TransactionTestContext::validate_sidecar(envelope); + + // build a payload + let blob_payload = node.new_payload().await?; + + // submit the blob payload + let blob_block_hash = node.submit_payload(blob_payload).await?; + + node.update_forkchoice(genesis_hash, blob_block_hash).await?; + + Ok(()) +} + +#[tokio::test] 
+async fn blob_conversion_at_osaka() -> eyre::Result<()> { + reth_tracing::init_test_tracing(); + let tasks = TaskManager::current(); + let exec = tasks.executor(); + + let current_timestamp = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs(); + // Osaka activates in 2 slots + let osaka_timestamp = current_timestamp + 24; + + let genesis: Genesis = serde_json::from_str(include_str!("../assets/genesis.json")).unwrap(); + let chain_spec = Arc::new( + ChainSpecBuilder::default() + .chain(MAINNET.chain) + .genesis(genesis) + .prague_activated() + .with_osaka_at(osaka_timestamp) + .build(), + ); + let genesis_hash = chain_spec.genesis_hash(); + let node_config = NodeConfig::test() + .with_chain(chain_spec) + .with_unused_ports() + .with_rpc(RpcServerArgs::default().with_unused_ports().with_http()); + let NodeHandle { node, node_exit_future: _ } = NodeBuilder::new(node_config.clone()) + .testing_node(exec.clone()) + .node(EthereumNode::default()) + .launch() + .await?; + + let mut node = NodeTestContext::new(node, eth_payload_attributes).await?; + + let mut wallets = Wallet::new(3).wallet_gen(); + let first = wallets.pop().unwrap(); + let second = wallets.pop().unwrap(); + + // build a dummy payload at `current_timestamp` + let raw_tx = TransactionTestContext::transfer_tx_bytes(1, wallets.pop().unwrap()).await; + node.rpc.inject_tx(raw_tx).await?; + node.payload.timestamp = current_timestamp - 1; + node.advance_block().await?; + + // build blob txs + let first_blob = TransactionTestContext::tx_with_blobs_bytes(1, first.clone()).await?; + let second_blob = TransactionTestContext::tx_with_blobs_bytes(1, second.clone()).await?; + + // assert both txs have legacy sidecars + assert!(PooledTransactionVariant::decode_2718_exact(&first_blob) + .unwrap() + .as_eip4844() + .unwrap() + .tx() + .sidecar + .is_eip4844()); + assert!(PooledTransactionVariant::decode_2718_exact(&second_blob) + .unwrap() + .as_eip4844() + .unwrap() + .tx() + .sidecar + .is_eip4844()); + + 
// inject first blob tx to the pool + let blob_tx_hash = node.rpc.inject_tx(first_blob).await?; + // fetch it from rpc + let envelope = node.rpc.envelope_by_hash(blob_tx_hash).await?; + // assert that it still has a legacy sidecar + assert!(envelope.as_eip4844().unwrap().tx().sidecar().unwrap().is_eip4844()); + // validate sidecar + TransactionTestContext::validate_sidecar(envelope); + + // build last Prague payload + node.payload.timestamp = current_timestamp + 11; + let prague_payload = node.new_payload().await?; + assert!(matches!(prague_payload.sidecars(), BlobSidecars::Eip4844(_))); + + // inject second blob tx to the pool + let blob_tx_hash = node.rpc.inject_tx(second_blob).await?; + // fetch it from rpc + let envelope = node.rpc.envelope_by_hash(blob_tx_hash).await?; + // assert that it still has a legacy sidecar + assert!(envelope.as_eip4844().unwrap().tx().sidecar().unwrap().is_eip4844()); + // validate sidecar + TransactionTestContext::validate_sidecar(envelope); + + tokio::time::sleep(Duration::from_secs(11)).await; + + // fetch second blob tx from rpc again + let envelope = node.rpc.envelope_by_hash(blob_tx_hash).await?; + // assert that it was converted to eip7594 + assert!(envelope.as_eip4844().unwrap().tx().sidecar().unwrap().is_eip7594()); + // validate sidecar + TransactionTestContext::validate_sidecar(envelope); + + // submit the Prague payload + node.update_forkchoice(genesis_hash, node.submit_payload(prague_payload).await?).await?; + + // Build first Osaka payload + node.payload.timestamp = osaka_timestamp - 1; + let osaka_payload = node.new_payload().await?; + + // Assert that it includes the second blob tx with eip7594 sidecar + assert!(osaka_payload.block().body().transactions().any(|tx| *tx.hash() == blob_tx_hash)); + assert!(matches!(osaka_payload.sidecars(), BlobSidecars::Eip7594(_))); + + node.update_forkchoice(genesis_hash, node.submit_payload(osaka_payload).await?).await?; + + Ok(()) +} diff --git a/crates/net/discv4/src/lib.rs 
b/crates/net/discv4/src/lib.rs index 3686d7bf690..83106cbbe6e 100644 --- a/crates/net/discv4/src/lib.rs +++ b/crates/net/discv4/src/lib.rs @@ -2402,7 +2402,7 @@ pub enum DiscoveryUpdate { /// Node that was removed from the table Removed(PeerId), /// A series of updates - Batch(Vec), + Batch(Vec), } #[cfg(test)] diff --git a/crates/net/ecies/src/algorithm.rs b/crates/net/ecies/src/algorithm.rs index 350cd3f7ed4..dae5e501695 100644 --- a/crates/net/ecies/src/algorithm.rs +++ b/crates/net/ecies/src/algorithm.rs @@ -499,7 +499,7 @@ impl ECIES { } /// Read and verify an auth message from the input data. - #[tracing::instrument(skip_all)] + #[tracing::instrument(level = "trace", skip_all)] pub fn read_auth(&mut self, data: &mut [u8]) -> Result<(), ECIESError> { self.remote_init_msg = Some(Bytes::copy_from_slice(data)); let unencrypted = self.decrypt_message(data)?; @@ -571,7 +571,7 @@ impl ECIES { } /// Read and verify an ack message from the input data. - #[tracing::instrument(skip_all)] + #[tracing::instrument(level = "trace", skip_all)] pub fn read_ack(&mut self, data: &mut [u8]) -> Result<(), ECIESError> { self.remote_init_msg = Some(Bytes::copy_from_slice(data)); let unencrypted = self.decrypt_message(data)?; diff --git a/crates/net/ecies/src/codec.rs b/crates/net/ecies/src/codec.rs index b5a10284cf2..c4c45366c66 100644 --- a/crates/net/ecies/src/codec.rs +++ b/crates/net/ecies/src/codec.rs @@ -58,7 +58,7 @@ impl Decoder for ECIESCodec { type Item = IngressECIESValue; type Error = ECIESError; - #[instrument(level = "trace", skip_all, fields(peer=?self.ecies.remote_id, state=?self.state))] + #[instrument(skip_all, fields(peer=?self.ecies.remote_id, state=?self.state))] fn decode(&mut self, buf: &mut BytesMut) -> Result, Self::Error> { loop { match self.state { @@ -150,7 +150,7 @@ impl Decoder for ECIESCodec { impl Encoder for ECIESCodec { type Error = io::Error; - #[instrument(level = "trace", skip(self, buf), fields(peer=?self.ecies.remote_id, state=?self.state))] + 
#[instrument(skip(self, buf), fields(peer=?self.ecies.remote_id, state=?self.state))] fn encode(&mut self, item: EgressECIESValue, buf: &mut BytesMut) -> Result<(), Self::Error> { match item { EgressECIESValue::Auth => { diff --git a/crates/net/ecies/src/stream.rs b/crates/net/ecies/src/stream.rs index 9915fc42e6a..830f3f5ddef 100644 --- a/crates/net/ecies/src/stream.rs +++ b/crates/net/ecies/src/stream.rs @@ -40,7 +40,7 @@ where Io: AsyncRead + AsyncWrite + Unpin, { /// Connect to an `ECIES` server - #[instrument(skip(transport, secret_key))] + #[instrument(level = "trace", skip(transport, secret_key))] pub async fn connect( transport: Io, secret_key: SecretKey, diff --git a/crates/net/network/src/transactions/fetcher.rs b/crates/net/network/src/transactions/fetcher.rs index 1cb725e4efb..a112e8cac89 100644 --- a/crates/net/network/src/transactions/fetcher.rs +++ b/crates/net/network/src/transactions/fetcher.rs @@ -284,9 +284,7 @@ impl TransactionFetcher { // folds size based on expected response size and adds selected hashes to the request // list and the other hashes to the surplus list - loop { - let Some((hash, metadata)) = hashes_from_announcement_iter.next() else { break }; - + for (hash, metadata) in hashes_from_announcement_iter.by_ref() { let Some((_ty, size)) = metadata else { unreachable!("this method is called upon reception of an eth68 announcement") }; @@ -413,7 +411,6 @@ impl TransactionFetcher { if let (_, Some(evicted_hash)) = self.hashes_pending_fetch.insert_and_get_evicted(hash) { self.hashes_fetch_inflight_and_pending_fetch.remove(&evicted_hash); - self.hashes_pending_fetch.remove(&evicted_hash); } } } diff --git a/crates/node/builder/src/components/pool.rs b/crates/node/builder/src/components/pool.rs index 9be184bc9c0..a261f02c756 100644 --- a/crates/node/builder/src/components/pool.rs +++ b/crates/node/builder/src/components/pool.rs @@ -3,7 +3,8 @@ use crate::{BuilderContext, FullNodeTypes}; use alloy_primitives::Address; use 
reth_chain_state::CanonStateSubscriptions; -use reth_node_api::TxTy; +use reth_chainspec::EthereumHardforks; +use reth_node_api::{NodeTypes, TxTy}; use reth_transaction_pool::{ blobstore::DiskFileBlobStore, CoinbaseTipOrdering, PoolConfig, PoolTransaction, SubPoolLimit, TransactionPool, TransactionValidationTaskExecutor, TransactionValidator, @@ -125,8 +126,9 @@ impl<'a, Node: FullNodeTypes, V> TxPoolBuilder<'a, Node, V> { } } -impl<'a, Node: FullNodeTypes, V> TxPoolBuilder<'a, Node, TransactionValidationTaskExecutor> +impl<'a, Node, V> TxPoolBuilder<'a, Node, TransactionValidationTaskExecutor> where + Node: FullNodeTypes>, V: TransactionValidator + 'static, V::Transaction: PoolTransaction> + reth_transaction_pool::EthPoolTransaction, @@ -227,7 +229,7 @@ fn spawn_pool_maintenance_task( pool_config: &PoolConfig, ) -> eyre::Result<()> where - Node: FullNodeTypes, + Node: FullNodeTypes>, Pool: reth_transaction_pool::TransactionPoolExt + Clone + 'static, Pool::Transaction: PoolTransaction>, { @@ -259,7 +261,7 @@ pub fn spawn_maintenance_tasks( pool_config: &PoolConfig, ) -> eyre::Result<()> where - Node: FullNodeTypes, + Node: FullNodeTypes>, Pool: reth_transaction_pool::TransactionPoolExt + Clone + 'static, Pool::Transaction: PoolTransaction>, { diff --git a/crates/node/builder/src/launch/engine.rs b/crates/node/builder/src/launch/engine.rs index 02fb505b077..3b43f5f3299 100644 --- a/crates/node/builder/src/launch/engine.rs +++ b/crates/node/builder/src/launch/engine.rs @@ -236,6 +236,7 @@ impl EngineNodeLauncher { info!(target: "reth::cli", "Consensus engine initialized"); + #[allow(clippy::needless_continue)] let events = stream_select!( event_sender.new_listener().map(Into::into), pipeline_events.map(Into::into), diff --git a/crates/node/builder/src/rpc.rs b/crates/node/builder/src/rpc.rs index 70adcc83d69..ed0a3fb64d4 100644 --- a/crates/node/builder/src/rpc.rs +++ b/crates/node/builder/src/rpc.rs @@ -23,8 +23,8 @@ use reth_node_core::{ version::{version_metadata, 
CLIENT_CODE}, }; use reth_payload_builder::{PayloadBuilderHandle, PayloadStore}; -use reth_rpc::eth::{core::EthRpcConverterFor, EthApiTypes, FullEthApiServer}; -use reth_rpc_api::{eth::helpers::AddDevSigners, IntoEngineApiRpcModule}; +use reth_rpc::eth::{core::EthRpcConverterFor, DevSigner, EthApiTypes, FullEthApiServer}; +use reth_rpc_api::{eth::helpers::EthTransactions, IntoEngineApiRpcModule}; use reth_rpc_builder::{ auth::{AuthRpcModule, AuthServerHandle}, config::RethRpcServerConfig, @@ -991,7 +991,8 @@ where // in dev mode we generate 20 random dev-signer accounts if config.dev.dev { - registry.eth_api().with_dev_accounts(); + let signers = DevSigner::from_mnemonic(config.dev.dev_mnemonic.as_str(), 20); + registry.eth_api().signers().write().extend(signers); } let mut registry = RpcRegistry { registry }; @@ -1163,7 +1164,6 @@ pub trait EthApiBuilder: Default + Send + 'static { /// The Ethapi implementation this builder will build. type EthApi: EthApiTypes + FullEthApiServer - + AddDevSigners + Unpin + 'static; diff --git a/crates/node/core/src/args/dev.rs b/crates/node/core/src/args/dev.rs index b6a01745257..d62ff1c5dce 100644 --- a/crates/node/core/src/args/dev.rs +++ b/crates/node/core/src/args/dev.rs @@ -5,8 +5,10 @@ use std::time::Duration; use clap::Args; use humantime::parse_duration; +const DEFAULT_MNEMONIC: &str = "test test test test test test test test test test test junk"; + /// Parameters for Dev testnet configuration -#[derive(Debug, Args, PartialEq, Eq, Default, Clone, Copy)] +#[derive(Debug, Args, PartialEq, Eq, Clone)] #[command(next_help_heading = "Dev testnet")] pub struct DevArgs { /// Start the node in dev mode @@ -39,6 +41,28 @@ pub struct DevArgs { verbatim_doc_comment )] pub block_time: Option, + + /// Derive dev accounts from a fixed mnemonic instead of random ones. 
+ #[arg( + long = "dev.mnemonic", + help_heading = "Dev testnet", + value_name = "MNEMONIC", + requires = "dev", + verbatim_doc_comment, + default_value = DEFAULT_MNEMONIC + )] + pub dev_mnemonic: String, +} + +impl Default for DevArgs { + fn default() -> Self { + Self { + dev: false, + block_max_transactions: None, + block_time: None, + dev_mnemonic: DEFAULT_MNEMONIC.to_string(), + } + } } #[cfg(test)] @@ -56,13 +80,37 @@ mod tests { #[test] fn test_parse_dev_args() { let args = CommandParser::::parse_from(["reth"]).args; - assert_eq!(args, DevArgs { dev: false, block_max_transactions: None, block_time: None }); + assert_eq!( + args, + DevArgs { + dev: false, + block_max_transactions: None, + block_time: None, + dev_mnemonic: DEFAULT_MNEMONIC.to_string(), + } + ); let args = CommandParser::::parse_from(["reth", "--dev"]).args; - assert_eq!(args, DevArgs { dev: true, block_max_transactions: None, block_time: None }); + assert_eq!( + args, + DevArgs { + dev: true, + block_max_transactions: None, + block_time: None, + dev_mnemonic: DEFAULT_MNEMONIC.to_string(), + } + ); let args = CommandParser::::parse_from(["reth", "--auto-mine"]).args; - assert_eq!(args, DevArgs { dev: true, block_max_transactions: None, block_time: None }); + assert_eq!( + args, + DevArgs { + dev: true, + block_max_transactions: None, + block_time: None, + dev_mnemonic: DEFAULT_MNEMONIC.to_string(), + } + ); let args = CommandParser::::parse_from([ "reth", @@ -71,7 +119,15 @@ mod tests { "2", ]) .args; - assert_eq!(args, DevArgs { dev: true, block_max_transactions: Some(2), block_time: None }); + assert_eq!( + args, + DevArgs { + dev: true, + block_max_transactions: Some(2), + block_time: None, + dev_mnemonic: DEFAULT_MNEMONIC.to_string(), + } + ); let args = CommandParser::::parse_from(["reth", "--dev", "--dev.block-time", "1s"]).args; @@ -80,7 +136,8 @@ mod tests { DevArgs { dev: true, block_max_transactions: None, - block_time: Some(std::time::Duration::from_secs(1)) + block_time: 
Some(std::time::Duration::from_secs(1)), + dev_mnemonic: DEFAULT_MNEMONIC.to_string(), } ); } diff --git a/crates/node/core/src/args/log.rs b/crates/node/core/src/args/log.rs index 99fefc11445..20c60362d7b 100644 --- a/crates/node/core/src/args/log.rs +++ b/crates/node/core/src/args/log.rs @@ -139,7 +139,7 @@ impl LogArgs { pub enum ColorMode { /// Colors on Always, - /// Colors on + /// Auto-detect Auto, /// Colors off Never, diff --git a/crates/node/core/src/node_config.rs b/crates/node/core/src/node_config.rs index 94dbecb649c..7b487a1fa71 100644 --- a/crates/node/core/src/node_config.rs +++ b/crates/node/core/src/node_config.rs @@ -272,7 +272,7 @@ impl NodeConfig { } /// Set the dev args for the node - pub const fn with_dev(mut self, dev: DevArgs) -> Self { + pub fn with_dev(mut self, dev: DevArgs) -> Self { self.dev = dev; self } @@ -519,7 +519,7 @@ impl Clone for NodeConfig { builder: self.builder.clone(), debug: self.debug.clone(), db: self.db, - dev: self.dev, + dev: self.dev.clone(), pruning: self.pruning.clone(), datadir: self.datadir.clone(), engine: self.engine.clone(), diff --git a/crates/optimism/cli/src/commands/import.rs b/crates/optimism/cli/src/commands/import.rs index 0fd1d64ac12..74656511af1 100644 --- a/crates/optimism/cli/src/commands/import.rs +++ b/crates/optimism/cli/src/commands/import.rs @@ -71,6 +71,9 @@ impl> ImportOpCommand { .sealed_header(provider_factory.last_block_number()?)? .expect("should have genesis"); + let static_file_producer = + StaticFileProducer::new(provider_factory.clone(), PruneModes::default()); + while let Some(mut file_client) = reader.next_chunk::>(consensus.clone(), Some(sealed_header)).await? 
{ @@ -100,7 +103,7 @@ impl> ImportOpCommand { provider_factory.clone(), &consensus, Arc::new(file_client), - StaticFileProducer::new(provider_factory.clone(), PruneModes::default()), + static_file_producer.clone(), true, OpExecutorProvider::optimism(provider_factory.chain_spec()), )?; diff --git a/crates/optimism/cli/src/commands/init_state.rs b/crates/optimism/cli/src/commands/init_state.rs index 0d065c29442..7af17ca3523 100644 --- a/crates/optimism/cli/src/commands/init_state.rs +++ b/crates/optimism/cli/src/commands/init_state.rs @@ -7,7 +7,7 @@ use reth_cli_commands::common::{AccessRights, CliHeader, CliNodeTypes, Environme use reth_db_common::init::init_from_state_dump; use reth_optimism_chainspec::OpChainSpec; use reth_optimism_primitives::{ - bedrock::{BEDROCK_HEADER, BEDROCK_HEADER_HASH, BEDROCK_HEADER_TTD}, + bedrock::{BEDROCK_HEADER, BEDROCK_HEADER_HASH}, OpPrimitives, }; use reth_primitives_traits::SealedHeader; @@ -58,7 +58,6 @@ impl> InitStateCommandOp { reth_cli_commands::init_state::without_evm::setup_without_evm( &provider_rw, SealedHeader::new(BEDROCK_HEADER, BEDROCK_HEADER_HASH), - BEDROCK_HEADER_TTD, |number| { let mut header = Header::default(); header.set_number(number); diff --git a/crates/optimism/rpc/src/eth/mod.rs b/crates/optimism/rpc/src/eth/mod.rs index e10c5152473..04887d98f4c 100644 --- a/crates/optimism/rpc/src/eth/mod.rs +++ b/crates/optimism/rpc/src/eth/mod.rs @@ -26,19 +26,19 @@ use reth_optimism_flashblocks::{ ExecutionPayloadBaseV1, FlashBlockBuildInfo, FlashBlockCompleteSequenceRx, FlashBlockService, InProgressFlashBlockRx, PendingBlockRx, PendingFlashBlock, WsFlashBlockStream, }; -use reth_rpc::eth::{core::EthApiInner, DevSigner}; +use reth_rpc::eth::core::EthApiInner; use reth_rpc_eth_api::{ helpers::{ - pending_block::BuildPendingEnv, AddDevSigners, EthApiSpec, EthFees, EthState, LoadFee, - LoadPendingBlock, LoadState, SpawnBlocking, Trace, + pending_block::BuildPendingEnv, EthApiSpec, EthFees, EthState, LoadFee, 
LoadPendingBlock, + LoadState, SpawnBlocking, Trace, }, EthApiTypes, FromEvmError, FullEthApiServer, RpcConvert, RpcConverter, RpcNodeCore, - RpcNodeCoreExt, RpcTypes, SignableTxRequest, + RpcNodeCoreExt, RpcTypes, }; use reth_rpc_eth_types::{ EthStateCache, FeeHistoryCache, GasPriceOracle, PendingBlock, PendingBlockEnvOrigin, }; -use reth_storage_api::{ProviderHeader, ProviderTx}; +use reth_storage_api::ProviderHeader; use reth_tasks::{ pool::{BlockingTaskGuard, BlockingTaskPool}, TaskSpawner, @@ -335,18 +335,6 @@ where { } -impl AddDevSigners for OpEthApi -where - N: RpcNodeCore, - Rpc: RpcConvert< - Network: RpcTypes>>, - >, -{ - fn with_dev_accounts(&self) { - *self.inner.eth_api.signers().write() = DevSigner::random_signers(20) - } -} - impl fmt::Debug for OpEthApi { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("OpEthApi").finish_non_exhaustive() @@ -483,7 +471,7 @@ where NetworkT: RpcTypes, OpRpcConvert: RpcConvert, OpEthApi>: - FullEthApiServer + AddDevSigners, + FullEthApiServer, { type EthApi = OpEthApi>; diff --git a/crates/prune/prune/src/segments/user/account_history.rs b/crates/prune/prune/src/segments/user/account_history.rs index 3c18cd1befc..317337f050e 100644 --- a/crates/prune/prune/src/segments/user/account_history.rs +++ b/crates/prune/prune/src/segments/user/account_history.rs @@ -45,7 +45,7 @@ where PrunePurpose::User } - #[instrument(level = "trace", target = "pruner", skip(self, provider), ret)] + #[instrument(target = "pruner", skip(self, provider), ret(level = "trace"))] fn prune(&self, provider: &Provider, input: PruneInput) -> Result { let range = match input.get_next_block_range() { Some(range) => range, diff --git a/crates/prune/prune/src/segments/user/receipts.rs b/crates/prune/prune/src/segments/user/receipts.rs index ecb0f3423be..03faddc1d5b 100644 --- a/crates/prune/prune/src/segments/user/receipts.rs +++ b/crates/prune/prune/src/segments/user/receipts.rs @@ -42,7 +42,7 @@ where PrunePurpose::User } - 
#[instrument(level = "trace", target = "pruner", skip(self, provider), ret)] + #[instrument(target = "pruner", skip(self, provider), ret(level = "trace"))] fn prune(&self, provider: &Provider, input: PruneInput) -> Result { crate::segments::receipts::prune(provider, input) } diff --git a/crates/prune/prune/src/segments/user/receipts_by_logs.rs b/crates/prune/prune/src/segments/user/receipts_by_logs.rs index 0849db52518..8fd6d1e73a5 100644 --- a/crates/prune/prune/src/segments/user/receipts_by_logs.rs +++ b/crates/prune/prune/src/segments/user/receipts_by_logs.rs @@ -45,7 +45,7 @@ where PrunePurpose::User } - #[instrument(level = "trace", target = "pruner", skip(self, provider), ret)] + #[instrument(target = "pruner", skip(self, provider), ret(level = "trace"))] fn prune(&self, provider: &Provider, input: PruneInput) -> Result { // Contract log filtering removes every receipt possible except the ones in the list. So, // for the other receipts it's as if they had a `PruneMode::Distance()` of diff --git a/crates/prune/prune/src/segments/user/sender_recovery.rs b/crates/prune/prune/src/segments/user/sender_recovery.rs index 35ee487203a..9fbad8c428c 100644 --- a/crates/prune/prune/src/segments/user/sender_recovery.rs +++ b/crates/prune/prune/src/segments/user/sender_recovery.rs @@ -37,7 +37,7 @@ where PrunePurpose::User } - #[instrument(level = "trace", target = "pruner", skip(self, provider), ret)] + #[instrument(target = "pruner", skip(self, provider), ret(level = "trace"))] fn prune(&self, provider: &Provider, input: PruneInput) -> Result { let tx_range = match input.get_next_tx_num_range(provider)? 
{ Some(range) => range, diff --git a/crates/prune/prune/src/segments/user/storage_history.rs b/crates/prune/prune/src/segments/user/storage_history.rs index ee7447c37da..a4ad37bf789 100644 --- a/crates/prune/prune/src/segments/user/storage_history.rs +++ b/crates/prune/prune/src/segments/user/storage_history.rs @@ -47,7 +47,7 @@ where PrunePurpose::User } - #[instrument(level = "trace", target = "pruner", skip(self, provider), ret)] + #[instrument(target = "pruner", skip(self, provider), ret(level = "trace"))] fn prune(&self, provider: &Provider, input: PruneInput) -> Result { let range = match input.get_next_block_range() { Some(range) => range, diff --git a/crates/prune/prune/src/segments/user/transaction_lookup.rs b/crates/prune/prune/src/segments/user/transaction_lookup.rs index e218f623ed5..0055f8abd22 100644 --- a/crates/prune/prune/src/segments/user/transaction_lookup.rs +++ b/crates/prune/prune/src/segments/user/transaction_lookup.rs @@ -38,7 +38,7 @@ where PrunePurpose::User } - #[instrument(level = "trace", target = "pruner", skip(self, provider), ret)] + #[instrument(target = "pruner", skip(self, provider), ret(level = "trace"))] fn prune( &self, provider: &Provider, diff --git a/crates/rpc/ipc/src/server/ipc.rs b/crates/rpc/ipc/src/server/ipc.rs index 19992ead498..fda19c7cb31 100644 --- a/crates/rpc/ipc/src/server/ipc.rs +++ b/crates/rpc/ipc/src/server/ipc.rs @@ -27,7 +27,7 @@ pub(crate) struct Batch { // Batch responses must be sent back as a single message so we read the results from each // request in the batch and read the results off of a new channel, `rx_batch`, and then send the // complete batch response back to the client over `tx`. 
-#[instrument(name = "batch", skip(b), level = "TRACE")] +#[instrument(name = "batch", skip(b))] pub(crate) async fn process_batch_request( b: Batch, max_response_body_size: usize, @@ -98,7 +98,7 @@ where } } -#[instrument(name = "method_call", fields(method = req.method.as_ref()), skip(req, rpc_service), level = "TRACE")] +#[instrument(name = "method_call", fields(method = req.method.as_ref()), skip(req, rpc_service))] pub(crate) async fn execute_call_with_tracing<'a, S>( req: Request<'a>, rpc_service: &S, diff --git a/crates/rpc/ipc/src/server/mod.rs b/crates/rpc/ipc/src/server/mod.rs index b6114938d2b..6e6b092c408 100644 --- a/crates/rpc/ipc/src/server/mod.rs +++ b/crates/rpc/ipc/src/server/mod.rs @@ -443,7 +443,7 @@ struct ProcessConnection<'a, HttpMiddleware, RpcMiddleware> { } /// Spawns the IPC connection onto a new task -#[instrument(name = "connection", skip_all, fields(conn_id = %params.conn_id), level = "INFO")] +#[instrument(name = "connection", skip_all, fields(conn_id = %params.conn_id))] fn process_connection( params: ProcessConnection<'_, HttpMiddleware, RpcMiddleware>, ) where diff --git a/crates/rpc/rpc-convert/src/transaction.rs b/crates/rpc/rpc-convert/src/transaction.rs index a89104bcbaf..046acbda544 100644 --- a/crates/rpc/rpc-convert/src/transaction.rs +++ b/crates/rpc/rpc-convert/src/transaction.rs @@ -2,7 +2,7 @@ use crate::{ fees::{CallFees, CallFeesError}, - RpcHeader, RpcReceipt, RpcTransaction, RpcTxReq, RpcTypes, + RpcHeader, RpcReceipt, RpcTransaction, RpcTxReq, RpcTypes, SignableTxRequest, }; use alloy_consensus::{ error::ValueError, transaction::Recovered, EthereumTxEnvelope, Sealable, TxEip4844, @@ -128,7 +128,7 @@ pub trait RpcConvert: Send + Sync + Unpin + Debug + DynClone + 'static { /// Associated upper layer JSON-RPC API network requests and responses to convert from and into /// types of [`Self::Primitives`]. 
- type Network: RpcTypes + Send + Sync + Unpin + Clone + Debug; + type Network: RpcTypes>>; /// An associated RPC conversion error. type Error: error::Error + Into>; @@ -901,7 +901,7 @@ impl RpcConvert for RpcConverter where N: NodePrimitives, - Network: RpcTypes + Send + Sync + Unpin + Clone + Debug, + Network: RpcTypes>, Evm: ConfigureEvm + 'static, Receipt: ReceiptConverter< N, diff --git a/crates/rpc/rpc-eth-api/Cargo.toml b/crates/rpc/rpc-eth-api/Cargo.toml index 44637d1931c..88a7f059323 100644 --- a/crates/rpc/rpc-eth-api/Cargo.toml +++ b/crates/rpc/rpc-eth-api/Cargo.toml @@ -13,7 +13,7 @@ workspace = true [dependencies] # reth -revm = { workspace = true, features = ["optional_block_gas_limit", "optional_eip3607", "optional_no_base_fee"] } +revm = { workspace = true, features = ["optional_block_gas_limit", "optional_eip3607", "optional_no_base_fee", "optional_fee_charge"] } reth-chain-state.workspace = true revm-inspectors.workspace = true reth-primitives-traits = { workspace = true, features = ["rpc-compat"] } diff --git a/crates/rpc/rpc-eth-api/src/helpers/call.rs b/crates/rpc/rpc-eth-api/src/helpers/call.rs index 8f325e757f1..221fef3680f 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/call.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/call.rs @@ -300,7 +300,7 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock + LoadBlock + FullEthA } // transact all bundles - for bundle in bundles { + for (bundle_index, bundle) in bundles.into_iter().enumerate() { let Bundle { transactions, block_override } = bundle; if transactions.is_empty() { // Skip empty bundles @@ -311,15 +311,30 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock + LoadBlock + FullEthA let block_overrides = block_override.map(Box::new); // transact all transactions in the bundle - for tx in transactions { + for (tx_index, tx) in transactions.into_iter().enumerate() { // Apply overrides, state overrides are only applied for the first tx in the // request let overrides = 
EvmOverrides::new(state_override.take(), block_overrides.clone()); - let (current_evm_env, prepared_tx) = - this.prepare_call_env(evm_env.clone(), tx, &mut db, overrides)?; - let res = this.transact(&mut db, current_evm_env, prepared_tx)?; + let (current_evm_env, prepared_tx) = this + .prepare_call_env(evm_env.clone(), tx, &mut db, overrides) + .map_err(|err| { + Self::Error::from_eth_err(EthApiError::call_many_error( + bundle_index, + tx_index, + err.into(), + )) + })?; + let res = this.transact(&mut db, current_evm_env, prepared_tx).map_err( + |err| { + Self::Error::from_eth_err(EthApiError::call_many_error( + bundle_index, + tx_index, + err.into(), + )) + }, + )?; match ensure_success::<_, Self::Error>(res.result) { Ok(output) => { @@ -791,6 +806,11 @@ pub trait Call: // Disable EIP-7825 transaction gas limit to support larger transactions evm_env.cfg_env.tx_gas_limit_cap = Some(u64::MAX); + // Disable additional fee charges, e.g. opstack operator fee charge + // See: + // + evm_env.cfg_env.disable_fee_charge = true; + // set nonce to None so that the correct nonce is chosen by the EVM request.as_mut().take_nonce(); diff --git a/crates/rpc/rpc-eth-api/src/helpers/mod.rs b/crates/rpc/rpc-eth-api/src/helpers/mod.rs index 29223d78913..19a72ccafb7 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/mod.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/mod.rs @@ -34,7 +34,7 @@ pub use call::{Call, EthCall}; pub use fee::{EthFees, LoadFee}; pub use pending_block::LoadPendingBlock; pub use receipt::LoadReceipt; -pub use signer::{AddDevSigners, EthSigner}; +pub use signer::EthSigner; pub use spec::EthApiSpec; pub use state::{EthState, LoadState}; pub use trace::Trace; diff --git a/crates/rpc/rpc-eth-api/src/helpers/signer.rs b/crates/rpc/rpc-eth-api/src/helpers/signer.rs index 4060be138e0..c54c8943c0a 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/signer.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/signer.rs @@ -32,11 +32,3 @@ pub trait EthSigner: Send + Sync + DynClone { } 
dyn_clone::clone_trait_object!( EthSigner); - -/// Adds 20 random dev signers for access via the API. Used in dev mode. -#[auto_impl::auto_impl(&)] -pub trait AddDevSigners { - /// Generates 20 random developer accounts. - /// Used in DEV mode. - fn with_dev_accounts(&self); -} diff --git a/crates/rpc/rpc-eth-api/src/types.rs b/crates/rpc/rpc-eth-api/src/types.rs index 22100520016..ed4fcfa5c80 100644 --- a/crates/rpc/rpc-eth-api/src/types.rs +++ b/crates/rpc/rpc-eth-api/src/types.rs @@ -2,11 +2,9 @@ use crate::{AsEthApiError, FromEthApiError, RpcNodeCore}; use alloy_rpc_types_eth::Block; -use reth_chain_state::CanonStateSubscriptions; -use reth_rpc_convert::RpcConvert; +use reth_rpc_convert::{RpcConvert, SignableTxRequest}; pub use reth_rpc_convert::{RpcTransaction, RpcTxReq, RpcTypes}; -use reth_storage_api::{ProviderTx, ReceiptProvider, TransactionsProvider}; -use reth_transaction_pool::{PoolTransaction, TransactionPool}; +use reth_storage_api::ProviderTx; use std::{ error::Error, fmt::{self}, @@ -52,12 +50,11 @@ pub type RpcError = ::Error; /// Helper trait holds necessary trait bounds on [`EthApiTypes`] to implement `eth` API. 
pub trait FullEthApiTypes where - Self: RpcNodeCore< - Provider: TransactionsProvider + ReceiptProvider + CanonStateSubscriptions, - Pool: TransactionPool< - Transaction: PoolTransaction>, + Self: RpcNodeCore + + EthApiTypes< + NetworkTypes: RpcTypes< + TransactionRequest: SignableTxRequest>, >, - > + EthApiTypes< RpcConvert: RpcConvert< Primitives = Self::Primitives, Network = Self::NetworkTypes, @@ -68,12 +65,11 @@ where } impl FullEthApiTypes for T where - T: RpcNodeCore< - Provider: TransactionsProvider + ReceiptProvider + CanonStateSubscriptions, - Pool: TransactionPool< - Transaction: PoolTransaction>, + T: RpcNodeCore + + EthApiTypes< + NetworkTypes: RpcTypes< + TransactionRequest: SignableTxRequest>, >, - > + EthApiTypes< RpcConvert: RpcConvert< Primitives = ::Primitives, Network = Self::NetworkTypes, diff --git a/crates/rpc/rpc-eth-types/src/error/mod.rs b/crates/rpc/rpc-eth-types/src/error/mod.rs index 196461d18ce..fdb5f8f190f 100644 --- a/crates/rpc/rpc-eth-types/src/error/mod.rs +++ b/crates/rpc/rpc-eth-types/src/error/mod.rs @@ -69,7 +69,7 @@ pub enum EthApiError { InvalidTransactionSignature, /// Errors related to the transaction pool #[error(transparent)] - PoolError(RpcPoolError), + PoolError(#[from] RpcPoolError), /// Header not found for block hash/number/tag #[error("header not found")] HeaderNotFound(BlockId), @@ -186,6 +186,16 @@ pub enum EthApiError { /// Error thrown when batch tx send channel fails #[error("Batch transaction sender channel closed")] BatchTxSendError, + /// Error that occurred during `call_many` execution with bundle and transaction context + #[error("call_many error in bundle {bundle_index} and transaction {tx_index}: {}", .error.message())] + CallManyError { + /// Bundle index where the error occurred + bundle_index: usize, + /// Transaction index within the bundle where the error occurred + tx_index: usize, + /// The underlying error object + error: jsonrpsee_types::ErrorObject<'static>, + }, /// Any other error 
#[error("{0}")] Other(Box), @@ -197,6 +207,15 @@ impl EthApiError { Self::Other(Box::new(err)) } + /// Creates a new [`EthApiError::CallManyError`] variant. + pub const fn call_many_error( + bundle_index: usize, + tx_index: usize, + error: jsonrpsee_types::ErrorObject<'static>, + ) -> Self { + Self::CallManyError { bundle_index, tx_index, error } + } + /// Returns `true` if error is [`RpcInvalidTransactionError::GasTooHigh`] pub const fn is_gas_too_high(&self) -> bool { matches!( @@ -304,6 +323,16 @@ impl From for jsonrpsee_types::error::ErrorObject<'static> { EthApiError::BatchTxSendError => { internal_rpc_err("Batch transaction sender channel closed".to_string()) } + EthApiError::CallManyError { bundle_index, tx_index, error } => { + jsonrpsee_types::error::ErrorObject::owned( + error.code(), + format!( + "call_many error in bundle {bundle_index} and transaction {tx_index}: {}", + error.message() + ), + error.data(), + ) + } } } } diff --git a/crates/rpc/rpc/Cargo.toml b/crates/rpc/rpc/Cargo.toml index c47c383f057..e028e47448d 100644 --- a/crates/rpc/rpc/Cargo.toml +++ b/crates/rpc/rpc/Cargo.toml @@ -45,7 +45,7 @@ reth-trie-common.workspace = true alloy-evm = { workspace = true, features = ["overrides"] } alloy-consensus.workspace = true alloy-signer.workspace = true -alloy-signer-local.workspace = true +alloy-signer-local = { workspace = true, features = ["mnemonic"] } alloy-eips = { workspace = true, features = ["kzg"] } alloy-dyn-abi.workspace = true alloy-genesis.workspace = true diff --git a/crates/rpc/rpc/src/debug.rs b/crates/rpc/rpc/src/debug.rs index 00a89c10831..62aa625b9f2 100644 --- a/crates/rpc/rpc/src/debug.rs +++ b/crates/rpc/rpc/src/debug.rs @@ -420,6 +420,11 @@ where Ok(frame.into()) } + _ => { + // Note: this match is non-exhaustive in case we need to add support for + // additional tracers + Err(EthApiError::Unsupported("unsupported tracer").into()) + } }, #[cfg(not(feature = "js-tracer"))] GethDebugTracerType::JsTracer(_) => { @@ -839,6 
+844,11 @@ where return Ok((frame.into(), res.state)); } + _ => { + // Note: this match is non-exhaustive in case we need to add support for + // additional tracers + Err(EthApiError::Unsupported("unsupported tracer").into()) + } }, #[cfg(not(feature = "js-tracer"))] GethDebugTracerType::JsTracer(_) => { diff --git a/crates/rpc/rpc/src/engine.rs b/crates/rpc/rpc/src/engine.rs index a0e0bd30931..7865659ece7 100644 --- a/crates/rpc/rpc/src/engine.rs +++ b/crates/rpc/rpc/src/engine.rs @@ -16,7 +16,7 @@ use tracing_futures::Instrument; macro_rules! engine_span { () => { - tracing::trace_span!(target: "rpc", "engine") + tracing::info_span!(target: "rpc", "engine") }; } diff --git a/crates/rpc/rpc/src/eth/core.rs b/crates/rpc/rpc/src/eth/core.rs index 61082f4f929..e3850a67f54 100644 --- a/crates/rpc/rpc/src/eth/core.rs +++ b/crates/rpc/rpc/src/eth/core.rs @@ -30,8 +30,8 @@ use reth_tasks::{ TaskSpawner, TokioTaskExecutor, }; use reth_transaction_pool::{ - noop::NoopTransactionPool, AddedTransactionOutcome, BatchTxProcessor, BatchTxRequest, - TransactionPool, + blobstore::BlobSidecarConverter, noop::NoopTransactionPool, AddedTransactionOutcome, + BatchTxProcessor, BatchTxRequest, TransactionPool, }; use tokio::sync::{broadcast, mpsc, Mutex}; @@ -315,6 +315,9 @@ pub struct EthApiInner { /// Timeout duration for `send_raw_transaction_sync` RPC method. send_raw_transaction_sync_timeout: Duration, + + /// Blob sidecar converter + blob_sidecar_converter: BlobSidecarConverter, } impl EthApiInner @@ -382,6 +385,7 @@ where tx_batch_sender, pending_block_kind, send_raw_transaction_sync_timeout, + blob_sidecar_converter: BlobSidecarConverter::new(), } } } @@ -553,6 +557,12 @@ where pub const fn send_raw_transaction_sync_timeout(&self) -> Duration { self.send_raw_transaction_sync_timeout } + + /// Returns a handle to the blob sidecar converter. 
+ #[inline] + pub const fn blob_sidecar_converter(&self) -> &BlobSidecarConverter { + &self.blob_sidecar_converter + } } #[cfg(test)] diff --git a/crates/rpc/rpc/src/eth/helpers/signer.rs b/crates/rpc/rpc/src/eth/helpers/signer.rs index 60d6a151f9b..2c18245d542 100644 --- a/crates/rpc/rpc/src/eth/helpers/signer.rs +++ b/crates/rpc/rpc/src/eth/helpers/signer.rs @@ -1,33 +1,14 @@ //! An abstraction over ethereum signers. -use std::collections::HashMap; - -use crate::EthApi; use alloy_dyn_abi::TypedData; use alloy_eips::eip2718::Decodable2718; use alloy_primitives::{eip191_hash_message, Address, Signature, B256}; use alloy_signer::SignerSync; -use alloy_signer_local::PrivateKeySigner; -use reth_rpc_convert::{RpcConvert, RpcTypes, SignableTxRequest}; -use reth_rpc_eth_api::{ - helpers::{signer::Result, AddDevSigners, EthSigner}, - FromEvmError, RpcNodeCore, -}; -use reth_rpc_eth_types::{EthApiError, SignError}; -use reth_storage_api::ProviderTx; - -impl AddDevSigners for EthApi -where - N: RpcNodeCore, - EthApiError: FromEvmError, - Rpc: RpcConvert< - Network: RpcTypes>>, - >, -{ - fn with_dev_accounts(&self) { - *self.inner.signers().write() = DevSigner::random_signers(20) - } -} +use alloy_signer_local::{coins_bip39::English, MnemonicBuilder, PrivateKeySigner}; +use reth_rpc_convert::SignableTxRequest; +use reth_rpc_eth_api::helpers::{signer::Result, EthSigner}; +use reth_rpc_eth_types::SignError; +use std::collections::HashMap; /// Holds developer keys #[derive(Debug, Clone)] @@ -55,6 +36,32 @@ impl DevSigner { signers } + /// Generates dev signers deterministically from a fixed mnemonic. 
+ /// Uses the Ethereum derivation path: `m/44'/60'/0'/0/{index}` + pub fn from_mnemonic>( + mnemonic: &str, + num: u32, + ) -> Vec + 'static>> { + let mut signers = Vec::with_capacity(num as usize); + + for i in 0..num { + let sk = MnemonicBuilder::::default() + .phrase(mnemonic) + .index(i) + .expect("invalid derivation path") + .build() + .expect("failed to build signer from mnemonic"); + + let address = sk.address(); + let addresses = vec![address]; + let accounts = HashMap::from([(address, sk)]); + + signers.push(Box::new(Self { addresses, accounts }) as Box>); + } + + signers + } + fn get_key(&self, account: Address) -> Result<&PrivateKeySigner> { self.accounts.get(&account).ok_or(SignError::NoAccount) } diff --git a/crates/rpc/rpc/src/eth/helpers/transaction.rs b/crates/rpc/rpc/src/eth/helpers/transaction.rs index 4fa39112166..39758f68d77 100644 --- a/crates/rpc/rpc/src/eth/helpers/transaction.rs +++ b/crates/rpc/rpc/src/eth/helpers/transaction.rs @@ -3,14 +3,22 @@ use std::time::Duration; use crate::EthApi; +use alloy_consensus::BlobTransactionValidationError; +use alloy_eips::{eip7594::BlobTransactionSidecarVariant, BlockId, Typed2718}; use alloy_primitives::{hex, Bytes, B256}; +use reth_chainspec::{ChainSpecProvider, EthereumHardforks}; +use reth_primitives_traits::AlloyBlockHeader; use reth_rpc_convert::RpcConvert; use reth_rpc_eth_api::{ helpers::{spec::SignersForRpc, EthTransactions, LoadTransaction}, FromEvmError, RpcNodeCore, }; -use reth_rpc_eth_types::{utils::recover_raw_transaction, EthApiError}; -use reth_transaction_pool::{AddedTransactionOutcome, PoolTransaction, TransactionPool}; +use reth_rpc_eth_types::{error::RpcPoolError, utils::recover_raw_transaction, EthApiError}; +use reth_storage_api::BlockReaderIdExt; +use reth_transaction_pool::{ + error::Eip4844PoolTransactionError, AddedTransactionOutcome, EthBlobTransactionSidecar, + EthPoolTransaction, PoolTransaction, TransactionPool, +}; impl EthTransactions for EthApi where @@ -34,7 +42,56 @@ 
where async fn send_raw_transaction(&self, tx: Bytes) -> Result { let recovered = recover_raw_transaction(&tx)?; - let pool_transaction = ::Transaction::from_pooled(recovered); + let mut pool_transaction = + ::Transaction::from_pooled(recovered); + + // TODO: remove this after Osaka transition + // Convert legacy blob sidecars to EIP-7594 format + if pool_transaction.is_eip4844() { + let EthBlobTransactionSidecar::Present(sidecar) = pool_transaction.take_blob() else { + return Err(EthApiError::PoolError(RpcPoolError::Eip4844( + Eip4844PoolTransactionError::MissingEip4844BlobSidecar, + ))); + }; + + let sidecar = match sidecar { + BlobTransactionSidecarVariant::Eip4844(sidecar) => { + let latest = self + .provider() + .latest_header()? + .ok_or(EthApiError::HeaderNotFound(BlockId::latest()))?; + // Convert to EIP-7594 if next block is Osaka + if self + .provider() + .chain_spec() + .is_osaka_active_at_timestamp(latest.timestamp().saturating_add(12)) + { + BlobTransactionSidecarVariant::Eip7594( + self.blob_sidecar_converter().convert(sidecar).await.ok_or_else( + || { + RpcPoolError::Eip4844( + Eip4844PoolTransactionError::InvalidEip4844Blob( + BlobTransactionValidationError::InvalidProof, + ), + ) + }, + )?, + ) + } else { + BlobTransactionSidecarVariant::Eip4844(sidecar) + } + } + sidecar => sidecar, + }; + + pool_transaction = + EthPoolTransaction::try_from_eip4844(pool_transaction.into_consensus(), sidecar) + .ok_or_else(|| { + RpcPoolError::Eip4844( + Eip4844PoolTransactionError::MissingEip4844BlobSidecar, + ) + })?; + } // forward the transaction to the specific endpoint if configured. 
if let Some(client) = self.raw_tx_forwarder() { diff --git a/crates/rpc/rpc/src/eth/sim_bundle.rs b/crates/rpc/rpc/src/eth/sim_bundle.rs index c738a64c2d5..328ea29193f 100644 --- a/crates/rpc/rpc/src/eth/sim_bundle.rs +++ b/crates/rpc/rpc/src/eth/sim_bundle.rs @@ -426,7 +426,7 @@ where let timeout = override_timeout .map(Duration::from_secs) - .filter(|&custom_duration| custom_duration <= MAX_SIM_TIMEOUT) + .map(|d| d.min(MAX_SIM_TIMEOUT)) .unwrap_or(DEFAULT_SIM_TIMEOUT); let bundle_res = diff --git a/crates/stateless/src/recover_block.rs b/crates/stateless/src/recover_block.rs index b402cb3724f..15db1fe55e1 100644 --- a/crates/stateless/src/recover_block.rs +++ b/crates/stateless/src/recover_block.rs @@ -2,15 +2,28 @@ use crate::validation::StatelessValidationError; use alloc::vec::Vec; use alloy_consensus::BlockHeader; use alloy_primitives::{Address, Signature, B256}; +use core::ops::Deref; use reth_chainspec::EthereumHardforks; use reth_ethereum_primitives::{Block, TransactionSigned}; use reth_primitives_traits::{Block as _, RecoveredBlock}; +use serde::{Deserialize, Serialize}; +use serde_with::{serde_as, Bytes}; #[cfg(all(feature = "k256", feature = "secp256k1"))] use k256 as _; /// Serialized uncompressed public key -pub type UncompressedPublicKey = [u8; 65]; +#[serde_as] +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct UncompressedPublicKey(#[serde_as(as = "Bytes")] pub [u8; 65]); + +impl Deref for UncompressedPublicKey { + type Target = [u8]; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} /// Verifies all transactions in a block against a list of public keys and signatures. 
/// diff --git a/crates/storage/db-common/src/init.rs b/crates/storage/db-common/src/init.rs index 87f009356a0..8b24f0f8d19 100644 --- a/crates/storage/db-common/src/init.rs +++ b/crates/storage/db-common/src/init.rs @@ -9,7 +9,9 @@ use reth_config::config::EtlConfig; use reth_db_api::{tables, transaction::DbTxMut, DatabaseError}; use reth_etl::Collector; use reth_execution_errors::StateRootError; -use reth_primitives_traits::{Account, Bytecode, GotExpected, NodePrimitives, StorageEntry}; +use reth_primitives_traits::{ + Account, Bytecode, GotExpected, NodePrimitives, SealedHeader, StorageEntry, +}; use reth_provider::{ errors::provider::ProviderResult, providers::StaticFileWriter, BlockHashReader, BlockNumReader, BundleStateInit, ChainSpecProvider, DBProvider, DatabaseProviderFactory, ExecutionOutcome, @@ -389,13 +391,16 @@ where } let block = provider_rw.last_block_number()?; + let hash = provider_rw .block_hash(block)? .ok_or_else(|| eyre::eyre!("Block hash not found for block {}", block))?; - let expected_state_root = provider_rw + let header = provider_rw .header_by_number(block)? - .ok_or_else(|| ProviderError::HeaderNotFound(block.into()))? - .state_root(); + .map(SealedHeader::seal_slow) + .ok_or_else(|| ProviderError::HeaderNotFound(block.into()))?; + + let expected_state_root = header.state_root(); // first line can be state root let dump_state_root = parse_state_root(&mut reader)?; @@ -403,6 +408,7 @@ where error!(target: "reth::cli", ?dump_state_root, ?expected_state_root, + header=?header.num_hash(), "State root from state dump does not match state root in current header." 
); return Err(InitStorageError::StateRootMismatch(GotExpected { diff --git a/crates/transaction-pool/src/blobstore/converter.rs b/crates/transaction-pool/src/blobstore/converter.rs new file mode 100644 index 00000000000..3f6abc56bff --- /dev/null +++ b/crates/transaction-pool/src/blobstore/converter.rs @@ -0,0 +1,30 @@ +use alloy_consensus::{BlobTransactionSidecar, EnvKzgSettings}; +use alloy_eips::eip7594::BlobTransactionSidecarEip7594; +use tokio::sync::Semaphore; + +// We allow up to 5 concurrent conversions to avoid excessive memory usage. +static SEMAPHORE: Semaphore = Semaphore::const_new(5); + +/// A simple semaphore-based blob sidecar converter. +#[derive(Debug, Clone, Default)] +#[non_exhaustive] +pub struct BlobSidecarConverter; + +impl BlobSidecarConverter { + /// Creates a new blob sidecar converter. + pub const fn new() -> Self { + Self + } + + /// Converts the blob sidecar to the EIP-7594 format. + pub async fn convert( + &self, + sidecar: BlobTransactionSidecar, + ) -> Option { + let _permit = SEMAPHORE.acquire().await.ok()?; + tokio::task::spawn_blocking(move || sidecar.try_into_7594(EnvKzgSettings::Default.get())) + .await + .ok()? 
+ .ok() + } +} diff --git a/crates/transaction-pool/src/blobstore/mod.rs b/crates/transaction-pool/src/blobstore/mod.rs index 29844994bc0..ee7eb45af0f 100644 --- a/crates/transaction-pool/src/blobstore/mod.rs +++ b/crates/transaction-pool/src/blobstore/mod.rs @@ -5,6 +5,7 @@ use alloy_eips::{ eip7594::BlobTransactionSidecarVariant, }; use alloy_primitives::B256; +pub use converter::BlobSidecarConverter; pub use disk::{DiskFileBlobStore, DiskFileBlobStoreConfig, OpenDiskFileBlobStore}; pub use mem::InMemoryBlobStore; pub use noop::NoopBlobStore; @@ -17,6 +18,7 @@ use std::{ }; pub use tracker::{BlobStoreCanonTracker, BlobStoreUpdates}; +mod converter; pub mod disk; mod mem; mod noop; diff --git a/crates/transaction-pool/src/maintain.rs b/crates/transaction-pool/src/maintain.rs index 732d55d0c3f..aa0366341a6 100644 --- a/crates/transaction-pool/src/maintain.rs +++ b/crates/transaction-pool/src/maintain.rs @@ -1,11 +1,12 @@ //! Support for maintaining the state of the transaction pool use crate::{ - blobstore::{BlobStoreCanonTracker, BlobStoreUpdates}, + blobstore::{BlobSidecarConverter, BlobStoreCanonTracker, BlobStoreUpdates}, error::PoolError, metrics::MaintainPoolMetrics, traits::{CanonicalStateUpdate, EthPoolTransaction, TransactionPool, TransactionPoolExt}, - BlockInfo, PoolTransaction, PoolUpdateKind, TransactionOrigin, + AllPoolTransactions, BlobTransactionSidecarVariant, BlockInfo, PoolTransaction, PoolUpdateKind, + TransactionOrigin, }; use alloy_consensus::{transaction::TxHashRef, BlockHeader, Typed2718}; use alloy_eips::{BlockNumberOrTag, Decodable2718, Encodable2718}; @@ -16,7 +17,7 @@ use futures_util::{ FutureExt, Stream, StreamExt, }; use reth_chain_state::CanonStateNotification; -use reth_chainspec::{ChainSpecProvider, EthChainSpec}; +use reth_chainspec::{ChainSpecProvider, EthChainSpec, EthereumHardforks}; use reth_execution_types::ChangedAccount; use reth_fs_util::FsPathError; use reth_primitives_traits::{ @@ -103,12 +104,12 @@ where N: 
NodePrimitives, Client: StateProviderFactory + BlockReaderIdExt
- + ChainSpecProvider> + + ChainSpecProvider + EthereumHardforks> + Clone + 'static, P: TransactionPoolExt> + 'static, St: Stream> + Send + Unpin + 'static, - Tasks: TaskSpawner + 'static, + Tasks: TaskSpawner + Clone + 'static, { async move { maintain_transaction_pool(client, pool, events, task_spawner, config).await; @@ -129,12 +130,12 @@ pub async fn maintain_transaction_pool( N: NodePrimitives, Client: StateProviderFactory + BlockReaderIdExt
- + ChainSpecProvider> + + ChainSpecProvider + EthereumHardforks> + Clone + 'static, P: TransactionPoolExt> + 'static, St: Stream> + Send + Unpin + 'static, - Tasks: TaskSpawner + 'static, + Tasks: TaskSpawner + Clone + 'static, { let metrics = MaintainPoolMetrics::default(); let MaintainPoolConfig { max_update_depth, max_reload_accounts, .. } = config; @@ -494,6 +495,89 @@ pub async fn maintain_transaction_pool( // keep track of mined blob transactions blob_store_tracker.add_new_chain_blocks(&blocks); + + // If Osaka activates in 2 slots we need to convert blobs to new format. + if !chain_spec.is_osaka_active_at_timestamp(tip.timestamp()) && + !chain_spec.is_osaka_active_at_timestamp(tip.timestamp().saturating_add(12)) && + chain_spec.is_osaka_active_at_timestamp(tip.timestamp().saturating_add(24)) + { + let pool = pool.clone(); + let spawner = task_spawner.clone(); + let client = client.clone(); + task_spawner.spawn(Box::pin(async move { + // Start converting not earlier than 4 seconds into current slot to ensure + // that our pool only contains valid transactions for the next block (as + // it's not Osaka yet). + tokio::time::sleep(Duration::from_secs(4)).await; + + let mut interval = tokio::time::interval(Duration::from_secs(1)); + loop { + // Loop and replace blob transactions until we reach Osaka transition + // block after which no legacy blobs are going to be accepted.
+ let last_iteration = + client.latest_header().ok().flatten().is_none_or(|header| { + client + .chain_spec() + .is_osaka_active_at_timestamp(header.timestamp()) + }); + + let AllPoolTransactions { pending, queued } = pool.all_transactions(); + for tx in pending + .into_iter() + .chain(queued) + .filter(|tx| tx.transaction.is_eip4844()) + { + let tx_hash = *tx.transaction.hash(); + + // Fetch sidecar from the pool + let Ok(Some(sidecar)) = pool.get_blob(tx_hash) else { + continue; + }; + // Ensure it is a legacy blob + if !sidecar.is_eip4844() { + continue; + } + // Remove transaction and sidecar from the pool, both are in memory + // now + let Some(tx) = pool.remove_transactions(vec![tx_hash]).pop() else { + continue; + }; + pool.delete_blob(tx_hash); + + let BlobTransactionSidecarVariant::Eip4844(sidecar) = + Arc::unwrap_or_clone(sidecar) + else { + continue; + }; + + let converter = BlobSidecarConverter::new(); + let pool = pool.clone(); + spawner.spawn(Box::pin(async move { + // Convert sidecar to EIP-7594 format + let Some(sidecar) = converter.convert(sidecar).await else { + return; + }; + + // Re-insert transaction with the new sidecar + let origin = tx.origin; + let Some(tx) = EthPoolTransaction::try_from_eip4844( + tx.transaction.clone_into_consensus(), + sidecar.into(), + ) else { + return; + }; + let _ = pool.add_transaction(origin, tx).await; + })); + } + + if last_iteration { + break; + } + + interval.tick().await; + } + })); + } } } } diff --git a/crates/transaction-pool/src/test_utils/pool.rs b/crates/transaction-pool/src/test_utils/pool.rs index 6af440f086a..ab7bebae2f5 100644 --- a/crates/transaction-pool/src/test_utils/pool.rs +++ b/crates/transaction-pool/src/test_utils/pool.rs @@ -188,7 +188,7 @@ pub(crate) enum Scenario { HigherNonce { onchain: u64, nonce: u64 }, Multi { // Execute multiple test scenarios - scenario: Vec, + scenario: Vec, }, } diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index 
9552646652b..2b9d8bae8ab 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -429,6 +429,20 @@ pub trait TransactionPool: Clone + Debug + Send + Sync { /// Consumer: Utility fn all_transaction_hashes(&self) -> Vec; + /// Removes a single transaction corresponding to the given hash. + /// + /// Note: This removes the transaction as if it got discarded (_not_ mined). + /// + /// Returns the removed transaction if it was found in the pool. + /// + /// Consumer: Utility + fn remove_transaction( + &self, + hash: TxHash, + ) -> Option>> { + self.remove_transactions(vec![hash]).pop() + } + /// Removes all transactions corresponding to the given hashes. /// /// Note: This removes the transactions as if they got discarded (_not_ mined). diff --git a/crates/transaction-pool/src/validate/eth.rs b/crates/transaction-pool/src/validate/eth.rs index 9eab8767d6d..038c820bfe9 100644 --- a/crates/transaction-pool/src/validate/eth.rs +++ b/crates/transaction-pool/src/validate/eth.rs @@ -39,7 +39,7 @@ use std::{ atomic::{AtomicBool, AtomicU64}, Arc, }, - time::Instant, + time::{Instant, SystemTime}, }; use tokio::sync::Mutex; @@ -673,7 +673,7 @@ where Eip4844PoolTransactionError::UnexpectedEip4844SidecarAfterOsaka, )) } - } else if sidecar.is_eip7594() { + } else if sidecar.is_eip7594() && !self.allow_7594_sidecars() { return Err(InvalidPoolTransactionError::Eip4844( Eip4844PoolTransactionError::UnexpectedEip7594SidecarBeforeOsaka, )) @@ -745,6 +745,10 @@ where self.fork_tracker.osaka.store(true, std::sync::atomic::Ordering::Relaxed); } + self.fork_tracker + .tip_timestamp + .store(new_tip_block.timestamp(), std::sync::atomic::Ordering::Relaxed); + if let Some(blob_params) = self.chain_spec().blob_params_at_timestamp(new_tip_block.timestamp()) { @@ -759,6 +763,24 @@ where fn max_gas_limit(&self) -> u64 { self.block_gas_limit.load(std::sync::atomic::Ordering::Relaxed) } + + /// Returns whether EIP-7594 sidecars are allowed + fn 
allow_7594_sidecars(&self) -> bool { + let tip_timestamp = self.fork_tracker.tip_timestamp(); + + // If next block is Osaka, allow 7594 sidecars + if self.chain_spec().is_osaka_active_at_timestamp(tip_timestamp.saturating_add(12)) { + true + } else if self.chain_spec().is_osaka_active_at_timestamp(tip_timestamp.saturating_add(24)) { + let current_timestamp = + SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap().as_secs(); + + // Allow after 4 seconds into last non-Osaka slot + current_timestamp >= tip_timestamp.saturating_add(4) + } else { + false + } + } } impl TransactionValidator for EthTransactionValidator @@ -811,6 +833,8 @@ pub struct EthTransactionValidatorBuilder { prague: bool, /// Fork indicator whether we are in the Osaka hardfork. osaka: bool, + /// Timestamp of the tip block. + tip_timestamp: u64, /// Max blob count at the block's timestamp. max_blob_count: u64, /// Whether using EIP-2718 type transactions is allowed @@ -885,6 +909,8 @@ impl EthTransactionValidatorBuilder { // osaka not yet activated osaka: false, + tip_timestamp: 0, + // max blob count is prague by default max_blob_count: BlobParams::prague().max_blobs_per_tx, @@ -1012,6 +1038,7 @@ impl EthTransactionValidatorBuilder { self.cancun = self.client.chain_spec().is_cancun_active_at_timestamp(timestamp); self.prague = self.client.chain_spec().is_prague_active_at_timestamp(timestamp); self.osaka = self.client.chain_spec().is_osaka_active_at_timestamp(timestamp); + self.tip_timestamp = timestamp; self.max_blob_count = self .client .chain_spec() @@ -1072,6 +1099,7 @@ impl EthTransactionValidatorBuilder { cancun, prague, osaka, + tip_timestamp, eip2718, eip1559, eip4844, @@ -1094,6 +1122,7 @@ impl EthTransactionValidatorBuilder { cancun: AtomicBool::new(cancun), prague: AtomicBool::new(prague), osaka: AtomicBool::new(osaka), + tip_timestamp: AtomicU64::new(tip_timestamp), max_blob_count: AtomicU64::new(max_blob_count), }; @@ -1175,6 +1204,8 @@ pub struct ForkTracker { pub osaka: 
AtomicBool, /// Tracks max blob count per transaction at the block's timestamp. pub max_blob_count: AtomicU64, + /// Tracks the timestamp of the tip block. + pub tip_timestamp: AtomicU64, } impl ForkTracker { @@ -1198,6 +1229,11 @@ impl ForkTracker { self.osaka.load(std::sync::atomic::Ordering::Relaxed) } + /// Returns the timestamp of the tip block. + pub fn tip_timestamp(&self) -> u64 { + self.tip_timestamp.load(std::sync::atomic::Ordering::Relaxed) + } + /// Returns the max allowed blob count per transaction. pub fn max_blob_count(&self) -> u64 { self.max_blob_count.load(std::sync::atomic::Ordering::Relaxed) @@ -1272,6 +1308,7 @@ mod tests { cancun: false.into(), prague: false.into(), osaka: false.into(), + tip_timestamp: 0.into(), max_blob_count: 0.into(), }; diff --git a/crates/trie/common/src/prefix_set.rs b/crates/trie/common/src/prefix_set.rs index 6714893f16d..35c4bc67839 100644 --- a/crates/trie/common/src/prefix_set.rs +++ b/crates/trie/common/src/prefix_set.rs @@ -280,8 +280,8 @@ mod tests { prefix_set_mut.insert(Nibbles::from_nibbles([4, 5, 6])); prefix_set_mut.insert(Nibbles::from_nibbles([1, 2, 3])); // Duplicate - assert_eq!(prefix_set_mut.keys.len(), 4); // Length should be 3 (including duplicate) - assert_eq!(prefix_set_mut.keys.capacity(), 4); // Capacity should be 4 (including duplicate) + assert_eq!(prefix_set_mut.keys.len(), 4); // Length is 4 (before deduplication) + assert_eq!(prefix_set_mut.keys.capacity(), 4); // Capacity is 4 (before deduplication) let mut prefix_set = prefix_set_mut.freeze(); assert!(prefix_set.contains(&Nibbles::from_nibbles_unchecked([1, 2]))); @@ -300,8 +300,8 @@ mod tests { prefix_set_mut.insert(Nibbles::from_nibbles([4, 5, 6])); prefix_set_mut.insert(Nibbles::from_nibbles([1, 2, 3])); // Duplicate - assert_eq!(prefix_set_mut.keys.len(), 4); // Length should be 3 (including duplicate) - assert_eq!(prefix_set_mut.keys.capacity(), 101); // Capacity should be 101 (including duplicate) + 
assert_eq!(prefix_set_mut.keys.len(), 4); // Length is 4 (before deduplication) + assert_eq!(prefix_set_mut.keys.capacity(), 101); // Capacity is 101 (before deduplication) let mut prefix_set = prefix_set_mut.freeze(); assert!(prefix_set.contains(&Nibbles::from_nibbles_unchecked([1, 2]))); diff --git a/crates/trie/db/src/state.rs b/crates/trie/db/src/state.rs index 256ee20794e..6d37c5f3413 100644 --- a/crates/trie/db/src/state.rs +++ b/crates/trie/db/src/state.rs @@ -20,7 +20,7 @@ use std::{ collections::HashMap, ops::{RangeBounds, RangeInclusive}, }; -use tracing::debug; +use tracing::{debug, instrument}; /// Extends [`StateRoot`] with operations specific for working with a database transaction. pub trait DatabaseStateRoot<'a, TX>: Sized { @@ -226,6 +226,7 @@ impl<'a, TX: DbTx> DatabaseStateRoot<'a, TX> } impl DatabaseHashedPostState for HashedPostState { + #[instrument(target = "trie::db", skip(tx), fields(range))] fn from_reverts( tx: &TX, range: impl RangeBounds, diff --git a/crates/trie/parallel/src/proof.rs b/crates/trie/parallel/src/proof.rs index ffa7aa4dc31..3ea5994488a 100644 --- a/crates/trie/parallel/src/proof.rs +++ b/crates/trie/parallel/src/proof.rs @@ -101,7 +101,7 @@ impl ParallelProof { ); self.proof_worker_handle - .queue_storage_proof(input) + .dispatch_storage_proof(input) .map_err(|e| ParallelStateRootError::Other(e.to_string())) } diff --git a/crates/trie/parallel/src/proof_task.rs b/crates/trie/parallel/src/proof_task.rs index 5c26f6d99c3..b3269f21fbb 100644 --- a/crates/trie/parallel/src/proof_task.rs +++ b/crates/trie/parallel/src/proof_task.rs @@ -693,7 +693,7 @@ where multi_added_removed_keys.unwrap_or_else(|| Arc::new(MultiAddedRemovedKeys::new())); let added_removed_keys = multi_added_removed_keys.get_storage(&hashed_address); - let span = tracing::trace_span!( + let span = tracing::info_span!( target: "trie::proof_task", "Storage proof calculation", hashed_address = ?hashed_address, @@ -958,8 +958,8 @@ impl ProofWorkerHandle { Self { 
storage_work_tx, account_work_tx } } - /// Queue a storage proof computation - pub fn queue_storage_proof( + /// Dispatch a storage proof computation to storage worker pool + pub fn dispatch_storage_proof( &self, input: StorageProofInput, ) -> Result, ProviderError> { @@ -988,8 +988,8 @@ impl ProofWorkerHandle { Ok(rx) } - /// Internal: Queue blinded storage node request - fn queue_blinded_storage_node( + /// Dispatch blinded storage node request to storage worker pool + pub(crate) fn dispatch_blinded_storage_node( &self, account: B256, path: Nibbles, @@ -1004,8 +1004,8 @@ impl ProofWorkerHandle { Ok(rx) } - /// Internal: Queue blinded account node request - fn queue_blinded_account_node( + /// Dispatch blinded account node request to account worker pool + pub(crate) fn dispatch_blinded_account_node( &self, path: Nibbles, ) -> Result, ProviderError> { @@ -1055,13 +1055,13 @@ impl TrieNodeProvider for ProofTaskTrieNodeProvider { match self { Self::AccountNode { handle } => { let rx = handle - .queue_blinded_account_node(*path) + .dispatch_blinded_account_node(*path) .map_err(|error| SparseTrieErrorKind::Other(Box::new(error)))?; rx.recv().map_err(|error| SparseTrieErrorKind::Other(Box::new(error)))? } Self::StorageNode { handle, account } => { let rx = handle - .queue_blinded_storage_node(*account, *path) + .dispatch_blinded_storage_node(*account, *path) .map_err(|error| SparseTrieErrorKind::Other(Box::new(error)))?; rx.recv().map_err(|error| SparseTrieErrorKind::Other(Box::new(error)))? 
} diff --git a/crates/trie/sparse-parallel/src/lower.rs b/crates/trie/sparse-parallel/src/lower.rs index 449c3a7b29b..b5454dd3970 100644 --- a/crates/trie/sparse-parallel/src/lower.rs +++ b/crates/trie/sparse-parallel/src/lower.rs @@ -106,4 +106,20 @@ impl LowerSparseSubtrie { Self::Revealed(_) | Self::Blind(_) => None, } } + + /// Returns the capacity of any maps containing trie nodes + pub(crate) fn node_capacity(&self) -> usize { + match self { + Self::Revealed(trie) | Self::Blind(Some(trie)) => trie.node_capacity(), + Self::Blind(None) => 0, + } + } + + /// Returns the capacity of any maps containing trie values + pub(crate) fn value_capacity(&self) -> usize { + match self { + Self::Revealed(trie) | Self::Blind(Some(trie)) => trie.value_capacity(), + Self::Blind(None) => 0, + } + } } diff --git a/crates/trie/sparse-parallel/src/trie.rs b/crates/trie/sparse-parallel/src/trie.rs index 472624f99d7..b15eb7f4edb 100644 --- a/crates/trie/sparse-parallel/src/trie.rs +++ b/crates/trie/sparse-parallel/src/trie.rs @@ -741,13 +741,24 @@ impl SparseTrieInterface for ParallelSparseTrie { // Update subtrie hashes in parallel { use rayon::iter::{IntoParallelIterator, ParallelIterator}; + use tracing::info_span; + let (tx, rx) = mpsc::channel(); let branch_node_tree_masks = &self.branch_node_tree_masks; let branch_node_hash_masks = &self.branch_node_hash_masks; + let span = tracing::Span::current(); changed_subtries .into_par_iter() .map(|mut changed_subtrie| { + let _enter = info_span!( + target: "trie::sparse::parallel", + parent: span.clone(), + "subtrie", + index = changed_subtrie.index + ) + .entered(); + #[cfg(feature = "metrics")] let start = std::time::Instant::now(); changed_subtrie.subtrie.update_hashes( @@ -862,6 +873,16 @@ impl SparseTrieInterface for ParallelSparseTrie { } } } + + fn node_capacity(&self) -> usize { + self.upper_subtrie.node_capacity() + + self.lower_subtries.iter().map(|trie| trie.node_capacity()).sum::() + } + + fn value_capacity(&self) -> usize 
{ + self.upper_subtrie.value_capacity() + + self.lower_subtries.iter().map(|trie| trie.value_capacity()).sum::() + } } impl ParallelSparseTrie { @@ -1282,6 +1303,7 @@ impl ParallelSparseTrie { /// Drains any [`SparseTrieUpdatesAction`]s from the given subtrie, and applies each action to /// the given `updates` set. If the given set is None then this is a no-op. + #[instrument(target = "trie::sparse::parallel", skip_all)] fn apply_subtrie_update_actions( &mut self, update_actions: impl Iterator, @@ -1305,7 +1327,7 @@ impl ParallelSparseTrie { } /// Updates hashes for the upper subtrie, using nodes from both upper and lower subtries. - #[instrument(level = "trace", target = "trie::parallel_sparse", skip_all, ret)] + #[instrument(target = "trie::parallel_sparse", skip_all, ret(level = "trace"))] fn update_upper_subtrie_hashes(&mut self, prefix_set: &mut PrefixSet) -> RlpNode { trace!(target: "trie::parallel_sparse", "Updating upper subtrie hashes"); @@ -1383,6 +1405,7 @@ impl ParallelSparseTrie { /// /// IMPORTANT: The method removes the subtries from `lower_subtries`, and the caller is /// responsible for returning them back into the array. + #[instrument(target = "trie::sparse::parallel", skip_all, fields(prefix_set_len = prefix_set.len()))] fn take_changed_lower_subtries( &mut self, prefix_set: &mut PrefixSet, @@ -1539,6 +1562,7 @@ impl ParallelSparseTrie { /// Return updated subtries back to the trie after executing any actions required on the /// top-level `SparseTrieUpdates`. + #[instrument(target = "trie::sparse::parallel", skip_all)] fn insert_changed_subtries( &mut self, changed_subtries: impl IntoIterator, @@ -2026,7 +2050,7 @@ impl SparseSubtrie { /// # Panics /// /// If the node at the root path does not exist. 
- #[instrument(level = "trace", target = "trie::parallel_sparse", skip_all, fields(root = ?self.path), ret)] + #[instrument(target = "trie::parallel_sparse", skip_all, fields(root = ?self.path), ret(level = "trace"))] fn update_hashes( &mut self, prefix_set: &mut PrefixSet, @@ -2077,6 +2101,16 @@ impl SparseSubtrie { self.nodes.clear(); self.inner.clear(); } + + /// Returns the capacity of the map containing trie nodes. + pub(crate) fn node_capacity(&self) -> usize { + self.nodes.capacity() + } + + /// Returns the capacity of the map containing trie values. + pub(crate) fn value_capacity(&self) -> usize { + self.inner.value_capacity() + } } /// Helper type for [`SparseSubtrie`] to mutably access only a subset of fields from the original @@ -2410,6 +2444,11 @@ impl SparseSubtrieInner { self.values.clear(); self.buffers.clear(); } + + /// Returns the capacity of the map storing leaf values + fn value_capacity(&self) -> usize { + self.values.capacity() + } } /// Represents the outcome of processing a node during leaf insertion diff --git a/crates/trie/sparse/Cargo.toml b/crates/trie/sparse/Cargo.toml index 6fac7c5faad..b2c7ee0f566 100644 --- a/crates/trie/sparse/Cargo.toml +++ b/crates/trie/sparse/Cargo.toml @@ -16,7 +16,7 @@ workspace = true reth-primitives-traits.workspace = true reth-execution-errors.workspace = true reth-trie-common.workspace = true -tracing.workspace = true +tracing = { workspace = true, features = ["attributes"] } alloy-trie.workspace = true # alloy diff --git a/crates/trie/sparse/src/metrics.rs b/crates/trie/sparse/src/metrics.rs index 430a831a2f7..3f39e6df6f9 100644 --- a/crates/trie/sparse/src/metrics.rs +++ b/crates/trie/sparse/src/metrics.rs @@ -1,5 +1,6 @@ //! 
Metrics for the sparse state trie +use metrics::Gauge; use reth_metrics::{metrics::Histogram, Metrics}; /// Metrics for the sparse state trie @@ -15,24 +16,24 @@ pub(crate) struct SparseStateTrieMetrics { pub(crate) multiproof_skipped_storage_nodes: u64, /// Number of total storage nodes, including those that were skipped. pub(crate) multiproof_total_storage_nodes: u64, - /// The actual metrics we will record into the histogram - pub(crate) histograms: SparseStateTrieHistograms, + /// The actual metrics we will record + pub(crate) inner_metrics: SparseStateTrieInnerMetrics, } impl SparseStateTrieMetrics { /// Record the metrics into the histograms pub(crate) fn record(&mut self) { use core::mem::take; - self.histograms + self.inner_metrics .multiproof_skipped_account_nodes .record(take(&mut self.multiproof_skipped_account_nodes) as f64); - self.histograms + self.inner_metrics .multiproof_total_account_nodes .record(take(&mut self.multiproof_total_account_nodes) as f64); - self.histograms + self.inner_metrics .multiproof_skipped_storage_nodes .record(take(&mut self.multiproof_skipped_storage_nodes) as f64); - self.histograms + self.inner_metrics .multiproof_total_storage_nodes .record(take(&mut self.multiproof_total_storage_nodes) as f64); } @@ -56,12 +57,28 @@ impl SparseStateTrieMetrics { pub(crate) const fn increment_total_storage_nodes(&mut self, count: u64) { self.multiproof_total_storage_nodes += count; } + + /// Set the value capacity for the sparse state trie + pub(crate) fn set_value_capacity(&self, capacity: usize) { + self.inner_metrics.value_capacity.set(capacity as f64); + } + + /// Set the node capacity for the sparse state trie + pub(crate) fn set_node_capacity(&self, capacity: usize) { + self.inner_metrics.node_capacity.set(capacity as f64); + } + + /// Set the number of cleared and active storage tries + pub(crate) fn set_storage_trie_metrics(&self, cleared: usize, active: usize) { + self.inner_metrics.cleared_storage_tries.set(cleared as f64); + 
self.inner_metrics.active_storage_tries.set(active as f64); + } } /// Metrics for the sparse state trie #[derive(Metrics)] #[metrics(scope = "sparse_state_trie")] -pub(crate) struct SparseStateTrieHistograms { +pub(crate) struct SparseStateTrieInnerMetrics { /// Histogram of account nodes that were skipped during a multiproof reveal due to being /// redundant (i.e. they were already revealed) pub(crate) multiproof_skipped_account_nodes: Histogram, @@ -72,4 +89,12 @@ pub(crate) struct SparseStateTrieHistograms { pub(crate) multiproof_skipped_storage_nodes: Histogram, /// Histogram of total storage nodes, including those that were skipped. pub(crate) multiproof_total_storage_nodes: Histogram, + /// Gauge for the trie's node capacity + pub(crate) node_capacity: Gauge, + /// Gauge for the trie's value capacity + pub(crate) value_capacity: Gauge, + /// The current number of cleared storage tries. + pub(crate) cleared_storage_tries: Gauge, + /// The number of currently active storage tries, i.e., not cleared + pub(crate) active_storage_tries: Gauge, } diff --git a/crates/trie/sparse/src/state.rs b/crates/trie/sparse/src/state.rs index fde4810da57..aef552da3dd 100644 --- a/crates/trie/sparse/src/state.rs +++ b/crates/trie/sparse/src/state.rs @@ -18,7 +18,7 @@ use reth_trie_common::{ DecodedMultiProof, DecodedStorageMultiProof, MultiProof, Nibbles, RlpNode, StorageMultiProof, TrieAccount, TrieMask, TrieNode, EMPTY_ROOT_HASH, TRIE_ACCOUNT_RLP_MAX_SIZE, }; -use tracing::trace; +use tracing::{instrument, trace}; /// Provides type-safe re-use of cleared [`SparseStateTrie`]s, which helps to save allocations /// across payload runs. @@ -208,6 +208,14 @@ where /// Reveal unknown trie paths from decoded multiproof. /// NOTE: This method does not extensively validate the proof. 
+ #[instrument( + target = "trie::sparse", + skip_all, + fields( + account_nodes = multiproof.account_subtree.len(), + storages = multiproof.storages.len() + ) + )] pub fn reveal_decoded_multiproof( &mut self, multiproof: DecodedMultiProof, @@ -532,6 +540,7 @@ where /// Calculates the hashes of subtries. /// /// If the trie has not been revealed, this function does nothing. + #[instrument(target = "trie::sparse", skip_all)] pub fn calculate_subtries(&mut self) { if let SparseTrie::Revealed(trie) = &mut self.state { trie.update_subtrie_hashes(); @@ -576,21 +585,38 @@ where &mut self, provider_factory: impl TrieNodeProviderFactory, ) -> SparseStateTrieResult { - // record revealed node metrics + // record revealed node metrics and capacity metrics #[cfg(feature = "metrics")] - self.metrics.record(); + { + self.metrics.record(); + self.metrics.set_node_capacity(self.node_capacity()); + self.metrics.set_value_capacity(self.value_capacity()); + self.metrics.set_storage_trie_metrics( + self.storage.cleared_tries.len(), + self.storage.tries.len(), + ); + } Ok(self.revealed_trie_mut(provider_factory)?.root()) } /// Returns sparse trie root and trie updates if the trie has been revealed. 
+ #[instrument(target = "trie::sparse", skip_all)] pub fn root_with_updates( &mut self, provider_factory: impl TrieNodeProviderFactory, ) -> SparseStateTrieResult<(B256, TrieUpdates)> { - // record revealed node metrics + // record revealed node metrics and capacity metrics #[cfg(feature = "metrics")] - self.metrics.record(); + { + self.metrics.record(); + self.metrics.set_node_capacity(self.node_capacity()); + self.metrics.set_value_capacity(self.value_capacity()); + self.metrics.set_storage_trie_metrics( + self.storage.cleared_tries.len(), + self.storage.tries.len(), + ); + } let storage_tries = self.storage_trie_updates(); let revealed = self.revealed_trie_mut(provider_factory)?; @@ -679,6 +705,7 @@ where /// /// Returns false if the new account info and storage trie are empty, indicating the account /// leaf should be removed. + #[instrument(target = "trie::sparse", skip_all)] pub fn update_account( &mut self, address: B256, @@ -721,6 +748,7 @@ where /// /// Returns false if the new storage root is empty, and the account info was already empty, /// indicating the account leaf should be removed. + #[instrument(target = "trie::sparse", skip_all)] pub fn update_account_storage_root( &mut self, address: B256, @@ -768,6 +796,7 @@ where } /// Remove the account leaf node. + #[instrument(target = "trie::sparse", skip_all)] pub fn remove_account_leaf( &mut self, path: &Nibbles, @@ -792,6 +821,16 @@ where storage_trie.remove_leaf(slot, provider)?; Ok(()) } + + /// The sum of the account trie's node capacity and the storage tries' node capacity + pub fn node_capacity(&self) -> usize { + self.state.node_capacity() + self.storage.total_node_capacity() + } + + /// The sum of the account trie's value capacity and the storage tries' value capacity + pub fn value_capacity(&self) -> usize { + self.state.value_capacity() + self.storage.total_value_capacity() + } } /// The fields of [`SparseStateTrie`] related to storage tries. 
This is kept separate from the rest @@ -867,6 +906,46 @@ impl StorageTries { .remove(account) .unwrap_or_else(|| self.cleared_revealed_paths.pop().unwrap_or_default()) } + + /// Sums the total node capacity in `cleared_tries` + fn total_cleared_tries_node_capacity(&self) -> usize { + self.cleared_tries.iter().map(|trie| trie.node_capacity()).sum() + } + + /// Sums the total value capacity in `cleared_tries` + fn total_cleared_tries_value_capacity(&self) -> usize { + self.cleared_tries.iter().map(|trie| trie.value_capacity()).sum() + } + + /// Calculates the sum of the active storage trie node capacity, ie the tries in `tries` + fn total_active_tries_node_capacity(&self) -> usize { + self.tries.values().map(|trie| trie.node_capacity()).sum() + } + + /// Calculates the sum of the active storage trie value capacity, ie the tries in `tries` + fn total_active_tries_value_capacity(&self) -> usize { + self.tries.values().map(|trie| trie.value_capacity()).sum() + } + + /// Calculates the sum of active and cleared storage trie node capacity, i.e. the sum of + /// * [`StorageTries::total_active_tries_node_capacity`], and + /// * [`StorageTries::total_cleared_tries_node_capacity`] + /// * the default trie's node capacity + fn total_node_capacity(&self) -> usize { + self.total_active_tries_node_capacity() + + self.total_cleared_tries_node_capacity() + + self.default_trie.node_capacity() + } + + /// Calculates the sum of active and cleared storage trie value capacity, i.e. 
the sum of + /// * [`StorageTries::total_active_tries_value_capacity`], and + /// * [`StorageTries::total_cleared_tries_value_capacity`], and + /// * the default trie's value capacity + fn total_value_capacity(&self) -> usize { + self.total_active_tries_value_capacity() + + self.total_cleared_tries_value_capacity() + + self.default_trie.value_capacity() + } } #[derive(Debug, PartialEq, Eq, Default)] diff --git a/crates/trie/sparse/src/traits.rs b/crates/trie/sparse/src/traits.rs index 300ac39c1b6..8fdbb78d876 100644 --- a/crates/trie/sparse/src/traits.rs +++ b/crates/trie/sparse/src/traits.rs @@ -222,6 +222,12 @@ pub trait SparseTrieInterface: Sized + Debug + Send + Sync { /// /// This is useful for reusing the trie without needing to reallocate memory. fn clear(&mut self); + + /// This returns the capacity of any inner data structures which store nodes. + fn node_capacity(&self) -> usize; + + /// This returns the capacity of any inner data structures which store leaf values. + fn value_capacity(&self) -> usize; } /// Struct for passing around branch node mask information. diff --git a/crates/trie/sparse/src/trie.rs b/crates/trie/sparse/src/trie.rs index cbffe5e7563..737da842254 100644 --- a/crates/trie/sparse/src/trie.rs +++ b/crates/trie/sparse/src/trie.rs @@ -24,7 +24,7 @@ use reth_trie_common::{ TrieNode, CHILD_INDEX_RANGE, EMPTY_ROOT_HASH, }; use smallvec::SmallVec; -use tracing::{debug, trace}; +use tracing::{debug, instrument, trace}; /// The level below which the sparse trie hashes are calculated in /// [`SerialSparseTrie::update_subtrie_hashes`]. @@ -175,6 +175,7 @@ impl SparseTrie { /// and resetting the trie to only contain an empty root node. /// /// Note: This method will error if the trie is blinded. 
+ #[instrument(target = "trie::sparse", skip_all)] pub fn wipe(&mut self) -> SparseTrieResult<()> { let revealed = self.as_revealed_mut().ok_or(SparseTrieErrorKind::Blind)?; revealed.wipe(); @@ -191,6 +192,7 @@ impl SparseTrie { /// /// - `Some(B256)` with the calculated root hash if the trie is revealed. /// - `None` if the trie is still blind. + #[instrument(target = "trie::sparse", skip_all)] pub fn root(&mut self) -> Option { Some(self.as_revealed_mut()?.root()) } @@ -230,6 +232,7 @@ impl SparseTrie { /// # Errors /// /// Returns an error if the trie is still blind, or if the update fails. + #[instrument(target = "trie::sparse", skip_all)] pub fn update_leaf( &mut self, path: Nibbles, @@ -246,6 +249,7 @@ impl SparseTrie { /// # Errors /// /// Returns an error if the trie is still blind, or if the leaf cannot be removed + #[instrument(target = "trie::sparse", skip_all)] pub fn remove_leaf( &mut self, path: &Nibbles, @@ -255,6 +259,22 @@ impl SparseTrie { revealed.remove_leaf(path, provider)?; Ok(()) } + + /// Returns the allocated capacity for sparse trie nodes. + pub fn node_capacity(&self) -> usize { + match self { + Self::Blind(Some(trie)) | Self::Revealed(trie) => trie.node_capacity(), + _ => 0, + } + } + + /// Returns the allocated capacity for sparse trie values. + pub fn value_capacity(&self) -> usize { + match self { + Self::Blind(Some(trie)) | Self::Revealed(trie) => trie.value_capacity(), + _ => 0, + } + } } /// The representation of revealed sparse trie. 
@@ -573,14 +593,13 @@ impl SparseTrieInterface for SerialSparseTrie { Ok(()) } + #[instrument(target = "trie::sparse::serial", skip(self, provider))] fn update_leaf( &mut self, full_path: Nibbles, value: Vec, provider: P, ) -> SparseTrieResult<()> { - trace!(target: "trie::sparse", ?full_path, ?value, "update_leaf called"); - self.prefix_set.insert(full_path); let existing = self.values.insert(full_path, value); if existing.is_some() { @@ -712,6 +731,7 @@ impl SparseTrieInterface for SerialSparseTrie { Ok(()) } + #[instrument(target = "trie::sparse::serial", skip(self, provider))] fn remove_leaf( &mut self, full_path: &Nibbles, @@ -897,6 +917,7 @@ impl SparseTrieInterface for SerialSparseTrie { Ok(()) } + #[instrument(target = "trie::sparse::serial", skip(self))] fn root(&mut self) -> B256 { // Take the current prefix set let mut prefix_set = core::mem::take(&mut self.prefix_set).freeze(); @@ -1059,6 +1080,14 @@ impl SparseTrieInterface for SerialSparseTrie { // If we get here, there's no leaf at the target path Ok(LeafLookup::NonExistent) } + + fn node_capacity(&self) -> usize { + self.nodes.capacity() + } + + fn value_capacity(&self) -> usize { + self.values.capacity() + } } impl SerialSparseTrie { @@ -1324,6 +1353,7 @@ impl SerialSparseTrie { /// /// This function identifies all nodes that have changed (based on the prefix set) at the given /// depth and recalculates their RLP representation. + #[instrument(target = "trie::sparse::serial", skip(self))] pub fn update_rlp_node_level(&mut self, depth: usize) { // Take the current prefix set let mut prefix_set = core::mem::take(&mut self.prefix_set).freeze(); @@ -1369,6 +1399,7 @@ impl SerialSparseTrie { /// specified depth. /// - A `PrefixSetMut` containing paths shallower than the specified depth that still need to be /// tracked for future updates. 
+ #[instrument(target = "trie::sparse::serial", skip(self))] fn get_changed_nodes_at_depth( &self, prefix_set: &mut PrefixSet, @@ -1455,6 +1486,7 @@ impl SerialSparseTrie { /// # Panics /// /// If the node at provided path does not exist. + #[instrument(target = "trie::sparse::serial", skip_all, ret(level = "trace"))] pub fn rlp_node( &mut self, prefix_set: &mut PrefixSet, diff --git a/crates/trie/trie/src/hashed_cursor/mock.rs b/crates/trie/trie/src/hashed_cursor/mock.rs index 895bf852a22..308f05e4c8a 100644 --- a/crates/trie/trie/src/hashed_cursor/mock.rs +++ b/crates/trie/trie/src/hashed_cursor/mock.rs @@ -101,7 +101,7 @@ impl MockHashedCursor { impl HashedCursor for MockHashedCursor { type Value = T; - #[instrument(level = "trace", skip(self), ret)] + #[instrument(skip(self), ret(level = "trace"))] fn seek(&mut self, key: B256) -> Result, DatabaseError> { // Find the first key that is greater than or equal to the given key. let entry = self.values.iter().find_map(|(k, v)| (k >= &key).then(|| (*k, v.clone()))); @@ -115,7 +115,7 @@ impl HashedCursor for MockHashedCursor { Ok(entry) } - #[instrument(level = "trace", skip(self), ret)] + #[instrument(skip(self), ret(level = "trace"))] fn next(&mut self) -> Result, DatabaseError> { let mut iter = self.values.iter(); // Jump to the first key that has a prefix of the current key if it's set, or to the first diff --git a/crates/trie/trie/src/node_iter.rs b/crates/trie/trie/src/node_iter.rs index 862176c803a..e11cd51f790 100644 --- a/crates/trie/trie/src/node_iter.rs +++ b/crates/trie/trie/src/node_iter.rs @@ -191,11 +191,10 @@ where /// /// NOTE: The iteration will start from the key of the previous hashed entry if it was supplied. 
#[instrument( - level = "trace", target = "trie::node_iter", skip_all, fields(trie_type = ?self.trie_type), - ret + ret(level = "trace") )] pub fn try_next( &mut self, diff --git a/crates/trie/trie/src/trie_cursor/mock.rs b/crates/trie/trie/src/trie_cursor/mock.rs index 4b0b7f699dc..add2d7ddef3 100644 --- a/crates/trie/trie/src/trie_cursor/mock.rs +++ b/crates/trie/trie/src/trie_cursor/mock.rs @@ -103,7 +103,7 @@ impl MockTrieCursor { } impl TrieCursor for MockTrieCursor { - #[instrument(level = "trace", skip(self), ret)] + #[instrument(skip(self), ret(level = "trace"))] fn seek_exact( &mut self, key: Nibbles, @@ -119,7 +119,7 @@ impl TrieCursor for MockTrieCursor { Ok(entry) } - #[instrument(level = "trace", skip(self), ret)] + #[instrument(skip(self), ret(level = "trace"))] fn seek( &mut self, key: Nibbles, @@ -136,7 +136,7 @@ impl TrieCursor for MockTrieCursor { Ok(entry) } - #[instrument(level = "trace", skip(self), ret)] + #[instrument(skip(self), ret(level = "trace"))] fn next(&mut self) -> Result, DatabaseError> { let mut iter = self.trie_nodes.iter(); // Jump to the first key that has a prefix of the current key if it's set, or to the first @@ -155,7 +155,7 @@ impl TrieCursor for MockTrieCursor { Ok(entry) } - #[instrument(level = "trace", skip(self), ret)] + #[instrument(skip(self), ret(level = "trace"))] fn current(&mut self) -> Result, DatabaseError> { Ok(self.current_key) } diff --git a/crates/trie/trie/src/walker.rs b/crates/trie/trie/src/walker.rs index f12bf46f748..0ea466437f5 100644 --- a/crates/trie/trie/src/walker.rs +++ b/crates/trie/trie/src/walker.rs @@ -157,7 +157,7 @@ impl> TrieWalker { } /// Returns the next unprocessed key in the trie along with its raw [`Nibbles`] representation. 
- #[instrument(level = "trace", skip(self), ret)] + #[instrument(skip(self), ret(level = "trace"))] pub fn next_unprocessed_key(&self) -> Option<(B256, Nibbles)> { self.key() .and_then(|key| if self.can_skip_current_node { key.increment() } else { Some(*key) }) @@ -297,7 +297,7 @@ impl> TrieWalker { } /// Consumes the next node in the trie, updating the stack. - #[instrument(level = "trace", skip(self), ret)] + #[instrument(skip(self), ret(level = "trace"))] fn consume_node(&mut self) -> Result<(), DatabaseError> { let Some((key, node)) = self.node(false)? else { // If no next node is found, clear the stack. @@ -343,7 +343,7 @@ impl> TrieWalker { } /// Moves to the next sibling node in the trie, updating the stack. - #[instrument(level = "trace", skip(self), ret)] + #[instrument(skip(self), ret(level = "trace"))] fn move_to_next_sibling( &mut self, allow_root_to_child_nibble: bool, diff --git a/docs/vocs/docs/pages/cli/reth.mdx b/docs/vocs/docs/pages/cli/reth.mdx index feb4e8bf50d..0344c23bf2c 100644 --- a/docs/vocs/docs/pages/cli/reth.mdx +++ b/docs/vocs/docs/pages/cli/reth.mdx @@ -96,7 +96,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] diff --git a/docs/vocs/docs/pages/cli/reth/config.mdx b/docs/vocs/docs/pages/cli/reth/config.mdx index 6c7cf532995..adc08cd96e6 100644 --- a/docs/vocs/docs/pages/cli/reth/config.mdx +++ b/docs/vocs/docs/pages/cli/reth/config.mdx @@ -82,7 +82,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] diff --git a/docs/vocs/docs/pages/cli/reth/db.mdx b/docs/vocs/docs/pages/cli/reth/db.mdx index 04b779c0f13..91397e0f7e9 100644 --- a/docs/vocs/docs/pages/cli/reth/db.mdx +++ b/docs/vocs/docs/pages/cli/reth/db.mdx @@ -147,7 +147,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] diff --git 
a/docs/vocs/docs/pages/cli/reth/db/checksum.mdx b/docs/vocs/docs/pages/cli/reth/db/checksum.mdx index d4a32382302..834fd42e447 100644 --- a/docs/vocs/docs/pages/cli/reth/db/checksum.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/checksum.mdx @@ -99,7 +99,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] diff --git a/docs/vocs/docs/pages/cli/reth/db/clear.mdx b/docs/vocs/docs/pages/cli/reth/db/clear.mdx index 5f1f9935b0f..0b64cefb71b 100644 --- a/docs/vocs/docs/pages/cli/reth/db/clear.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/clear.mdx @@ -91,7 +91,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] diff --git a/docs/vocs/docs/pages/cli/reth/db/clear/mdbx.mdx b/docs/vocs/docs/pages/cli/reth/db/clear/mdbx.mdx index 324e6f15ca2..eb4120a34cb 100644 --- a/docs/vocs/docs/pages/cli/reth/db/clear/mdbx.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/clear/mdbx.mdx @@ -90,7 +90,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] diff --git a/docs/vocs/docs/pages/cli/reth/db/clear/static-file.mdx b/docs/vocs/docs/pages/cli/reth/db/clear/static-file.mdx index 375692f315f..913c6fcc5eb 100644 --- a/docs/vocs/docs/pages/cli/reth/db/clear/static-file.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/clear/static-file.mdx @@ -93,7 +93,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] diff --git a/docs/vocs/docs/pages/cli/reth/db/diff.mdx b/docs/vocs/docs/pages/cli/reth/db/diff.mdx index 24c2493d6c8..b5120d7409a 100644 --- a/docs/vocs/docs/pages/cli/reth/db/diff.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/diff.mdx @@ -126,7 +126,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] diff --git 
a/docs/vocs/docs/pages/cli/reth/db/drop.mdx b/docs/vocs/docs/pages/cli/reth/db/drop.mdx index 58f4e3771b9..e0a54dcac35 100644 --- a/docs/vocs/docs/pages/cli/reth/db/drop.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/drop.mdx @@ -89,7 +89,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] diff --git a/docs/vocs/docs/pages/cli/reth/db/get.mdx b/docs/vocs/docs/pages/cli/reth/db/get.mdx index 93d12e2130e..0d027754d59 100644 --- a/docs/vocs/docs/pages/cli/reth/db/get.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/get.mdx @@ -91,7 +91,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] diff --git a/docs/vocs/docs/pages/cli/reth/db/get/mdbx.mdx b/docs/vocs/docs/pages/cli/reth/db/get/mdbx.mdx index 7f1a6e2a121..2ea1ea48f2e 100644 --- a/docs/vocs/docs/pages/cli/reth/db/get/mdbx.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/get/mdbx.mdx @@ -99,7 +99,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] diff --git a/docs/vocs/docs/pages/cli/reth/db/get/static-file.mdx b/docs/vocs/docs/pages/cli/reth/db/get/static-file.mdx index 7ec416f4a4d..21e08493453 100644 --- a/docs/vocs/docs/pages/cli/reth/db/get/static-file.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/get/static-file.mdx @@ -99,7 +99,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] diff --git a/docs/vocs/docs/pages/cli/reth/db/list.mdx b/docs/vocs/docs/pages/cli/reth/db/list.mdx index 7a9ee35145e..55e14d822cd 100644 --- a/docs/vocs/docs/pages/cli/reth/db/list.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/list.mdx @@ -132,7 +132,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] diff --git a/docs/vocs/docs/pages/cli/reth/db/path.mdx 
b/docs/vocs/docs/pages/cli/reth/db/path.mdx index 113fbb21509..3f95c5761d9 100644 --- a/docs/vocs/docs/pages/cli/reth/db/path.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/path.mdx @@ -86,7 +86,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] diff --git a/docs/vocs/docs/pages/cli/reth/db/repair-trie.mdx b/docs/vocs/docs/pages/cli/reth/db/repair-trie.mdx index e4fd2eeb118..d972bcccd54 100644 --- a/docs/vocs/docs/pages/cli/reth/db/repair-trie.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/repair-trie.mdx @@ -89,7 +89,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] diff --git a/docs/vocs/docs/pages/cli/reth/db/stats.mdx b/docs/vocs/docs/pages/cli/reth/db/stats.mdx index cb100a63e4f..1fd305c4e63 100644 --- a/docs/vocs/docs/pages/cli/reth/db/stats.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/stats.mdx @@ -99,7 +99,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] diff --git a/docs/vocs/docs/pages/cli/reth/db/version.mdx b/docs/vocs/docs/pages/cli/reth/db/version.mdx index 88616890e51..c2b50b8944f 100644 --- a/docs/vocs/docs/pages/cli/reth/db/version.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/version.mdx @@ -86,7 +86,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] diff --git a/docs/vocs/docs/pages/cli/reth/download.mdx b/docs/vocs/docs/pages/cli/reth/download.mdx index f6b75e785d2..1890b95821d 100644 --- a/docs/vocs/docs/pages/cli/reth/download.mdx +++ b/docs/vocs/docs/pages/cli/reth/download.mdx @@ -144,7 +144,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] diff --git a/docs/vocs/docs/pages/cli/reth/dump-genesis.mdx b/docs/vocs/docs/pages/cli/reth/dump-genesis.mdx index 
48ccb4855a6..4791d561980 100644 --- a/docs/vocs/docs/pages/cli/reth/dump-genesis.mdx +++ b/docs/vocs/docs/pages/cli/reth/dump-genesis.mdx @@ -85,7 +85,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] diff --git a/docs/vocs/docs/pages/cli/reth/export-era.mdx b/docs/vocs/docs/pages/cli/reth/export-era.mdx index 0f769e77599..430e0948a99 100644 --- a/docs/vocs/docs/pages/cli/reth/export-era.mdx +++ b/docs/vocs/docs/pages/cli/reth/export-era.mdx @@ -150,7 +150,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] diff --git a/docs/vocs/docs/pages/cli/reth/import-era.mdx b/docs/vocs/docs/pages/cli/reth/import-era.mdx index 71742b25b33..c0d03852de9 100644 --- a/docs/vocs/docs/pages/cli/reth/import-era.mdx +++ b/docs/vocs/docs/pages/cli/reth/import-era.mdx @@ -145,7 +145,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] diff --git a/docs/vocs/docs/pages/cli/reth/import.mdx b/docs/vocs/docs/pages/cli/reth/import.mdx index 80621a4deac..b5795a6e1d7 100644 --- a/docs/vocs/docs/pages/cli/reth/import.mdx +++ b/docs/vocs/docs/pages/cli/reth/import.mdx @@ -146,7 +146,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] diff --git a/docs/vocs/docs/pages/cli/reth/init-state.mdx b/docs/vocs/docs/pages/cli/reth/init-state.mdx index 86132c163d4..1ba1affc519 100644 --- a/docs/vocs/docs/pages/cli/reth/init-state.mdx +++ b/docs/vocs/docs/pages/cli/reth/init-state.mdx @@ -80,9 +80,6 @@ Database: --header Header file containing the header in an RLP encoded format. - --total-difficulty - Total difficulty of the header. - --header-hash Hash of the header. 
@@ -169,7 +166,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] diff --git a/docs/vocs/docs/pages/cli/reth/init.mdx b/docs/vocs/docs/pages/cli/reth/init.mdx index 81be59d6789..11777b1f6e6 100644 --- a/docs/vocs/docs/pages/cli/reth/init.mdx +++ b/docs/vocs/docs/pages/cli/reth/init.mdx @@ -134,7 +134,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] diff --git a/docs/vocs/docs/pages/cli/reth/node.mdx b/docs/vocs/docs/pages/cli/reth/node.mdx index 9b46593a3de..a752f76b019 100644 --- a/docs/vocs/docs/pages/cli/reth/node.mdx +++ b/docs/vocs/docs/pages/cli/reth/node.mdx @@ -734,6 +734,11 @@ Dev testnet: Parses strings using [`humantime::parse_duration`] --dev.block-time 12s + --dev.mnemonic + Derive dev accounts from a fixed mnemonic instead of random ones. + + [default: "test test test test test test test test test test test junk"] + Pruning: --full Run full node. 
Only the most recent [`MINIMUM_PRUNING_DISTANCE`] block states are stored @@ -976,7 +981,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] diff --git a/docs/vocs/docs/pages/cli/reth/p2p.mdx b/docs/vocs/docs/pages/cli/reth/p2p.mdx index 2fc4aa30849..4138656604d 100644 --- a/docs/vocs/docs/pages/cli/reth/p2p.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p.mdx @@ -83,7 +83,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] diff --git a/docs/vocs/docs/pages/cli/reth/p2p/body.mdx b/docs/vocs/docs/pages/cli/reth/p2p/body.mdx index 10efb9b85d7..63f77913f9c 100644 --- a/docs/vocs/docs/pages/cli/reth/p2p/body.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p/body.mdx @@ -303,7 +303,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] diff --git a/docs/vocs/docs/pages/cli/reth/p2p/bootnode.mdx b/docs/vocs/docs/pages/cli/reth/p2p/bootnode.mdx index 7541ba55651..578932411f6 100644 --- a/docs/vocs/docs/pages/cli/reth/p2p/bootnode.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p/bootnode.mdx @@ -94,7 +94,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] diff --git a/docs/vocs/docs/pages/cli/reth/p2p/header.mdx b/docs/vocs/docs/pages/cli/reth/p2p/header.mdx index f854ab9000b..f9b3276ced0 100644 --- a/docs/vocs/docs/pages/cli/reth/p2p/header.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p/header.mdx @@ -303,7 +303,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] diff --git a/docs/vocs/docs/pages/cli/reth/p2p/rlpx.mdx b/docs/vocs/docs/pages/cli/reth/p2p/rlpx.mdx index 1d287c7cf09..8bf19d3ecab 100644 --- a/docs/vocs/docs/pages/cli/reth/p2p/rlpx.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p/rlpx.mdx @@ -80,7 
+80,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] diff --git a/docs/vocs/docs/pages/cli/reth/p2p/rlpx/ping.mdx b/docs/vocs/docs/pages/cli/reth/p2p/rlpx/ping.mdx index d4f07885fea..de13e93b561 100644 --- a/docs/vocs/docs/pages/cli/reth/p2p/rlpx/ping.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p/rlpx/ping.mdx @@ -80,7 +80,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] diff --git a/docs/vocs/docs/pages/cli/reth/prune.mdx b/docs/vocs/docs/pages/cli/reth/prune.mdx index 202a14b2e19..bc5d0385697 100644 --- a/docs/vocs/docs/pages/cli/reth/prune.mdx +++ b/docs/vocs/docs/pages/cli/reth/prune.mdx @@ -134,7 +134,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] diff --git a/docs/vocs/docs/pages/cli/reth/re-execute.mdx b/docs/vocs/docs/pages/cli/reth/re-execute.mdx index 2bb23f77d23..dc3bcbe4627 100644 --- a/docs/vocs/docs/pages/cli/reth/re-execute.mdx +++ b/docs/vocs/docs/pages/cli/reth/re-execute.mdx @@ -147,7 +147,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] diff --git a/docs/vocs/docs/pages/cli/reth/stage.mdx b/docs/vocs/docs/pages/cli/reth/stage.mdx index eed32a608be..85f2559de4d 100644 --- a/docs/vocs/docs/pages/cli/reth/stage.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage.mdx @@ -83,7 +83,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] diff --git a/docs/vocs/docs/pages/cli/reth/stage/drop.mdx b/docs/vocs/docs/pages/cli/reth/stage/drop.mdx index 02385552032..923fd5ff955 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/drop.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/drop.mdx @@ -149,7 +149,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + 
- auto: Auto-detect - never: Colors off [default: always] diff --git a/docs/vocs/docs/pages/cli/reth/stage/dump.mdx b/docs/vocs/docs/pages/cli/reth/stage/dump.mdx index 6dbee5df10c..2466edcb966 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/dump.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/dump.mdx @@ -141,7 +141,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] diff --git a/docs/vocs/docs/pages/cli/reth/stage/dump/account-hashing.mdx b/docs/vocs/docs/pages/cli/reth/stage/dump/account-hashing.mdx index 13819423bfd..c79571b31c3 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/dump/account-hashing.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/dump/account-hashing.mdx @@ -98,7 +98,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] diff --git a/docs/vocs/docs/pages/cli/reth/stage/dump/execution.mdx b/docs/vocs/docs/pages/cli/reth/stage/dump/execution.mdx index 73b24e9ba46..c2480bae00f 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/dump/execution.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/dump/execution.mdx @@ -98,7 +98,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] diff --git a/docs/vocs/docs/pages/cli/reth/stage/dump/merkle.mdx b/docs/vocs/docs/pages/cli/reth/stage/dump/merkle.mdx index a5b3c0f4ff6..423771b183b 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/dump/merkle.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/dump/merkle.mdx @@ -98,7 +98,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] diff --git a/docs/vocs/docs/pages/cli/reth/stage/dump/storage-hashing.mdx b/docs/vocs/docs/pages/cli/reth/stage/dump/storage-hashing.mdx index e6deadb2581..211f4e59979 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/dump/storage-hashing.mdx +++ 
b/docs/vocs/docs/pages/cli/reth/stage/dump/storage-hashing.mdx @@ -98,7 +98,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] diff --git a/docs/vocs/docs/pages/cli/reth/stage/run.mdx b/docs/vocs/docs/pages/cli/reth/stage/run.mdx index d561eb3ce79..9eae5963a17 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/run.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/run.mdx @@ -370,7 +370,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] diff --git a/docs/vocs/docs/pages/cli/reth/stage/unwind.mdx b/docs/vocs/docs/pages/cli/reth/stage/unwind.mdx index fa62d0546d6..ab5776e2e5b 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/unwind.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/unwind.mdx @@ -142,7 +142,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] diff --git a/docs/vocs/docs/pages/cli/reth/stage/unwind/num-blocks.mdx b/docs/vocs/docs/pages/cli/reth/stage/unwind/num-blocks.mdx index 2799b752fef..500cb3197fb 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/unwind/num-blocks.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/unwind/num-blocks.mdx @@ -90,7 +90,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] diff --git a/docs/vocs/docs/pages/cli/reth/stage/unwind/to-block.mdx b/docs/vocs/docs/pages/cli/reth/stage/unwind/to-block.mdx index d2056f7e349..4ec68dbb1ec 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/unwind/to-block.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/unwind/to-block.mdx @@ -90,7 +90,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] diff --git a/testing/ef-tests/src/cases/blockchain_test.rs b/testing/ef-tests/src/cases/blockchain_test.rs index 
5519846458c..c54ef2ad7b1 100644 --- a/testing/ef-tests/src/cases/blockchain_test.rs +++ b/testing/ef-tests/src/cases/blockchain_test.rs @@ -433,7 +433,11 @@ where .map(|(i, tx)| { tx.signature() .recover_from_prehash(&tx.signature_hash()) - .map(|keys| keys.to_encoded_point(false).as_bytes().try_into().unwrap()) + .map(|keys| { + UncompressedPublicKey( + keys.to_encoded_point(false).as_bytes().try_into().unwrap(), + ) + }) .map_err(|e| format!("failed to recover signature for tx #{i}: {e}").into()) }) .collect::, _>>()